diff --git a/sql/upgrades/157.sql b/sql/upgrades/157.sql
new file mode 100644
index 00000000..4767d461
--- /dev/null
+++ b/sql/upgrades/157.sql
@@ -0,0 +1,46 @@
+-- SWH DB schema upgrade
+-- from_version: 156
+-- to_version: 157
+-- description: Add extrinsic artifact metadata
+
+-- latest schema version
+insert into dbversion(version, release, description)
+      values(157, now(), 'Add extrinsic artifact metadata');
+
+create domain swhid as text check (value ~ '^swh:[0-9]+:.*');
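+-- Note: the check only validates the 'swh:<version>:' prefix; for example,
+-- core SWHIDs such as 'swh:1:cnt:<hex sha1_git>' or 'swh:1:dir:<hex sha1_git>' satisfy it.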
+
+alter table origin_metadata
+  rename to object_metadata;
+
+
+-- Use the origin URL (or a SWHID) as identifier, instead of the origin id,
+-- and record the type of object the metadata is on.
+alter table object_metadata
+  add column type text;
+comment on column object_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on';
+
+alter table object_metadata
+  add column origin_url text;
+
+update object_metadata
+  set
+    type = 'origin',
+    origin_url = origin.url
+  from origin
+  where object_metadata.origin_id = origin.id;
+
+alter table object_metadata
+  alter column type set not null;
+alter table object_metadata
+  alter column origin_url set not null;
+
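+-- Drop the old surrogate key and the origin reference; origin_url becomes
+-- the new identifier column below.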
+alter table object_metadata
+  drop column id;
+alter table object_metadata
+  drop column origin_id;
+
+alter table object_metadata
+  rename column origin_url to id;
+comment on column object_metadata.id is 'the SWHID or origin URL for which the metadata was found';
+
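+-- Enforce at most one metadata entry per (object, authority, discovery date, fetcher).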
+create unique index object_metadata_content_authority_date_fetcher
+  on object_metadata(id, authority_id, discovery_date, fetcher_id);
diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py
index 76fb3ecb..2a03a925 100644
--- a/swh/storage/cassandra/cql.py
+++ b/swh/storage/cassandra/cql.py
@@ -1,974 +1,985 @@
 # Copyright (C) 2019-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
+import datetime
 import functools
 import json
 import logging
 import random
 from typing import (
     Any,
     Callable,
     Dict,
     Iterable,
     Iterator,
     List,
     Optional,
     Tuple,
     TypeVar,
 )
 
 from cassandra import CoordinationFailure
 from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT, ExecutionProfile, ResultSet
 from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy
 from cassandra.query import PreparedStatement, BoundStatement
 from tenacity import (
     retry,
     stop_after_attempt,
     wait_random_exponential,
     retry_if_exception_type,
 )
 
 from swh.model.model import (
     Sha1Git,
     TimestampWithTimezone,
     Timestamp,
     Person,
     Content,
     SkippedContent,
     OriginVisit,
     OriginVisitStatus,
     Origin,
 )
 
 from .common import Row, TOKEN_BEGIN, TOKEN_END, hash_url
 from .schema import CREATE_TABLES_QUERIES, HASH_ALGORITHMS
 
 
 logger = logging.getLogger(__name__)
 
 
 _execution_profiles = {
     EXEC_PROFILE_DEFAULT: ExecutionProfile(
         load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy())
     ),
 }
 # Configuration for cassandra-driver's access to servers:
 # * hit the right server directly when sending a query (TokenAwarePolicy),
 # * if there's more than one, then pick one at random that's in the same
 #   datacenter as the client (DCAwareRoundRobinPolicy)
 
 
 def create_keyspace(
     hosts: List[str], keyspace: str, port: int = 9042, *, durable_writes=True
 ):
     cluster = Cluster(hosts, port=port, execution_profiles=_execution_profiles)
     session = cluster.connect()
     extra_params = ""
     if not durable_writes:
         extra_params = "AND durable_writes = false"
     session.execute(
         """CREATE KEYSPACE IF NOT EXISTS "%s"
                        WITH REPLICATION = {
                            'class' : 'SimpleStrategy',
                            'replication_factor' : 1
                        } %s;
                     """
         % (keyspace, extra_params)
     )
     session.execute('USE "%s"' % keyspace)
     for query in CREATE_TABLES_QUERIES:
         session.execute(query)
 
 
 T = TypeVar("T")
 
 
 def _prepared_statement(query: str) -> Callable[[Callable[..., T]], Callable[..., T]]:
     """Returns a decorator usable on methods of CqlRunner, to
     inject them with a 'statement' argument, that is a prepared
     statement corresponding to the query.
 
     This only works on methods of CqlRunner, as preparing a
     statement requires a connection to a Cassandra server."""
 
     def decorator(f):
         @functools.wraps(f)
         def newf(self, *args, **kwargs) -> T:
             if f.__name__ not in self._prepared_statements:
                 statement: PreparedStatement = self._session.prepare(query)
                 self._prepared_statements[f.__name__] = statement
             return f(
                 self, *args, **kwargs, statement=self._prepared_statements[f.__name__]
             )
 
         return newf
 
     return decorator
 
 
 def _prepared_insert_statement(table_name: str, columns: List[str]):
     """Shorthand for using `_prepared_statement` for `INSERT INTO`
     statements."""
     return _prepared_statement(
         "INSERT INTO %s (%s) VALUES (%s)"
         % (table_name, ", ".join(columns), ", ".join("?" for _ in columns),)
     )
 
 
 def _prepared_exists_statement(table_name: str):
     """Shorthand for using `_prepared_statement` for queries that only
     check which ids in a list exist in the table."""
     return _prepared_statement(f"SELECT id FROM {table_name} WHERE id IN ?")
 
 
 class CqlRunner:
     """Class managing prepared statements and building queries to be sent
     to Cassandra."""
 
     def __init__(self, hosts: List[str], keyspace: str, port: int):
         self._cluster = Cluster(
             hosts, port=port, execution_profiles=_execution_profiles
         )
         self._session = self._cluster.connect(keyspace)
         self._cluster.register_user_type(
             keyspace, "microtimestamp_with_timezone", TimestampWithTimezone
         )
         self._cluster.register_user_type(keyspace, "microtimestamp", Timestamp)
         self._cluster.register_user_type(keyspace, "person", Person)
 
         self._prepared_statements: Dict[str, PreparedStatement] = {}
 
     ##########################
     # Common utility functions
     ##########################
 
     MAX_RETRIES = 3
 
     @retry(
         wait=wait_random_exponential(multiplier=1, max=10),
         stop=stop_after_attempt(MAX_RETRIES),
         retry=retry_if_exception_type(CoordinationFailure),
     )
     def _execute_with_retries(self, statement, args) -> ResultSet:
         return self._session.execute(statement, args, timeout=1000.0)
 
     @_prepared_statement(
         "UPDATE object_count SET count = count + ? "
         "WHERE partition_key = 0 AND object_type = ?"
     )
     def _increment_counter(
         self, object_type: str, nb: int, *, statement: PreparedStatement
     ) -> None:
         self._execute_with_retries(statement, [nb, object_type])
 
     def _add_one(self, statement, object_type: str, obj, keys: List[str]) -> None:
         self._increment_counter(object_type, 1)
         self._execute_with_retries(statement, [getattr(obj, key) for key in keys])
 
     def _get_random_row(self, statement) -> Optional[Row]:
         """Takes a prepared statement of the form
         "SELECT * FROM <table> WHERE token(<keys>) > ? LIMIT 1"
         and uses it to return a random row"""
         token = random.randint(TOKEN_BEGIN, TOKEN_END)
         rows = self._execute_with_retries(statement, [token])
         if not rows:
             # There is no row with a greater token; wrap around to get
             # the row with the smallest token
             rows = self._execute_with_retries(statement, [TOKEN_BEGIN])
         if rows:
             return rows.one()
         else:
             return None
 
     def _missing(self, statement, ids):
         res = self._execute_with_retries(statement, [ids])
         found_ids = {id_ for (id_,) in res}
         return [id_ for id_ in ids if id_ not in found_ids]
 
     ##########################
     # 'content' table
     ##########################
 
     _content_pk = ["sha1", "sha1_git", "sha256", "blake2s256"]
     _content_keys = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "ctime",
         "status",
     ]
 
     def _content_add_finalize(self, statement: BoundStatement) -> None:
         """Returned currified by content_add_prepare, to be called when the
         content row should be added to the primary table."""
         self._execute_with_retries(statement, None)
         self._increment_counter("content", 1)
 
     @_prepared_insert_statement("content", _content_keys)
     def content_add_prepare(
         self, content, *, statement
     ) -> Tuple[int, Callable[[], None]]:
         """Prepares insertion of a Content to the main 'content' table.
         Returns a token (to be used in secondary tables), and a function to be
         called to perform the insertion in the main table."""
         statement = statement.bind(
             [getattr(content, key) for key in self._content_keys]
         )
 
         # Type used for hashing keys (usually, it will be
         # cassandra.metadata.Murmur3Token)
         token_class = self._cluster.metadata.token_map.token_class
 
         # Token of the row when it will be inserted. This is equivalent to
         # "SELECT token({', '.join(self._content_pk)}) FROM content WHERE ..."
         # after the row is inserted; but we need the token to insert in the
         # index tables *before* inserting to the main 'content' table
         token = token_class.from_key(statement.routing_key).value
         assert TOKEN_BEGIN <= token <= TOKEN_END
 
         # Function to be called after the indexes contain their respective
         # row
         finalizer = functools.partial(self._content_add_finalize, statement)
 
         return (token, finalizer)
 
     @_prepared_statement(
         "SELECT * FROM content WHERE "
         + " AND ".join(map("%s = ?".__mod__, HASH_ALGORITHMS))
     )
     def content_get_from_pk(
         self, content_hashes: Dict[str, bytes], *, statement
     ) -> Optional[Row]:
         rows = list(
             self._execute_with_retries(
                 statement, [content_hashes[algo] for algo in HASH_ALGORITHMS]
             )
         )
         assert len(rows) <= 1
         if rows:
             return rows[0]
         else:
             return None
 
     @_prepared_statement(
         "SELECT * FROM content WHERE token(" + ", ".join(_content_pk) + ") = ?"
     )
     def content_get_from_token(self, token, *, statement) -> Iterable[Row]:
         return self._execute_with_retries(statement, [token])
 
     @_prepared_statement(
         "SELECT * FROM content WHERE token(%s) > ? LIMIT 1" % ", ".join(_content_pk)
     )
     def content_get_random(self, *, statement) -> Optional[Row]:
         return self._get_random_row(statement)
 
     @_prepared_statement(
         (
             "SELECT token({0}) AS tok, {1} FROM content "
             "WHERE token({0}) >= ? AND token({0}) <= ? LIMIT ?"
         ).format(", ".join(_content_pk), ", ".join(_content_keys))
     )
     def content_get_token_range(
         self, start: int, end: int, limit: int, *, statement
     ) -> Iterable[Row]:
         return self._execute_with_retries(statement, [start, end, limit])
 
     ##########################
     # 'content_by_*' tables
     ##########################
 
     @_prepared_statement("SELECT sha1_git FROM content_by_sha1_git WHERE sha1_git IN ?")
     def content_missing_by_sha1_git(
         self, ids: List[bytes], *, statement
     ) -> List[bytes]:
         return self._missing(statement, ids)
 
     def content_index_add_one(self, algo: str, content: Content, token: int) -> None:
         """Adds a row mapping content[algo] to the token of the Content in
         the main 'content' table."""
         query = (
             f"INSERT INTO content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)"
         )
         self._execute_with_retries(query, [content.get_hash(algo), token])
 
     def content_get_tokens_from_single_hash(
         self, algo: str, hash_: bytes
     ) -> Iterable[int]:
         assert algo in HASH_ALGORITHMS
         query = f"SELECT target_token FROM content_by_{algo} WHERE {algo} = %s"
         return (tok for (tok,) in self._execute_with_retries(query, [hash_]))
 
     ##########################
     # 'skipped_content' table
     ##########################
 
     _skipped_content_pk = ["sha1", "sha1_git", "sha256", "blake2s256"]
     _skipped_content_keys = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "ctime",
         "status",
         "reason",
         "origin",
     ]
     _magic_null_pk = b"<null>"
     """
     NULLs (or all-empty blobs) are not allowed in primary keys; instead use a
     special value that can't possibly be a valid hash.
     """
 
     def _skipped_content_add_finalize(self, statement: BoundStatement) -> None:
         """Returned currified by skipped_content_add_prepare, to be called
         when the content row should be added to the primary table."""
         self._execute_with_retries(statement, None)
         self._increment_counter("skipped_content", 1)
 
     @_prepared_insert_statement("skipped_content", _skipped_content_keys)
     def skipped_content_add_prepare(
         self, content, *, statement
     ) -> Tuple[int, Callable[[], None]]:
         """Prepares insertion of a Content to the main 'skipped_content' table.
         Returns a token (to be used in secondary tables), and a function to be
         called to perform the insertion in the main table."""
 
         # Replace NULLs (which are not allowed in the partition key) with
         # an empty byte string
         content = content.to_dict()
         for key in self._skipped_content_pk:
             if content[key] is None:
                 content[key] = self._magic_null_pk
 
         statement = statement.bind(
             [content.get(key) for key in self._skipped_content_keys]
         )
 
         # Type used for hashing keys (usually, it will be
         # cassandra.metadata.Murmur3Token)
         token_class = self._cluster.metadata.token_map.token_class
 
         # Token of the row when it will be inserted. This is equivalent to
         # "SELECT token({', '.join(self._content_pk)})
         #  FROM skipped_content WHERE ..."
         # after the row is inserted; but we need the token to insert in the
         # index tables *before* inserting to the main 'skipped_content' table
         token = token_class.from_key(statement.routing_key).value
         assert TOKEN_BEGIN <= token <= TOKEN_END
 
         # Function to be called after the indexes contain their respective
         # row
         finalizer = functools.partial(self._skipped_content_add_finalize, statement)
 
         return (token, finalizer)
 
     @_prepared_statement(
         "SELECT * FROM skipped_content WHERE "
         + " AND ".join(map("%s = ?".__mod__, HASH_ALGORITHMS))
     )
     def skipped_content_get_from_pk(
         self, content_hashes: Dict[str, bytes], *, statement
     ) -> Optional[Row]:
         rows = list(
             self._execute_with_retries(
                 statement,
                 [
                     content_hashes[algo] or self._magic_null_pk
                     for algo in HASH_ALGORITHMS
                 ],
             )
         )
         assert len(rows) <= 1
         if rows:
             # TODO: convert _magic_null_pk back to None?
             return rows[0]
         else:
             return None
 
     ##########################
     # 'skipped_content_by_*' tables
     ##########################
 
     def skipped_content_index_add_one(
         self, algo: str, content: SkippedContent, token: int
     ) -> None:
         """Adds a row mapping content[algo] to the token of the SkippedContent
         in the main 'skipped_content' table."""
         query = (
             f"INSERT INTO skipped_content_by_{algo} ({algo}, target_token) "
             f"VALUES (%s, %s)"
         )
         self._execute_with_retries(
             query, [content.get_hash(algo) or self._magic_null_pk, token]
         )
 
     ##########################
     # 'revision' table
     ##########################
 
     _revision_keys = [
         "id",
         "date",
         "committer_date",
         "type",
         "directory",
         "message",
         "author",
         "committer",
         "synthetic",
         "metadata",
     ]
 
     @_prepared_exists_statement("revision")
     def revision_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement("revision", _revision_keys)
     def revision_add_one(self, revision: Dict[str, Any], *, statement) -> None:
         self._execute_with_retries(
             statement, [revision[key] for key in self._revision_keys]
         )
         self._increment_counter("revision", 1)
 
     @_prepared_statement("SELECT id FROM revision WHERE id IN ?")
     def revision_get_ids(self, revision_ids, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [revision_ids])
 
     @_prepared_statement("SELECT * FROM revision WHERE id IN ?")
     def revision_get(self, revision_ids, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [revision_ids])
 
     @_prepared_statement("SELECT * FROM revision WHERE token(id) > ? LIMIT 1")
     def revision_get_random(self, *, statement) -> Optional[Row]:
         return self._get_random_row(statement)
 
     ##########################
     # 'revision_parent' table
     ##########################
 
     _revision_parent_keys = ["id", "parent_rank", "parent_id"]
 
     @_prepared_insert_statement("revision_parent", _revision_parent_keys)
     def revision_parent_add_one(
         self, id_: Sha1Git, parent_rank: int, parent_id: Sha1Git, *, statement
     ) -> None:
         self._execute_with_retries(statement, [id_, parent_rank, parent_id])
 
     @_prepared_statement("SELECT parent_id FROM revision_parent WHERE id = ?")
     def revision_parent_get(self, revision_id: Sha1Git, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [revision_id])
 
     ##########################
     # 'release' table
     ##########################
 
     _release_keys = [
         "id",
         "target",
         "target_type",
         "date",
         "name",
         "message",
         "author",
         "synthetic",
     ]
 
     @_prepared_exists_statement("release")
     def release_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement("release", _release_keys)
     def release_add_one(self, release: Dict[str, Any], *, statement) -> None:
         self._execute_with_retries(
             statement, [release[key] for key in self._release_keys]
         )
         self._increment_counter("release", 1)
 
     @_prepared_statement("SELECT * FROM release WHERE id in ?")
     def release_get(self, release_ids: List[Sha1Git], *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [release_ids])
 
     @_prepared_statement("SELECT * FROM release WHERE token(id) > ? LIMIT 1")
     def release_get_random(self, *, statement) -> Optional[Row]:
         return self._get_random_row(statement)
 
     ##########################
     # 'directory' table
     ##########################
 
     _directory_keys = ["id"]
 
     @_prepared_exists_statement("directory")
     def directory_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement("directory", _directory_keys)
     def directory_add_one(self, directory_id: Sha1Git, *, statement) -> None:
         """Called after all calls to directory_entry_add_one, to
         commit/finalize the directory."""
         self._execute_with_retries(statement, [directory_id])
         self._increment_counter("directory", 1)
 
     @_prepared_statement("SELECT * FROM directory WHERE token(id) > ? LIMIT 1")
     def directory_get_random(self, *, statement) -> Optional[Row]:
         return self._get_random_row(statement)
 
     ##########################
     # 'directory_entry' table
     ##########################
 
     _directory_entry_keys = ["directory_id", "name", "type", "target", "perms"]
 
     @_prepared_insert_statement("directory_entry", _directory_entry_keys)
     def directory_entry_add_one(self, entry: Dict[str, Any], *, statement) -> None:
         self._execute_with_retries(
             statement, [entry[key] for key in self._directory_entry_keys]
         )
 
     @_prepared_statement("SELECT * FROM directory_entry WHERE directory_id IN ?")
     def directory_entry_get(self, directory_ids, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [directory_ids])
 
     ##########################
     # 'snapshot' table
     ##########################
 
     _snapshot_keys = ["id"]
 
     @_prepared_exists_statement("snapshot")
     def snapshot_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement("snapshot", _snapshot_keys)
     def snapshot_add_one(self, snapshot_id: Sha1Git, *, statement) -> None:
         self._execute_with_retries(statement, [snapshot_id])
         self._increment_counter("snapshot", 1)
 
     @_prepared_statement("SELECT * FROM snapshot WHERE id = ?")
     def snapshot_get(self, snapshot_id: Sha1Git, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [snapshot_id])
 
     @_prepared_statement("SELECT * FROM snapshot WHERE token(id) > ? LIMIT 1")
     def snapshot_get_random(self, *, statement) -> Optional[Row]:
         return self._get_random_row(statement)
 
     ##########################
     # 'snapshot_branch' table
     ##########################
 
     _snapshot_branch_keys = ["snapshot_id", "name", "target_type", "target"]
 
     @_prepared_insert_statement("snapshot_branch", _snapshot_branch_keys)
     def snapshot_branch_add_one(self, branch: Dict[str, Any], *, statement) -> None:
         self._execute_with_retries(
             statement, [branch[key] for key in self._snapshot_branch_keys]
         )
 
     @_prepared_statement(
         "SELECT ascii_bins_count(target_type) AS counts "
         "FROM snapshot_branch "
         "WHERE snapshot_id = ? "
     )
     def snapshot_count_branches(self, snapshot_id: Sha1Git, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [snapshot_id])
 
     @_prepared_statement(
         "SELECT * FROM snapshot_branch WHERE snapshot_id = ? AND name >= ? LIMIT ?"
     )
     def snapshot_branch_get(
         self, snapshot_id: Sha1Git, from_: bytes, limit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [snapshot_id, from_, limit])
 
     ##########################
     # 'origin' table
     ##########################
 
     origin_keys = ["sha1", "url", "type", "next_visit_id"]
 
     @_prepared_statement(
         "INSERT INTO origin (sha1, url, next_visit_id) "
         "VALUES (?, ?, 1) IF NOT EXISTS"
     )
     def origin_add_one(self, origin: Origin, *, statement) -> None:
         self._execute_with_retries(statement, [hash_url(origin.url), origin.url])
         self._increment_counter("origin", 1)
 
     @_prepared_statement("SELECT * FROM origin WHERE sha1 = ?")
     def origin_get_by_sha1(self, sha1: bytes, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [sha1])
 
     def origin_get_by_url(self, url: str) -> ResultSet:
         return self.origin_get_by_sha1(hash_url(url))
 
     @_prepared_statement(
         f'SELECT token(sha1) AS tok, {", ".join(origin_keys)} '
         f"FROM origin WHERE token(sha1) >= ? LIMIT ?"
     )
     def origin_list(self, start_token: int, limit: int, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [start_token, limit])
 
     @_prepared_statement("SELECT * FROM origin")
     def origin_iter_all(self, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [])
 
     @_prepared_statement("SELECT next_visit_id FROM origin WHERE sha1 = ?")
     def _origin_get_next_visit_id(self, origin_sha1: bytes, *, statement) -> int:
         rows = list(self._execute_with_retries(statement, [origin_sha1]))
         assert len(rows) == 1  # TODO: error handling
         return rows[0].next_visit_id
 
     @_prepared_statement(
         "UPDATE origin SET next_visit_id=? WHERE sha1 = ? IF next_visit_id=?"
     )
     def origin_generate_unique_visit_id(self, origin_url: str, *, statement) -> int:
         origin_sha1 = hash_url(origin_url)
         next_id = self._origin_get_next_visit_id(origin_sha1)
         while True:
             res = list(
                 self._execute_with_retries(
                     statement, [next_id + 1, origin_sha1, next_id]
                 )
             )
             assert len(res) == 1
             if res[0].applied:
                 # No data race
                 return next_id
             else:
                 # Someone else updated it before we did, let's try again
                 next_id = res[0].next_visit_id
                 # TODO: abort after too many attempts
 
         return next_id
 
     ##########################
     # 'origin_visit' table
     ##########################
 
     _origin_visit_keys = [
         "origin",
         "visit",
         "type",
         "date",
     ]
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? AND visit > ? "
         "ORDER BY visit ASC"
     )
     def _origin_visit_get_pagination_asc_no_limit(
         self, origin_url: str, last_visit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url, last_visit])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? AND visit > ? "
         "ORDER BY visit ASC "
         "LIMIT ?"
     )
     def _origin_visit_get_pagination_asc_limit(
         self, origin_url: str, last_visit: int, limit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url, last_visit, limit])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? AND visit < ? "
         "ORDER BY visit DESC"
     )
     def _origin_visit_get_pagination_desc_no_limit(
         self, origin_url: str, last_visit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url, last_visit])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? AND visit < ? "
         "ORDER BY visit DESC "
         "LIMIT ?"
     )
     def _origin_visit_get_pagination_desc_limit(
         self, origin_url: str, last_visit: int, limit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url, last_visit, limit])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit ASC LIMIT ?"
     )
     def _origin_visit_get_no_pagination_asc_limit(
         self, origin_url: str, limit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url, limit])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit ASC "
     )
     def _origin_visit_get_no_pagination_asc_no_limit(
         self, origin_url: str, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit DESC"
     )
     def _origin_visit_get_no_pagination_desc_no_limit(
         self, origin_url: str, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url])
 
     @_prepared_statement(
         "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit DESC LIMIT ?"
     )
     def _origin_visit_get_no_pagination_desc_limit(
         self, origin_url: str, limit: int, *, statement
     ) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url, limit])
 
     def origin_visit_get(
         self,
         origin_url: str,
         last_visit: Optional[int],
         limit: Optional[int],
         order: str = "asc",
     ) -> ResultSet:
         order = order.lower()
         assert order in ["asc", "desc"]
 
         args: List[Any] = [origin_url]
 
         if last_visit is not None:
             page_name = "pagination"
             args.append(last_visit)
         else:
             page_name = "no_pagination"
 
         if limit is not None:
             limit_name = "limit"
             args.append(limit)
         else:
             limit_name = "no_limit"
 
         method_name = f"_origin_visit_get_{page_name}_{order}_{limit_name}"
         origin_visit_get_method = getattr(self, method_name)
         return origin_visit_get_method(*args)
 
     @_prepared_insert_statement("origin_visit", _origin_visit_keys)
     def origin_visit_add_one(self, visit: OriginVisit, *, statement) -> None:
         self._add_one(statement, "origin_visit", visit, self._origin_visit_keys)
 
     _origin_visit_status_keys = [
         "origin",
         "visit",
         "date",
         "status",
         "snapshot",
         "metadata",
     ]
 
     @_prepared_insert_statement("origin_visit_status", _origin_visit_status_keys)
     def origin_visit_status_add_one(
         self, visit_update: OriginVisitStatus, *, statement
     ) -> None:
         assert self._origin_visit_status_keys[-1] == "metadata"
         keys = self._origin_visit_status_keys
 
         metadata = json.dumps(visit_update.metadata)
         self._execute_with_retries(
             statement, [getattr(visit_update, key) for key in keys[:-1]] + [metadata]
         )
 
     def origin_visit_status_get_latest(self, origin: str, visit: int,) -> Optional[Row]:
         """Given an origin visit id, return its latest origin_visit_status
 
          """
         rows = self.origin_visit_status_get(origin, visit)
         return rows[0] if rows else None
 
     @_prepared_statement(
         "SELECT * FROM origin_visit_status "
         "WHERE origin = ? AND visit = ? "
         "ORDER BY date DESC"
     )
     def origin_visit_status_get(
         self,
         origin: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
         *,
         statement,
     ) -> List[Row]:
         """Return all origin visit statuses for a given visit
 
         """
         return list(self._execute_with_retries(statement, [origin, visit]))
 
     @_prepared_statement("SELECT * FROM origin_visit WHERE origin = ? AND visit = ?")
     def origin_visit_get_one(
         self, origin_url: str, visit_id: int, *, statement
     ) -> Optional[Row]:
         # TODO: error handling
         rows = list(self._execute_with_retries(statement, [origin_url, visit_id]))
         if rows:
             return rows[0]
         else:
             return None
 
     @_prepared_statement("SELECT * FROM origin_visit WHERE origin = ?")
     def origin_visit_get_all(self, origin_url: str, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [origin_url])
 
     @_prepared_statement("SELECT * FROM origin_visit WHERE token(origin) >= ?")
     def _origin_visit_iter_from(self, min_token: int, *, statement) -> Iterator[Row]:
         yield from self._execute_with_retries(statement, [min_token])
 
     @_prepared_statement("SELECT * FROM origin_visit WHERE token(origin) < ?")
     def _origin_visit_iter_to(self, max_token: int, *, statement) -> Iterator[Row]:
         yield from self._execute_with_retries(statement, [max_token])
 
     def origin_visit_iter(self, start_token: int) -> Iterator[Row]:
         """Returns all origin visits in order from this token,
         and wraps around the token space."""
         yield from self._origin_visit_iter_from(start_token)
         yield from self._origin_visit_iter_to(start_token)
 
     ##########################
     # 'metadata_authority' table
     ##########################
 
     _metadata_authority_keys = ["url", "type", "metadata"]
 
     @_prepared_insert_statement("metadata_authority", _metadata_authority_keys)
     def metadata_authority_add(self, url, type, metadata, *, statement):
         return self._execute_with_retries(statement, [url, type, metadata])
 
     @_prepared_statement("SELECT * from metadata_authority WHERE type = ? AND url = ?")
     def metadata_authority_get(self, type, url, *, statement) -> Optional[Row]:
         return next(iter(self._execute_with_retries(statement, [type, url])), None)
 
     ##########################
     # 'metadata_fetcher' table
     ##########################
 
     _metadata_fetcher_keys = ["name", "version", "metadata"]
 
     @_prepared_insert_statement("metadata_fetcher", _metadata_fetcher_keys)
     def metadata_fetcher_add(self, name, version, metadata, *, statement):
         return self._execute_with_retries(statement, [name, version, metadata])
 
     @_prepared_statement(
         "SELECT * from metadata_fetcher WHERE name = ? AND version = ?"
     )
     def metadata_fetcher_get(self, name, version, *, statement) -> Optional[Row]:
         return next(iter(self._execute_with_retries(statement, [name, version])), None)
 
-    ##########################
-    # 'origin_metadata' table
-    ##########################
+    #########################
+    # 'object_metadata' table
+    #########################
 
-    _origin_metadata_keys = [
-        "origin",
+    _object_metadata_keys = [
+        "type",
+        "id",
         "authority_type",
         "authority_url",
         "discovery_date",
         "fetcher_name",
         "fetcher_version",
         "format",
         "metadata",
     ]
 
-    @_prepared_insert_statement("origin_metadata", _origin_metadata_keys)
-    def origin_metadata_add(
+    @_prepared_statement(
+        f"INSERT INTO object_metadata ({', '.join(_object_metadata_keys)}) "
+        f"VALUES ({', '.join('?' for _ in _object_metadata_keys)})"
+    )
+    def object_metadata_add(
         self,
-        origin,
+        object_type: str,
+        id: str,
         authority_type,
         authority_url,
         discovery_date,
         fetcher_name,
         fetcher_version,
         format,
         metadata,
         *,
         statement,
     ):
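+        """Inserts a row in the 'object_metadata' table.
+
+        `id` is an origin URL when `object_type` is 'origin', and a SWHID for
+        other object types; illustrative call (values are made up):
+        object_metadata_add("origin", "https://example.org/repo.git", ...).
+        """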
-        return self._execute_with_retries(
-            statement,
-            [
-                origin,
-                authority_type,
-                authority_url,
-                discovery_date,
-                fetcher_name,
-                fetcher_version,
-                format,
-                metadata,
-            ],
-        )
+        params = [
+            object_type,
+            id,
+            authority_type,
+            authority_url,
+            discovery_date,
+            fetcher_name,
+            fetcher_version,
+            format,
+            metadata,
+        ]
+
+        return self._execute_with_retries(statement, params)
 
     @_prepared_statement(
-        "SELECT * from origin_metadata "
-        "WHERE origin=? AND authority_url=? AND discovery_date>? "
-        "AND authority_type=?"
+        "SELECT * from object_metadata "
+        "WHERE id=? AND authority_url=? AND discovery_date>? AND authority_type=?"
     )
-    def origin_metadata_get_after_date(
-        self, origin, authority_type, authority_url, after, *, statement
+    def object_metadata_get_after_date(
+        self,
+        id: str,
+        authority_type: str,
+        authority_url: str,
+        after: datetime.datetime,
+        *,
+        statement,
     ):
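+        """Returns metadata rows for the given object and authority whose
+        discovery date is strictly after `after`."""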
         return self._execute_with_retries(
-            statement, [origin, authority_url, after, authority_type]
+            statement, [id, authority_url, after, authority_type]
         )
 
     @_prepared_statement(
-        "SELECT * from origin_metadata "
-        "WHERE origin=? AND authority_type=? AND authority_url=? "
+        "SELECT * from object_metadata "
+        "WHERE id=? AND authority_type=? AND authority_url=? "
         "AND (discovery_date, fetcher_name, fetcher_version) > (?, ?, ?)"
     )
-    def origin_metadata_get_after_date_and_fetcher(
+    def object_metadata_get_after_date_and_fetcher(
         self,
-        origin,
-        authority_type,
-        authority_url,
-        after_date,
-        after_fetcher_name,
-        after_fetcher_version,
+        id: str,
+        authority_type: str,
+        authority_url: str,
+        after_date: datetime.datetime,
+        after_fetcher_name: str,
+        after_fetcher_version: str,
         *,
         statement,
     ):
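+        """Returns metadata rows for the given object and authority strictly
+        after the (date, fetcher name, fetcher version) tuple, relying on the
+        CQL tuple comparison in the statement above."""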
         return self._execute_with_retries(
             statement,
             [
-                origin,
+                id,
                 authority_type,
                 authority_url,
                 after_date,
                 after_fetcher_name,
                 after_fetcher_version,
             ],
         )
 
     @_prepared_statement(
-        "SELECT * from origin_metadata "
-        "WHERE origin=? AND authority_url=? AND authority_type=?"
+        "SELECT * from object_metadata "
+        "WHERE id=? AND authority_url=? AND authority_type=?"
     )
-    def origin_metadata_get(
-        self, origin, authority_type, authority_url, *, statement
+    def object_metadata_get(
+        self, id: str, authority_type: str, authority_url: str, *, statement
     ) -> Iterable[Row]:
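+        """Returns all metadata rows for the given object and authority."""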
         return self._execute_with_retries(
-            statement, [origin, authority_url, authority_type]
+            statement, [id, authority_url, authority_type]
         )
 
     ##########################
     # Miscellaneous
     ##########################
 
     @_prepared_statement("SELECT uuid() FROM revision LIMIT 1;")
     def check_read(self, *, statement):
         self._execute_with_retries(statement, [])
 
     @_prepared_statement(
         "SELECT object_type, count FROM object_count WHERE partition_key=0"
     )
     def stat_counters(self, *, statement) -> ResultSet:
         return self._execute_with_retries(statement, [])
diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py
index fef4cb15..89d5d7ee 100644
--- a/swh/storage/cassandra/schema.py
+++ b/swh/storage/cassandra/schema.py
@@ -1,254 +1,273 @@
 # Copyright (C) 2019-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 
 CREATE_TABLES_QUERIES = """
 CREATE OR REPLACE FUNCTION ascii_bins_count_sfunc (
     state tuple<int, map<ascii, int>>, -- (nb_none, map<target_type, nb>)
     bin_name ascii
 )
 CALLED ON NULL INPUT
 RETURNS tuple<int, map<ascii, int>>
 LANGUAGE java AS
 $$
     if (bin_name == null) {
         state.setInt(0, state.getInt(0) + 1);
     }
     else {
         Map<String, Integer> counters = state.getMap(
             1, String.class, Integer.class);
         Integer nb = counters.get(bin_name);
         if (nb == null) {
             nb = 0;
         }
         counters.put(bin_name, nb + 1);
         state.setMap(1, counters, String.class, Integer.class);
     }
     return state;
 $$
 ;
 
+
 CREATE OR REPLACE AGGREGATE ascii_bins_count ( ascii )
 SFUNC ascii_bins_count_sfunc
 STYPE tuple<int, map<ascii, int>>
 INITCOND (0, {})
 ;
 
+
 CREATE TYPE IF NOT EXISTS microtimestamp (
     seconds             bigint,
     microseconds        int
 );
 
+
 CREATE TYPE IF NOT EXISTS microtimestamp_with_timezone (
     timestamp           frozen<microtimestamp>,
     offset              smallint,
     negative_utc        boolean
 );
 
+
 CREATE TYPE IF NOT EXISTS person (
     fullname    blob,
     name        blob,
     email       blob
 );
 
+
 CREATE TABLE IF NOT EXISTS content (
     sha1          blob,
     sha1_git      blob,
     sha256        blob,
     blake2s256    blob,
     length        bigint,
     ctime         timestamp,
         -- creation time, i.e. time of (first) injection into the storage
     status        ascii,
     PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256))
 );
 
+
 CREATE TABLE IF NOT EXISTS skipped_content (
     sha1          blob,
     sha1_git      blob,
     sha256        blob,
     blake2s256    blob,
     length        bigint,
     ctime         timestamp,
         -- creation time, i.e. time of (first) injection into the storage
     status        ascii,
     reason        text,
     origin        text,
     PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256))
 );
 
+
 CREATE TABLE IF NOT EXISTS revision (
     id                              blob PRIMARY KEY,
     date                            microtimestamp_with_timezone,
     committer_date                  microtimestamp_with_timezone,
     type                            ascii,
     directory                       blob,  -- source code "root" directory
     message                         blob,
     author                          person,
     committer                       person,
     synthetic                       boolean,
         -- true iff revision has been created by Software Heritage
     metadata                        text
         -- extra metadata as JSON(tarball checksums,
         -- extra commit information, etc...)
 );
 
+
 CREATE TABLE IF NOT EXISTS revision_parent (
     id                     blob,
     parent_rank                     int,
         -- parent position in merge commits, 0-based
     parent_id                       blob,
     PRIMARY KEY ((id), parent_rank)
 );
 
+
 CREATE TABLE IF NOT EXISTS release
 (
     id                              blob PRIMARY KEY,
     target_type                     ascii,
     target                          blob,
     date                            microtimestamp_with_timezone,
     name                            blob,
     message                         blob,
     author                          person,
     synthetic                       boolean,
         -- true iff release has been created by Software Heritage
 );
 
+
 CREATE TABLE IF NOT EXISTS directory (
     id              blob PRIMARY KEY,
 );
 
+
 CREATE TABLE IF NOT EXISTS directory_entry (
     directory_id    blob,
     name            blob,  -- path name, relative to containing dir
     target          blob,
     perms           int,   -- unix-like permissions
     type            ascii, -- target type
     PRIMARY KEY ((directory_id), name)
 );
 
+
 CREATE TABLE IF NOT EXISTS snapshot (
     id              blob PRIMARY KEY,
 );
 
+
 -- For a given snapshot_id, branches are sorted by their name,
 -- allowing easy pagination.
 CREATE TABLE IF NOT EXISTS snapshot_branch (
     snapshot_id     blob,
     name            blob,
     target_type     ascii,
     target          blob,
     PRIMARY KEY ((snapshot_id), name)
 );
 
+
 CREATE TABLE IF NOT EXISTS origin_visit (
     origin          text,
     visit           bigint,
     date            timestamp,
     type            text,
     PRIMARY KEY ((origin), visit)
 );
 
+
 CREATE TABLE IF NOT EXISTS origin_visit_status (
     origin          text,
     visit           bigint,
     date            timestamp,
     status          ascii,
     metadata        text,
     snapshot        blob,
     PRIMARY KEY ((origin), visit, date)
 );
 
+
 CREATE TABLE IF NOT EXISTS origin (
     sha1            blob PRIMARY KEY,
     url             text,
     type            text,
     next_visit_id   int,
         -- We need integer visit ids for compatibility with the pgsql
         -- storage, so we're using lightweight transactions with this trick:
         -- https://stackoverflow.com/a/29391877/539465
 );
 
 
 CREATE TABLE IF NOT EXISTS metadata_authority (
     url             text,
     type            ascii,
     metadata        text,
     PRIMARY KEY ((url), type)
 );
 
 
 CREATE TABLE IF NOT EXISTS metadata_fetcher (
     name            ascii,
     version         ascii,
     metadata        text,
     PRIMARY KEY ((name), version)
 );
 
 
-CREATE TABLE IF NOT EXISTS origin_metadata (
-    origin          text,
+CREATE TABLE IF NOT EXISTS object_metadata (
+    type            text,
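+        -- the type of object the metadata is on:
+        -- content/directory/revision/release/snapshot/origin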
+    id              text,
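+        -- an origin URL or a SWHID, depending on the value of 'type'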
+
+    -- metadata source
     authority_type  text,
     authority_url   text,
     discovery_date  timestamp,
     fetcher_name    ascii,
     fetcher_version ascii,
+
+    -- metadata itself
     format          ascii,
     metadata        blob,
-    PRIMARY KEY ((origin), authority_type, authority_url, discovery_date,
-                           fetcher_name, fetcher_version),
-    -- for now, authority_url could be in the partition key; but leaving
-    -- in the partition key allows listing authorities with metadata on an
-    -- origin if we ever need to do it.
+
+    PRIMARY KEY ((id), authority_type, authority_url, discovery_date,
+                       fetcher_name, fetcher_version)
 );
 
 
 CREATE TABLE IF NOT EXISTS object_count (
     partition_key   smallint,  -- Constant, must always be 0
     object_type     ascii,
     count           counter,
     PRIMARY KEY ((partition_key), object_type)
 );
 """.split(
-    "\n\n"
+    "\n\n\n"
 )
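+# CREATE_TABLES_QUERIES is split on two consecutive blank lines ("\n\n\n") so
+# that single blank lines *inside* a statement (e.g. in object_metadata) do
+# not split it into separate queries.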
 
 CONTENT_INDEX_TEMPLATE = """
 -- Secondary table, used for looking up "content" from a single hash
 CREATE TABLE IF NOT EXISTS content_by_{main_algo} (
     {main_algo}   blob,
     target_token  bigint, -- value of token(pk) on the "primary" table
     PRIMARY KEY (({main_algo}), target_token)
 );
 
 CREATE TABLE IF NOT EXISTS skipped_content_by_{main_algo} (
     {main_algo}   blob,
     target_token  bigint, -- value of token(pk) on the "primary" table
     PRIMARY KEY (({main_algo}), target_token)
 );
 """
 
 TABLES = (
     "skipped_content content revision revision_parent release "
     "directory directory_entry snapshot snapshot_branch "
-    "origin_visit origin origin_metadata object_count "
+    "origin_visit origin object_metadata object_count "
     "origin_visit_status metadata_authority "
     "metadata_fetcher"
 ).split()
 
 HASH_ALGORITHMS = ["sha1", "sha1_git", "sha256", "blake2s256"]
 
 for main_algo in HASH_ALGORITHMS:
     CREATE_TABLES_QUERIES.extend(
         CONTENT_INDEX_TEMPLATE.format(
             main_algo=main_algo,
             other_algos=", ".join(
                 [algo for algo in HASH_ALGORITHMS if algo != main_algo]
             ),
         ).split("\n\n")
     )
 
     TABLES.append("content_by_%s" % main_algo)
     TABLES.append("skipped_content_by_%s" % main_algo)
diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py
index cbd0b4b2..b8656b1d 100644
--- a/swh/storage/cassandra/storage.py
+++ b/swh/storage/cassandra/storage.py
@@ -1,1169 +1,1202 @@
 # Copyright (C) 2019-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import datetime
 import itertools
 import json
 import random
 import re
 from typing import Any, Dict, List, Iterable, Optional
 
 import attr
 from deprecated import deprecated
 
 from swh.core.api.serializers import msgpack_loads, msgpack_dumps
 from swh.model.model import (
     Revision,
     Release,
     Directory,
     DirectoryEntry,
     Content,
     SkippedContent,
     OriginVisit,
     OriginVisitStatus,
     Snapshot,
     Origin,
 )
 from swh.model.hashutil import DEFAULT_ALGORITHMS
 from swh.storage.objstorage import ObjStorage
 from swh.storage.writer import JournalWriter
 from swh.storage.utils import now
 
 from ..exc import StorageArgumentException, HashCollision
 from .common import TOKEN_BEGIN, TOKEN_END
 from .converters import (
     revision_to_db,
     revision_from_db,
     release_to_db,
     release_from_db,
     row_to_visit_status,
 )
 from .cql import CqlRunner
 from .schema import HASH_ALGORITHMS
 
 
 # Max block size of contents to return
 BULK_BLOCK_CONTENT_LEN_MAX = 10000
 
 
 class CassandraStorage:
     def __init__(self, hosts, keyspace, objstorage, port=9042, journal_writer=None):
         self._cql_runner = CqlRunner(hosts, keyspace, port)
         self.journal_writer = JournalWriter(journal_writer)
         self.objstorage = ObjStorage(objstorage)
 
     def check_config(self, *, check_write):
         self._cql_runner.check_read()
 
         return True
 
     def _content_get_from_hash(self, algo, hash_) -> Iterable:
         """From the name of a hash algorithm and a value of that hash,
         looks up the "hash -> token" secondary table (content_by_{algo})
         to get tokens.
         Then, looks up the main table (content) to get all contents with
         that token, and filters out contents whose hash doesn't match."""
         found_tokens = self._cql_runner.content_get_tokens_from_single_hash(algo, hash_)
 
         for token in found_tokens:
             # Query the main table ('content').
             res = self._cql_runner.content_get_from_token(token)
 
             for row in res:
                 # re-check the hash (in case of murmur3 collision)
                 if getattr(row, algo) == hash_:
                     yield row
 
     def _content_add(self, contents: List[Content], with_data: bool) -> Dict:
         # Filter-out content already in the database.
         contents = [
             c for c in contents if not self._cql_runner.content_get_from_pk(c.to_dict())
         ]
 
         self.journal_writer.content_add(contents)
 
         if with_data:
             # First insert to the objstorage, if the endpoint is
             # `content_add` (as opposed to `content_add_metadata`).
             # TODO: this should probably be done concurrently with inserting
             # in the index tables (but still before the main table, so an entry
             # is only added to the main table after everything else has been
             # successfully inserted).
             summary = self.objstorage.content_add(
                 c for c in contents if c.status != "absent"
             )
             content_add_bytes = summary["content:add:bytes"]
 
         content_add = 0
         for content in contents:
             content_add += 1
 
             # Check for sha1 or sha1_git collisions. This test is not atomic
             # with the insertion, so it won't detect a collision if both
             # contents are inserted at the same time, but it's good enough.
             #
             # The proper way to do it would probably be a BATCH, but this
             # would be inefficient because of the number of partitions we
             # need to affect (len(HASH_ALGORITHMS)+1, which is currently 5)
             for algo in {"sha1", "sha1_git"}:
                 collisions = []
                 # Get tokens of 'content' rows with the same value for
                 # sha1/sha1_git
                 rows = self._content_get_from_hash(algo, content.get_hash(algo))
                 for row in rows:
                     if getattr(row, algo) != content.get_hash(algo):
                         # collision of token(partition key), ignore this
                         # row
                         continue
 
                     for algo in HASH_ALGORITHMS:
                         if getattr(row, algo) != content.get_hash(algo):
                             # This hash didn't match; record the colliding
                             # row's hashes.
                             collisions.append(
                                 {algo: getattr(row, algo) for algo in HASH_ALGORITHMS}
                             )
 
                 if collisions:
                     collisions.append(content.hashes())
                     raise HashCollision(algo, content.get_hash(algo), collisions)
 
             (token, insertion_finalizer) = self._cql_runner.content_add_prepare(content)
 
             # Then add to index tables
             for algo in HASH_ALGORITHMS:
                 self._cql_runner.content_index_add_one(algo, content, token)
 
             # Then to the main table
             insertion_finalizer()
 
         summary = {
             "content:add": content_add,
         }
 
         if with_data:
             summary["content:add:bytes"] = content_add_bytes
 
         return summary
 
     def content_add(self, content: Iterable[Content]) -> Dict:
         contents = [attr.evolve(c, ctime=now()) for c in content]
         return self._content_add(list(contents), with_data=True)
 
     def content_update(self, content, keys=[]):
         raise NotImplementedError(
             "content_update is not supported by the Cassandra backend"
         )
 
     def content_add_metadata(self, content: Iterable[Content]) -> Dict:
         return self._content_add(list(content), with_data=False)
 
     def content_get(self, content):
         if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
             raise StorageArgumentException(
                 "Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX
             )
         yield from self.objstorage.content_get(content)
 
     def content_get_partition(
         self,
         partition_id: int,
         nb_partitions: int,
         limit: int = 1000,
         page_token: str = None,
     ):
         if limit is None:
             raise StorageArgumentException("limit should not be None")
 
         # Compute start and end of the range of tokens covered by the
         # requested partition
         partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions
         range_start = TOKEN_BEGIN + partition_id * partition_size
         range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size
 
         # offset the range start according to the `page_token`.
         if page_token is not None:
             if not (range_start <= int(page_token) <= range_end):
                 raise StorageArgumentException("Invalid page_token.")
             range_start = int(page_token)
 
         # Get the first rows of the range
         rows = self._cql_runner.content_get_token_range(range_start, range_end, limit)
         rows = list(rows)
 
         if len(rows) == limit:
             next_page_token: Optional[str] = str(rows[-1].tok + 1)
         else:
             next_page_token = None
 
         return {
             "contents": [row._asdict() for row in rows if row.status != "absent"],
             "next_page_token": next_page_token,
         }
 
     def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
         result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents}
         for sha1 in contents:
             # Get all (sha1, sha1_git, sha256, blake2s256) whose sha1
             # matches the argument, from the index table ('content_by_sha1')
             for row in self._content_get_from_hash("sha1", sha1):
                 content_metadata = row._asdict()
                 content_metadata.pop("ctime")
                 result[content_metadata["sha1"]].append(content_metadata)
         return result
 
     def content_find(self, content):
         # Find an algorithm that is common to all the requested contents.
         # It will be used to do an initial filtering efficiently.
         filter_algos = list(set(content).intersection(HASH_ALGORITHMS))
         if not filter_algos:
             raise StorageArgumentException(
                 "content keys must contain at least one of: "
                 "%s" % ", ".join(sorted(HASH_ALGORITHMS))
             )
         common_algo = filter_algos[0]
 
         results = []
         rows = self._content_get_from_hash(common_algo, content[common_algo])
         for row in rows:
             # Re-check all the hashes, in case of collisions (either of the
             # hash of the partition key, or the hashes in it)
             for algo in HASH_ALGORITHMS:
                 if content.get(algo) and getattr(row, algo) != content[algo]:
                     # This hash didn't match; discard the row.
                     break
             else:
                 # All hashes match, keep this row.
                 results.append(
                     {
                         **row._asdict(),
                         "ctime": row.ctime.replace(tzinfo=datetime.timezone.utc),
                     }
                 )
         return results
 
     def content_missing(self, content, key_hash="sha1"):
         for cont in content:
             res = self.content_find(cont)
             if not res:
                 yield cont[key_hash]
             if any(c["status"] == "missing" for c in res):
                 yield cont[key_hash]
 
     def content_missing_per_sha1(self, contents):
         return self.content_missing([{"sha1": c} for c in contents])
 
     def content_missing_per_sha1_git(self, contents):
         return self.content_missing(
             [{"sha1_git": c for c in contents}], key_hash="sha1_git"
         )
 
     def content_get_random(self):
         return self._cql_runner.content_get_random().sha1_git
 
     def _skipped_content_get_from_hash(self, algo, hash_) -> Iterable:
         """From the name of a hash algorithm and a value of that hash,
         looks up the "hash -> token" secondary table
         (skipped_content_by_{algo}) to get tokens.
         Then, looks up the main table (content) to get all contents with
         that token, and filters out contents whose hash doesn't match."""
         found_tokens = self._cql_runner.skipped_content_get_tokens_from_single_hash(
             algo, hash_
         )
 
         for token in found_tokens:
             # Query the main table ('content').
             res = self._cql_runner.skipped_content_get_from_token(token)
 
             for row in res:
                 # re-check the hash (in case of murmur3 collision)
                 if getattr(row, algo) == hash_:
                     yield row
 
     def _skipped_content_add(self, contents: Iterable[SkippedContent]) -> Dict:
         # Filter-out content already in the database.
         contents = [
             c
             for c in contents
             if not self._cql_runner.skipped_content_get_from_pk(c.to_dict())
         ]
 
         self.journal_writer.skipped_content_add(contents)
 
         for content in contents:
             # Compute token of the row in the main table
             (token, insertion_finalizer) = self._cql_runner.skipped_content_add_prepare(
                 content
             )
 
             # Then add to index tables
             for algo in HASH_ALGORITHMS:
                 self._cql_runner.skipped_content_index_add_one(algo, content, token)
 
             # Then to the main table
             insertion_finalizer()
 
         return {"skipped_content:add": len(contents)}
 
     def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict:
         contents = [attr.evolve(c, ctime=now()) for c in content]
         return self._skipped_content_add(contents)
 
     def skipped_content_missing(self, contents):
         for content in contents:
             if not self._cql_runner.skipped_content_get_from_pk(content):
                 yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS}
 
     def directory_add(self, directories: Iterable[Directory]) -> Dict:
         directories = list(directories)
 
         # Filter out directories that are already inserted.
         missing = self.directory_missing([dir_.id for dir_ in directories])
         directories = [dir_ for dir_ in directories if dir_.id in missing]
 
         self.journal_writer.directory_add(directories)
 
         for directory in directories:
             # Add directory entries to the 'directory_entry' table
             for entry in directory.entries:
                 self._cql_runner.directory_entry_add_one(
                     {**entry.to_dict(), "directory_id": directory.id}
                 )
 
             # Add the directory *after* adding all the entries, so someone
             # listing the directory in the meantime won't end up
             # with half the entries.
             self._cql_runner.directory_add_one(directory.id)
 
         return {"directory:add": len(missing)}
 
     def directory_missing(self, directories):
         return self._cql_runner.directory_missing(directories)
 
     def _join_dentry_to_content(self, dentry):
         keys = (
             "status",
             "sha1",
             "sha1_git",
             "sha256",
             "length",
         )
         ret = dict.fromkeys(keys)
         ret.update(dentry.to_dict())
         if ret["type"] == "file":
             content = self.content_find({"sha1_git": ret["target"]})
             if content:
                 content = content[0]
                 for key in keys:
                     ret[key] = content[key]
         return ret
 
     def _directory_ls(self, directory_id, recursive, prefix=b""):
         if self.directory_missing([directory_id]):
             return
         rows = list(self._cql_runner.directory_entry_get([directory_id]))
 
         for row in rows:
             # Build and yield the directory entry dict
             entry = row._asdict()
             del entry["directory_id"]
             entry = DirectoryEntry.from_dict(entry)
             ret = self._join_dentry_to_content(entry)
             ret["name"] = prefix + ret["name"]
             ret["dir_id"] = directory_id
             yield ret
 
             if recursive and ret["type"] == "dir":
                 yield from self._directory_ls(
                     ret["target"], True, prefix + ret["name"] + b"/"
                 )
 
     def directory_entry_get_by_path(self, directory, paths):
         return self._directory_entry_get_by_path(directory, paths, b"")
 
     def _directory_entry_get_by_path(self, directory, paths, prefix):
         if not paths:
             return
 
         contents = list(self.directory_ls(directory))
 
         if not contents:
             return
 
         def _get_entry(entries, name):
             """Finds the entry with the requested name, prepends the
             prefix (to get its full path), and returns it.
 
             If no entry has that name, returns None."""
             for entry in entries:
                 if entry["name"] == name:
                     entry = entry.copy()
                     entry["name"] = prefix + entry["name"]
                     return entry
 
         first_item = _get_entry(contents, paths[0])
 
         if len(paths) == 1:
             return first_item
 
         if not first_item or first_item["type"] != "dir":
             return
 
         return self._directory_entry_get_by_path(
             first_item["target"], paths[1:], prefix + paths[0] + b"/"
         )
 
     def directory_ls(self, directory, recursive=False):
         yield from self._directory_ls(directory, recursive)
 
     def directory_get_random(self):
         return self._cql_runner.directory_get_random().id
 
     def revision_add(self, revisions: Iterable[Revision]) -> Dict:
         revisions = list(revisions)
 
         # Filter-out revisions already in the database
         missing = self.revision_missing([rev.id for rev in revisions])
         revisions = [rev for rev in revisions if rev.id in missing]
         self.journal_writer.revision_add(revisions)
 
         for revision in revisions:
             revobject = revision_to_db(revision)
             if revobject:
                 # Add parents first
                 for (rank, parent) in enumerate(revobject["parents"]):
                     self._cql_runner.revision_parent_add_one(
                         revobject["id"], rank, parent
                     )
 
                 # Then write the main revision row.
                 # Writing this after all parents were written ensures that
                 # read endpoints don't return a partial view while writing
                 # the parents
                 self._cql_runner.revision_add_one(revobject)
 
         return {"revision:add": len(revisions)}
 
     def revision_missing(self, revisions):
         return self._cql_runner.revision_missing(revisions)
 
     def revision_get(self, revisions):
         rows = self._cql_runner.revision_get(revisions)
         revs = {}
         for row in rows:
             # TODO: use a single query to get all parents?
             # (it might have lower latency, but requires more code and more
             # bandwidth, because revision id would be part of each returned
             # row)
             parent_rows = self._cql_runner.revision_parent_get(row.id)
             # parent_rank is the clustering key, so results are already
             # sorted by rank.
             parents = tuple(row.parent_id for row in parent_rows)
             rev = revision_from_db(row, parents=parents)
             revs[rev.id] = rev.to_dict()
 
         for rev_id in revisions:
             yield revs.get(rev_id)
 
     def _get_parent_revs(self, rev_ids, seen, limit, short):
         if limit and len(seen) >= limit:
             return
         rev_ids = [id_ for id_ in rev_ids if id_ not in seen]
         if not rev_ids:
             return
         seen |= set(rev_ids)
 
         # We need this query, even if short=True, to return consistent
         # results (i.e. not return only a subset of a revision's parents
         # if it is being written)
         if short:
             rows = self._cql_runner.revision_get_ids(rev_ids)
         else:
             rows = self._cql_runner.revision_get(rev_ids)
 
         for row in rows:
             # TODO: use a single query to get all parents?
             # (it might have lower latency, but requires more code and more
             # bandwidth, because revision id would be part of each returned
             # row)
             parent_rows = self._cql_runner.revision_parent_get(row.id)
 
             # parent_rank is the clustering key, so results are already
             # sorted by rank.
             parents = tuple(row.parent_id for row in parent_rows)
 
             if short:
                 yield (row.id, parents)
             else:
                 rev = revision_from_db(row, parents=parents)
                 yield rev.to_dict()
             yield from self._get_parent_revs(parents, seen, limit, short)
 
     def revision_log(self, revisions, limit=None):
         seen = set()
         yield from self._get_parent_revs(revisions, seen, limit, False)
 
     def revision_shortlog(self, revisions, limit=None):
         seen = set()
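         # With short=True, each yielded item is a (revision_id, parents)
         # tuple rather than a full revision dict as in revision_log.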
         yield from self._get_parent_revs(revisions, seen, limit, True)
 
     def revision_get_random(self):
         return self._cql_runner.revision_get_random().id
 
     def release_add(self, releases: Iterable[Release]) -> Dict:
         to_add = []
         for rel in releases:
             if rel not in to_add:
                 to_add.append(rel)
         missing = set(self.release_missing([rel.id for rel in to_add]))
         to_add = [rel for rel in to_add if rel.id in missing]
 
         self.journal_writer.release_add(to_add)
 
         for release in to_add:
             if release:
                 self._cql_runner.release_add_one(release_to_db(release))
 
         return {"release:add": len(to_add)}
 
     def release_missing(self, releases):
         return self._cql_runner.release_missing(releases)
 
     def release_get(self, releases):
         rows = self._cql_runner.release_get(releases)
         rels = {}
         for row in rows:
             release = release_from_db(row)
             rels[row.id] = release.to_dict()
 
         for rel_id in releases:
             yield rels.get(rel_id)
 
     def release_get_random(self):
         return self._cql_runner.release_get_random().id
 
     def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict:
         missing = self._cql_runner.snapshot_missing([snp.id for snp in snapshots])
         snapshots = [snp for snp in snapshots if snp.id in missing]
 
         for snapshot in snapshots:
             self.journal_writer.snapshot_add([snapshot])
 
             # Add branches
             for (branch_name, branch) in snapshot.branches.items():
                 if branch is None:
                     target_type = None
                     target = None
                 else:
                     target_type = branch.target_type.value
                     target = branch.target
                 self._cql_runner.snapshot_branch_add_one(
                     {
                         "snapshot_id": snapshot.id,
                         "name": branch_name,
                         "target_type": target_type,
                         "target": target,
                     }
                 )
 
             # Add the snapshot *after* adding all the branches, so someone
             # calling snapshot_get_branch in the meantime won't end up
             # with half the branches.
             self._cql_runner.snapshot_add_one(snapshot.id)
 
         return {"snapshot:add": len(snapshots)}
 
     def snapshot_missing(self, snapshots):
         return self._cql_runner.snapshot_missing(snapshots)
 
     def snapshot_get(self, snapshot_id):
         return self.snapshot_get_branches(snapshot_id)
 
     def snapshot_get_by_origin_visit(self, origin, visit):
         try:
             visit = self.origin_visit_get_by(origin, visit)
         except IndexError:
             return None
 
         return self.snapshot_get(visit["snapshot"])
 
     def snapshot_count_branches(self, snapshot_id):
         if self._cql_runner.snapshot_missing([snapshot_id]):
             # Makes sure we don't fetch branches for a snapshot that is
             # being added.
             return None
         rows = list(self._cql_runner.snapshot_count_branches(snapshot_id))
         assert len(rows) == 1
         (nb_none, counts) = rows[0].counts
         counts = dict(counts)
         if nb_none:
             counts[None] = nb_none
         return counts
 
     def snapshot_get_branches(
         self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None
     ):
         if self._cql_runner.snapshot_missing([snapshot_id]):
             # Makes sure we don't fetch branches for a snapshot that is
             # being added.
             return None
 
         branches = []
         while len(branches) < branches_count + 1:
             new_branches = list(
                 self._cql_runner.snapshot_branch_get(
                     snapshot_id, branches_from, branches_count + 1
                 )
             )
 
             if not new_branches:
                 break
 
             branches_from = new_branches[-1].name
 
             new_branches_filtered = new_branches
 
             # Filter by target_type
             if target_types:
                 new_branches_filtered = [
                     branch
                     for branch in new_branches_filtered
                     if branch.target is not None and branch.target_type in target_types
                 ]
 
             branches.extend(new_branches_filtered)
 
             if len(new_branches) < branches_count + 1:
                 break
 
         if len(branches) > branches_count:
             last_branch = branches.pop(-1).name
         else:
             last_branch = None
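         # If one extra branch was fetched, its name is the start of the
         # next page and is returned below as 'next_branch'.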
 
         branches = {
             branch.name: {"target": branch.target, "target_type": branch.target_type,}
             if branch.target
             else None
             for branch in branches
         }
 
         return {
             "id": snapshot_id,
             "branches": branches,
             "next_branch": last_branch,
         }
 
     def snapshot_get_random(self):
         return self._cql_runner.snapshot_get_random().id
 
     def object_find_by_sha1_git(self, ids):
         results = {id_: [] for id_ in ids}
         missing_ids = set(ids)
 
         # Mind the order, revision is the most likely one for a given ID,
         # so we check revisions first.
         queries = [
             ("revision", self._cql_runner.revision_missing),
             ("release", self._cql_runner.release_missing),
             ("content", self._cql_runner.content_missing_by_sha1_git),
             ("directory", self._cql_runner.directory_missing),
         ]
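         # Each lookup removes the ids it resolves from missing_ids, so every
         # input id ends up mapped to a (possibly empty) list of
         # {"sha1_git": ..., "type": ...} dicts.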
 
         for (object_type, query_fn) in queries:
             found_ids = missing_ids - set(query_fn(missing_ids))
             for sha1_git in found_ids:
                 results[sha1_git].append(
                     {"sha1_git": sha1_git, "type": object_type,}
                 )
                 missing_ids.remove(sha1_git)
 
             if not missing_ids:
                 # We found everything, skipping the next queries.
                 break
 
         return results
 
     def origin_get(self, origins):
         if isinstance(origins, dict):
             # Old API
             return_single = True
             origins = [origins]
         else:
             return_single = False
 
         if any("id" in origin for origin in origins):
             raise StorageArgumentException("Origin ids are not supported.")
 
         results = [self.origin_get_one(origin) for origin in origins]
 
         if return_single:
             assert len(results) == 1
             return results[0]
         else:
             return results
 
     def origin_get_one(self, origin: Dict[str, Any]) -> Optional[Dict[str, Any]]:
         if "id" in origin:
             raise StorageArgumentException("Origin ids are not supported.")
         if "url" not in origin:
             raise StorageArgumentException("Missing origin url")
         rows = self._cql_runner.origin_get_by_url(origin["url"])
 
         rows = list(rows)
         if rows:
             assert len(rows) == 1
             result = rows[0]._asdict()
             return {
                 "url": result["url"],
             }
         else:
             return None
 
     def origin_get_by_sha1(self, sha1s):
         results = []
         for sha1 in sha1s:
             rows = self._cql_runner.origin_get_by_sha1(sha1)
             if rows:
                 results.append({"url": rows.one().url})
             else:
                 results.append(None)
         return results
 
     def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict:
         # Compute what token to begin the listing from
         start_token = TOKEN_BEGIN
         if page_token:
             start_token = int(page_token)
             if not (TOKEN_BEGIN <= start_token <= TOKEN_END):
                 raise StorageArgumentException("Invalid page_token.")
 
         rows = self._cql_runner.origin_list(start_token, limit)
         rows = list(rows)
 
         if len(rows) == limit:
             next_page_token: Optional[str] = str(rows[-1].tok + 1)
         else:
             next_page_token = None
 
         return {
             "origins": [{"url": row.url} for row in rows],
             "next_page_token": next_page_token,
         }
 
     def origin_search(
         self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False
     ):
         # TODO: remove this endpoint, swh-search should be used instead.
         origins = self._cql_runner.origin_iter_all()
         if regexp:
             pat = re.compile(url_pattern)
             origins = [orig for orig in origins if pat.search(orig.url)]
         else:
             origins = [orig for orig in origins if url_pattern in orig.url]
 
         if with_visit:
             origins = [orig for orig in origins if orig.next_visit_id > 1]
 
         return [{"url": orig.url,} for orig in origins[offset : offset + limit]]
 
     def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]:
         known_origins = [
             Origin.from_dict(d)
             for d in self.origin_get([origin.to_dict() for origin in origins])
             if d is not None
         ]
         to_add = [origin for origin in origins if origin not in known_origins]
         self.journal_writer.origin_add(to_add)
         for origin in to_add:
             self._cql_runner.origin_add_one(origin)
         return {"origin:add": len(to_add)}
 
     @deprecated("Use origin_add([origin]) instead")
     def origin_add_one(self, origin: Origin) -> str:
         known_origin = self.origin_get_one(origin.to_dict())
 
         if known_origin:
             origin_url = known_origin["url"]
         else:
             self.journal_writer.origin_add([origin])
 
             self._cql_runner.origin_add_one(origin)
             origin_url = origin.url
 
         return origin_url
 
     def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]:
         for visit in visits:
             origin = self.origin_get({"url": visit.origin})
             if not origin:  # Cannot add a visit without an origin
                 raise StorageArgumentException("Unknown origin %s", visit.origin)
 
         all_visits = []
         nb_visits = 0
         for visit in visits:
             nb_visits += 1
             if not visit.visit:
                 visit_id = self._cql_runner.origin_generate_unique_visit_id(
                     visit.origin
                 )
                 visit = attr.evolve(visit, visit=visit_id)
             self.journal_writer.origin_visit_add([visit])
             self._cql_runner.origin_visit_add_one(visit)
             assert visit.visit is not None
             all_visits.append(visit)
             self._origin_visit_status_add(
                 OriginVisitStatus(
                     origin=visit.origin,
                     visit=visit.visit,
                     date=visit.date,
                     status="created",
                     snapshot=None,
                 )
             )
 
         return all_visits
 
     def _origin_visit_status_add(self, visit_status: OriginVisitStatus) -> None:
         """Add an origin visit status"""
         self.journal_writer.origin_visit_status_add([visit_status])
         self._cql_runner.origin_visit_status_add_one(visit_status)
 
     def origin_visit_status_add(
         self, visit_statuses: Iterable[OriginVisitStatus]
     ) -> None:
         # First round to check existence (fail early if any is ko)
         for visit_status in visit_statuses:
             origin_url = self.origin_get({"url": visit_status.origin})
             if not origin_url:
                 raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
 
         for visit_status in visit_statuses:
             self._origin_visit_status_add(visit_status)
 
     def _origin_visit_merge(
         self, visit: Dict[str, Any], visit_status: OriginVisitStatus,
     ) -> Dict[str, Any]:
         """Merge origin_visit and visit_status together.
 
         """
         return OriginVisit.from_dict(
             {
                 # default to the values in visit
                 **visit,
                 # override with the last update
                 **visit_status.to_dict(),
                 # visit['origin'] is the URL (via a join), while
                 # visit_status['origin'] is only an id.
                 "origin": visit["origin"],
                 # but keep the date of the creation of the origin visit
                 "date": visit["date"],
             }
         ).to_dict()
 
     def _origin_visit_apply_last_status(self, visit: Dict[str, Any]) -> Dict[str, Any]:
         """Retrieve the latest visit status information for the origin visit.
         Then merge it with the visit and return it.
 
         """
         row = self._cql_runner.origin_visit_status_get_latest(
             visit["origin"], visit["visit"]
         )
         assert row is not None
         return self._origin_visit_merge(visit, row_to_visit_status(row))
 
     def _origin_visit_get_updated(self, origin: str, visit_id: int) -> Dict[str, Any]:
         """Retrieve origin visit and latest origin visit status and merge them
         into an origin visit.
 
         """
         row_visit = self._cql_runner.origin_visit_get_one(origin, visit_id)
         assert row_visit is not None
         visit = self._format_origin_visit_row(row_visit)
         return self._origin_visit_apply_last_status(visit)
 
     @staticmethod
     def _format_origin_visit_row(visit):
         return {
             **visit._asdict(),
             "origin": visit.origin,
             "date": visit.date.replace(tzinfo=datetime.timezone.utc),
         }
 
     def origin_visit_get(
         self,
         origin: str,
         last_visit: Optional[int] = None,
         limit: Optional[int] = None,
         order: str = "asc",
     ) -> Iterable[Dict[str, Any]]:
         rows = self._cql_runner.origin_visit_get(origin, last_visit, limit, order)
 
         for row in rows:
             visit = self._format_origin_visit_row(row)
             yield self._origin_visit_apply_last_status(visit)
 
     def origin_visit_find_by_date(
         self, origin: str, visit_date: datetime.datetime
     ) -> Optional[Dict[str, Any]]:
         # Iterator over all the visits of the origin
         # This should be ok for now, as there aren't too many visits
         # per origin.
         rows = list(self._cql_runner.origin_visit_get_all(origin))
 
         def key(visit):
             dt = visit.date.replace(tzinfo=datetime.timezone.utc) - visit_date
             return (abs(dt), -visit.visit)
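         # The visit closest to visit_date wins; ties are broken in favor of
         # the most recent visit id (hence the negated visit in the key).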
 
         if rows:
             row = min(rows, key=key)
             visit = self._format_origin_visit_row(row)
             return self._origin_visit_apply_last_status(visit)
         return None
 
     def origin_visit_get_by(self, origin: str, visit: int) -> Optional[Dict[str, Any]]:
         row = self._cql_runner.origin_visit_get_one(origin, visit)
         if row:
             visit_ = self._format_origin_visit_row(row)
             return self._origin_visit_apply_last_status(visit_)
         return None
 
     def origin_visit_get_latest(
         self,
         origin: str,
         type: Optional[str] = None,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[Dict[str, Any]]:
         # TODO: Do not fetch all visits
         rows = self._cql_runner.origin_visit_get_all(origin)
         latest_visit = None
         for row in rows:
             visit = self._format_origin_visit_row(row)
             updated_visit = self._origin_visit_apply_last_status(visit)
             if type is not None and updated_visit["type"] != type:
                 continue
             if allowed_statuses and updated_visit["status"] not in allowed_statuses:
                 continue
             if require_snapshot and updated_visit["snapshot"] is None:
                 continue
 
             # updated_visit is a candidate
             if latest_visit is not None:
                 if updated_visit["date"] < latest_visit["date"]:
                     continue
                 if updated_visit["visit"] < latest_visit["visit"]:
                     continue
 
             latest_visit = updated_visit
 
         return latest_visit
 
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[OriginVisitStatus]:
         rows = self._cql_runner.origin_visit_status_get(
             origin_url, visit, allowed_statuses, require_snapshot
         )
         # filtering is done on the Python side, as we cannot do it server-side
         if allowed_statuses:
             rows = [row for row in rows if row.status in allowed_statuses]
         if require_snapshot:
             rows = [row for row in rows if row.snapshot is not None]
         if not rows:
             return None
         return row_to_visit_status(rows[0])
 
     def origin_visit_get_random(self, type: str) -> Optional[Dict[str, Any]]:
         back_in_the_day = now() - datetime.timedelta(weeks=12)  # 3 months back
 
         # Random position to start iteration at
         start_token = random.randint(TOKEN_BEGIN, TOKEN_END)
 
         # Iterator over all visits, ordered by token(origins) then visit_id
         rows = self._cql_runner.origin_visit_iter(start_token)
         for row in rows:
             visit = self._format_origin_visit_row(row)
             visit_status = self._origin_visit_apply_last_status(visit)
             if (
                 visit_status["date"] > back_in_the_day
                 and visit_status["status"] == "full"
             ):
                 return visit_status
         else:
             return None
 
     def stat_counters(self):
         rows = self._cql_runner.stat_counters()
         keys = (
             "content",
             "directory",
             "origin",
             "origin_visit",
             "release",
             "revision",
             "skipped_content",
             "snapshot",
         )
         stats = {key: 0 for key in keys}
         stats.update({row.object_type: row.count for row in rows})
         return stats
 
     def refresh_stat_counters(self):
         pass
 
     def origin_metadata_add(
         self,
         origin_url: str,
         discovery_date: datetime.datetime,
         authority: Dict[str, Any],
         fetcher: Dict[str, Any],
         format: str,
         metadata: bytes,
     ) -> None:
         if not isinstance(origin_url, str):
             raise StorageArgumentException(
-                "origin_id must be str, not %r" % (origin_url,)
+                "origin_url must be str, not %r" % (origin_url,)
             )
+        self._object_metadata_add(
+            "origin", origin_url, discovery_date, authority, fetcher, format, metadata,
+        )
+
+    def origin_metadata_get(
+        self,
+        origin_url: str,
+        authority: Dict[str, str],
+        after: Optional[datetime.datetime] = None,
+        page_token: Optional[bytes] = None,
+        limit: int = 1000,
+    ) -> Dict[str, Any]:
+        if not isinstance(origin_url, str):
+            raise TypeError("origin_url must be str, not %r" % (origin_url,))
+
+        res = self._object_metadata_get(
+            "origin", origin_url, authority, after, page_token, limit
+        )
+        for result in res["results"]:
+            result["origin_url"] = result.pop("id")
+
+        return res
+
+    def _object_metadata_add(
+        self,
+        object_type: str,
+        id: str,
+        discovery_date: datetime.datetime,
+        authority: Dict[str, Any],
+        fetcher: Dict[str, Any],
+        format: str,
+        metadata: bytes,
+    ) -> None:
         if not self._cql_runner.metadata_authority_get(**authority):
             raise StorageArgumentException(f"Unknown authority {authority}")
         if not self._cql_runner.metadata_fetcher_get(**fetcher):
             raise StorageArgumentException(f"Unknown fetcher {fetcher}")
 
         try:
-            self._cql_runner.origin_metadata_add(
-                origin_url,
+            self._cql_runner.object_metadata_add(
+                object_type,
+                id,
                 authority["type"],
                 authority["url"],
                 discovery_date,
                 fetcher["name"],
                 fetcher["version"],
                 format,
                 metadata,
             )
         except TypeError as e:
             raise StorageArgumentException(*e.args)
 
-    def origin_metadata_get(
+    def _object_metadata_get(
         self,
-        origin_url: str,
+        object_type: str,
+        id: str,
         authority: Dict[str, str],
         after: Optional[datetime.datetime] = None,
         page_token: Optional[bytes] = None,
         limit: int = 1000,
     ) -> Dict[str, Any]:
-        if not isinstance(origin_url, str):
-            raise TypeError("origin_url must be str, not %r" % (origin_url,))
-
         if page_token is not None:
             (after_date, after_fetcher_name, after_fetcher_version) = msgpack_loads(
                 page_token
             )
             if after and after_date < after:
                 raise StorageArgumentException(
                     "page_token is inconsistent with the value of 'after'."
                 )
-            entries = self._cql_runner.origin_metadata_get_after_date_and_fetcher(
-                origin_url,
+            entries = self._cql_runner.object_metadata_get_after_date_and_fetcher(
+                id,
                 authority["type"],
                 authority["url"],
                 after_date,
                 after_fetcher_name,
                 after_fetcher_version,
             )
         elif after is not None:
-            entries = self._cql_runner.origin_metadata_get_after_date(
-                origin_url, authority["type"], authority["url"], after
+            entries = self._cql_runner.object_metadata_get_after_date(
+                id, authority["type"], authority["url"], after
             )
         else:
-            entries = self._cql_runner.origin_metadata_get(
-                origin_url, authority["type"], authority["url"]
+            entries = self._cql_runner.object_metadata_get(
+                id, authority["type"], authority["url"]
             )
 
         if limit:
             entries = itertools.islice(entries, 0, limit + 1)
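             # One extra entry is fetched so we can tell whether a next page
             # exists (see the len(results) > limit check below).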
 
         results = []
         for entry in entries:
             discovery_date = entry.discovery_date.replace(tzinfo=datetime.timezone.utc)
-            results.append(
-                {
-                    "origin_url": entry.origin,
-                    "authority": {
-                        "type": entry.authority_type,
-                        "url": entry.authority_url,
-                    },
-                    "fetcher": {
-                        "name": entry.fetcher_name,
-                        "version": entry.fetcher_version,
-                    },
-                    "discovery_date": discovery_date,
-                    "format": entry.format,
-                    "metadata": entry.metadata,
-                }
-            )
+
+            result = {
+                "id": entry.id,
+                "authority": {
+                    "type": entry.authority_type,
+                    "url": entry.authority_url,
+                },
+                "fetcher": {
+                    "name": entry.fetcher_name,
+                    "version": entry.fetcher_version,
+                },
+                "discovery_date": discovery_date,
+                "format": entry.format,
+                "metadata": entry.metadata,
+            }
+
+            results.append(result)
 
         if len(results) > limit:
             results.pop()
             assert len(results) == limit
             last_result = results[-1]
             next_page_token: Optional[bytes] = msgpack_dumps(
                 (
                     last_result["discovery_date"],
                     last_result["fetcher"]["name"],
                     last_result["fetcher"]["version"],
                 )
             )
         else:
             next_page_token = None
 
         return {
             "next_page_token": next_page_token,
             "results": results,
         }
 
     def metadata_fetcher_add(
         self, name: str, version: str, metadata: Dict[str, Any]
     ) -> None:
         self._cql_runner.metadata_fetcher_add(name, version, json.dumps(metadata))
 
     def metadata_fetcher_get(self, name: str, version: str) -> Optional[Dict[str, Any]]:
         fetcher = self._cql_runner.metadata_fetcher_get(name, version)
         if fetcher:
             return {
                 "name": fetcher.name,
                 "version": fetcher.version,
                 "metadata": json.loads(fetcher.metadata),
             }
         else:
             return None
 
     def metadata_authority_add(
         self, type: str, url: str, metadata: Dict[str, Any]
     ) -> None:
         self._cql_runner.metadata_authority_add(url, type, json.dumps(metadata))
 
     def metadata_authority_get(self, type: str, url: str) -> Optional[Dict[str, Any]]:
         authority = self._cql_runner.metadata_authority_get(type, url)
         if authority:
             return {
                 "type": authority.type,
                 "url": authority.url,
                 "metadata": json.loads(authority.metadata),
             }
         else:
             return None
 
     def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
         """Do nothing
 
         """
         return None
 
     def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
         return {}
diff --git a/swh/storage/db.py b/swh/storage/db.py
index 64e2cf52..b6c43c37 100644
--- a/swh/storage/db.py
+++ b/swh/storage/db.py
@@ -1,1266 +1,1283 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import datetime
 import random
 import select
 from typing import Any, Dict, Iterable, List, Optional, Tuple
 
 from swh.core.db import BaseDb
 from swh.core.db.db_utils import stored_procedure, jsonize
 from swh.core.db.db_utils import execute_values_generator
 from swh.model.model import OriginVisit, OriginVisitStatus, SHA1_SIZE
 
 
 class Db(BaseDb):
     """Proxy to the SWH DB, with wrappers around stored procedures
 
     """
 
     def mktemp_dir_entry(self, entry_type, cur=None):
         self._cursor(cur).execute(
             "SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),)
         )
 
     @stored_procedure("swh_mktemp_revision")
     def mktemp_revision(self, cur=None):
         pass
 
     @stored_procedure("swh_mktemp_release")
     def mktemp_release(self, cur=None):
         pass
 
     @stored_procedure("swh_mktemp_snapshot_branch")
     def mktemp_snapshot_branch(self, cur=None):
         pass
 
     def register_listener(self, notify_queue, cur=None):
         """Register a listener for NOTIFY queue `notify_queue`"""
         self._cursor(cur).execute("LISTEN %s" % notify_queue)
 
     def listen_notifies(self, timeout):
         """Listen to notifications for `timeout` seconds"""
         if select.select([self.conn], [], [], timeout) == ([], [], []):
             return
         else:
             self.conn.poll()
             while self.conn.notifies:
                 yield self.conn.notifies.pop(0)
 
     @stored_procedure("swh_content_add")
     def content_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_directory_add")
     def directory_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_skipped_content_add")
     def skipped_content_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_revision_add")
     def revision_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_release_add")
     def release_add_from_temp(self, cur=None):
         pass
 
     def content_update_from_temp(self, keys_to_update, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             """select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update
         )
 
     content_get_metadata_keys = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "status",
     ]
 
     content_add_keys = content_get_metadata_keys + ["ctime"]
 
     skipped_content_keys = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "reason",
         "status",
         "origin",
     ]
 
     def content_get_metadata_from_sha1s(self, sha1s, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             select t.sha1, %s from (values %%s) as t (sha1)
             inner join content using (sha1)
             """
             % ", ".join(self.content_get_metadata_keys[1:]),
             ((sha1,) for sha1 in sha1s),
         )
 
     def content_get_range(self, start, end, limit=None, cur=None):
         """Retrieve contents within range [start, end].
 
         """
         cur = self._cursor(cur)
         query = """select %s from content
                    where %%s <= sha1 and sha1 <= %%s
                    order by sha1
                    limit %%s""" % ", ".join(
             self.content_get_metadata_keys
         )
         cur.execute(query, (start, end, limit))
         yield from cur
 
     content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"]
 
     def content_missing_from_list(self, contents, cur=None):
         cur = self._cursor(cur)
 
         keys = ", ".join(self.content_hash_keys)
         equality = " AND ".join(
             ("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys
         )
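         # e.g. equality == "t.sha1 = c.sha1 AND t.sha1_git = c.sha1_git AND ..."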
 
         yield from execute_values_generator(
             cur,
             """
             SELECT %s
             FROM (VALUES %%s) as t(%s)
             WHERE NOT EXISTS (
                 SELECT 1 FROM content c
                 WHERE %s
             )
             """
             % (keys, keys, equality),
             (tuple(c[key] for key in self.content_hash_keys) for c in contents),
         )
 
     def content_missing_per_sha1(self, sha1s, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
         SELECT t.sha1 FROM (VALUES %s) AS t(sha1)
         WHERE NOT EXISTS (
             SELECT 1 FROM content c WHERE c.sha1 = t.sha1
         )""",
             ((sha1,) for sha1 in sha1s),
         )
 
     def content_missing_per_sha1_git(self, contents, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
         SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git)
         WHERE NOT EXISTS (
             SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git
         )""",
             ((sha1,) for sha1 in contents),
         )
 
     def skipped_content_missing(self, contents, cur=None):
         if not contents:
             return []
         cur = self._cursor(cur)
 
         query = """SELECT * FROM (VALUES %s) AS t (%s)
                    WHERE not exists
                    (SELECT 1 FROM skipped_content s WHERE
                        s.sha1 is not distinct from t.sha1::sha1 and
                        s.sha1_git is not distinct from t.sha1_git::sha1 and
                        s.sha256 is not distinct from t.sha256::bytea);""" % (
             (", ".join("%s" for _ in contents)),
             ", ".join(self.content_hash_keys),
         )
         cur.execute(
             query,
             [tuple(cont[key] for key in self.content_hash_keys) for cont in contents],
         )
 
         yield from cur
 
     def snapshot_exists(self, snapshot_id, cur=None):
         """Check whether a snapshot with the given id exists"""
         cur = self._cursor(cur)
 
         cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
 
         return bool(cur.fetchone())
 
     def snapshot_missing_from_list(self, snapshots, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM snapshot d WHERE d.id = t.id
             )
                 """,
             ((id,) for id in snapshots),
         )
 
     def snapshot_add(self, snapshot_id, cur=None):
         """Add a snapshot from the temporary table"""
         cur = self._cursor(cur)
 
         cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,))
 
     snapshot_count_cols = ["target_type", "count"]
 
     def snapshot_count_branches(self, snapshot_id, cur=None):
         cur = self._cursor(cur)
         query = """\
            SELECT %s FROM swh_snapshot_count_branches(%%s)
         """ % ", ".join(
             self.snapshot_count_cols
         )
 
         cur.execute(query, (snapshot_id,))
 
         yield from cur
 
     snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"]
 
     def snapshot_get_by_id(
         self,
         snapshot_id,
         branches_from=b"",
         branches_count=None,
         target_types=None,
         cur=None,
     ):
         cur = self._cursor(cur)
         query = """\
            SELECT %s
            FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[])
         """ % ", ".join(
             self.snapshot_get_cols
         )
 
         cur.execute(query, (snapshot_id, branches_from, branches_count, target_types))
 
         yield from cur
 
     def snapshot_get_by_origin_visit(self, origin_url, visit_id, cur=None):
         cur = self._cursor(cur)
         query = """\
            SELECT ovs.snapshot
            FROM origin_visit ov
            INNER JOIN origin o ON o.id = ov.origin
            INNER JOIN origin_visit_status ovs
              ON ov.origin = ovs.origin AND ov.visit = ovs.visit
            WHERE o.url=%s AND ov.visit=%s
            ORDER BY ovs.date DESC LIMIT 1
         """
 
         cur.execute(query, (origin_url, visit_id))
         ret = cur.fetchone()
         if ret:
             return ret[0]
 
     def snapshot_get_random(self, cur=None):
         return self._get_random_row_from_table("snapshot", ["id"], "id", cur)
 
     content_find_cols = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "ctime",
         "status",
     ]
 
     def content_find(
         self, sha1=None, sha1_git=None, sha256=None, blake2s256=None, cur=None
     ):
         """Find the content optionally on a combination of the following
         checksums sha1, sha1_git, sha256 or blake2s256.
 
         Args:
             sha1: sha1 content
             git_sha1: the sha1 computed `a la git` sha1 of the content
             sha256: sha256 content
             blake2s256: blake2s256 content
 
         Returns:
             The tuple (sha1, sha1_git, sha256, blake2s256) if found or None.
 
         """
         cur = self._cursor(cur)
 
         checksum_dict = {
             "sha1": sha1,
             "sha1_git": sha1_git,
             "sha256": sha256,
             "blake2s256": blake2s256,
         }
         where_parts = []
         args = []
         # Adds only those keys which have value other than None
         for algorithm in checksum_dict:
             if checksum_dict[algorithm] is not None:
                 args.append(checksum_dict[algorithm])
                 where_parts.append(algorithm + "= %s")
         query = " AND ".join(where_parts)
         cur.execute(
             """SELECT %s
                        FROM content WHERE %s
                        """
             % (",".join(self.content_find_cols), query),
             args,
         )
         content = cur.fetchall()
         return content
 
     def content_get_random(self, cur=None):
         return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur)
 
     def directory_missing_from_list(self, directories, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM directory d WHERE d.id = t.id
             )
             """,
             ((id,) for id in directories),
         )
 
     directory_ls_cols = [
         "dir_id",
         "type",
         "target",
         "name",
         "perms",
         "status",
         "sha1",
         "sha1_git",
         "sha256",
         "length",
     ]
 
     def directory_walk_one(self, directory, cur=None):
         cur = self._cursor(cur)
         cols = ", ".join(self.directory_ls_cols)
         query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols
         cur.execute(query, (directory,))
         yield from cur
 
     def directory_walk(self, directory, cur=None):
         cur = self._cursor(cur)
         cols = ", ".join(self.directory_ls_cols)
         query = "SELECT %s FROM swh_directory_walk(%%s)" % cols
         cur.execute(query, (directory,))
         yield from cur
 
     def directory_entry_get_by_path(self, directory, paths, cur=None):
         """Retrieve a directory entry by path.
 
         """
         cur = self._cursor(cur)
 
         cols = ", ".join(self.directory_ls_cols)
         query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols
         cur.execute(query, (directory, paths))
 
         data = cur.fetchone()
         if set(data) == {None}:
             return None
         return data
 
     def directory_get_random(self, cur=None):
         return self._get_random_row_from_table("directory", ["id"], "id", cur)
 
     def revision_missing_from_list(self, revisions, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM revision r WHERE r.id = t.id
             )
             """,
             ((id,) for id in revisions),
         )
 
     revision_add_cols = [
         "id",
         "date",
         "date_offset",
         "date_neg_utc_offset",
         "committer_date",
         "committer_date_offset",
         "committer_date_neg_utc_offset",
         "type",
         "directory",
         "message",
         "author_fullname",
         "author_name",
         "author_email",
         "committer_fullname",
         "committer_name",
         "committer_email",
         "metadata",
         "synthetic",
     ]
 
     revision_get_cols = revision_add_cols + ["parents"]
 
     def origin_visit_add(self, origin, ts, type, cur=None):
         """Add a new origin_visit for origin origin at timestamp ts.
 
         Args:
             origin: origin concerned by the visit
             ts: the date of the visit
             type: type of loader for the visit
 
         Returns:
             The id of the new visit for that origin
 
         """
         cur = self._cursor(cur)
         cur.execute(
             "SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type)
         )
         return cur.fetchone()[0]
 
     origin_visit_status_cols = [
         "origin",
         "visit",
         "date",
         "status",
         "snapshot",
         "metadata",
     ]
 
     def origin_visit_status_add(
         self, visit_status: OriginVisitStatus, cur=None
     ) -> None:
         """Add new origin visit status
 
         """
         assert self.origin_visit_status_cols[0] == "origin"
         assert self.origin_visit_status_cols[-1] == "metadata"
         cols = self.origin_visit_status_cols[1:-1]
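         # 'origin' (resolved from its URL by the WITH clause below) and
         # 'metadata' (jsonized) are bound separately from the other columns.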
         cur = self._cursor(cur)
         cur.execute(
             f"WITH origin_id as (select id from origin where url=%s) "
             f"INSERT INTO origin_visit_status "
             f"(origin, {', '.join(cols)}, metadata) "
             f"VALUES ((select id from origin_id), "
             f"{', '.join(['%s']*len(cols))}, %s) "
             f"ON CONFLICT (origin, visit, date) do nothing",
             [visit_status.origin]
             + [getattr(visit_status, key) for key in cols]
             + [jsonize(visit_status.metadata)],
         )
 
     origin_visit_upsert_cols = [
         "origin",
         "visit",
         "date",
         "type",
     ]
 
     def origin_visit_upsert(self, origin_visit: OriginVisit, cur=None) -> None:
         # doing an extra query like this is way simpler than trying to join
         # the origin id in the query below
         ov = origin_visit
         origin_id = next(self.origin_id_get_by_url([ov.origin]))
 
         cur = self._cursor(cur)
         query = """INSERT INTO origin_visit ({cols}) VALUES ({values})
                    ON CONFLICT ON CONSTRAINT origin_visit_pkey DO
                    UPDATE SET {updates}""".format(
             cols=", ".join(self.origin_visit_upsert_cols),
             values=", ".join("%s" for col in self.origin_visit_upsert_cols),
             updates=", ".join(
                 "{0}=excluded.{0}".format(col) for col in self.origin_visit_upsert_cols
             ),
         )
         cur.execute(
             query, (origin_id, ov.visit, ov.date, ov.type),
         )
 
     origin_visit_get_cols = [
         "origin",
         "visit",
         "date",
         "type",
         "status",
         "metadata",
         "snapshot",
     ]
     origin_visit_select_cols = [
         "o.url AS origin",
         "ov.visit",
         "ov.date",
         "ov.type AS type",
         "ovs.status",
         "ovs.metadata",
         "ovs.snapshot",
     ]
 
     origin_visit_status_select_cols = [
         "o.url AS origin",
         "ovs.visit",
         "ovs.date",
         "ovs.status",
         "ovs.snapshot",
         "ovs.metadata",
     ]
 
     def _make_origin_visit_status(
         self, row: Optional[Tuple[Any]]
     ) -> Optional[Dict[str, Any]]:
         """Make an origin_visit_status dict out of a row
 
         """
         if not row:
             return None
         return dict(zip(self.origin_visit_status_cols, row))
 
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
         cur=None,
     ) -> Optional[Dict[str, Any]]:
         """Given an origin visit id, return its latest origin_visit_status
 
         """
         cur = self._cursor(cur)
 
         query_parts = [
             "SELECT %s" % ", ".join(self.origin_visit_status_select_cols),
             "FROM origin_visit_status ovs ",
             "INNER JOIN origin o ON o.id = ovs.origin",
         ]
         query_parts.append("WHERE o.url = %s")
         query_params: List[Any] = [origin_url]
         query_parts.append("AND ovs.visit = %s")
         query_params.append(visit)
 
         if require_snapshot:
             query_parts.append("AND ovs.snapshot is not null")
 
         if allowed_statuses:
             query_parts.append("AND ovs.status IN %s")
             query_params.append(tuple(allowed_statuses))
 
         query_parts.append("ORDER BY ovs.date DESC LIMIT 1")
         query = "\n".join(query_parts)
 
         cur.execute(query, tuple(query_params))
         row = cur.fetchone()
         return self._make_origin_visit_status(row)
 
     def origin_visit_get_all(
         self, origin_id, last_visit=None, order="asc", limit=None, cur=None
     ):
         """Retrieve all visits for origin with id origin_id.
 
         Args:
             origin_id: The occurrence's origin
 
         Yields:
             The visits for that origin
 
         """
         cur = self._cursor(cur)
         assert order.lower() in ["asc", "desc"]
 
         query_parts = [
             "SELECT DISTINCT ON (ov.visit) %s "
             % ", ".join(self.origin_visit_select_cols),
             "FROM origin_visit ov",
             "INNER JOIN origin o ON o.id = ov.origin",
             "INNER JOIN origin_visit_status ovs",
             "ON ov.origin = ovs.origin AND ov.visit = ovs.visit",
         ]
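         # DISTINCT ON (ov.visit), combined with the "ovs.date DESC" ordering
         # below, keeps only the most recent status row for each visit.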
         query_parts.append("WHERE o.url = %s")
         query_params: List[Any] = [origin_id]
 
         if last_visit is not None:
             op_comparison = ">" if order == "asc" else "<"
             query_parts.append(f"and ov.visit {op_comparison} %s")
             query_params.append(last_visit)
 
         if order == "asc":
             query_parts.append("ORDER BY ov.visit ASC, ovs.date DESC")
         elif order == "desc":
             query_parts.append("ORDER BY ov.visit DESC, ovs.date DESC")
         else:
             assert False
 
         if limit is not None:
             query_parts.append("LIMIT %s")
             query_params.append(limit)
 
         query = "\n".join(query_parts)
         cur.execute(query, tuple(query_params))
         yield from cur
 
     def origin_visit_get(self, origin_id, visit_id, cur=None):
         """Retrieve information on visit visit_id of origin origin_id.
 
         Args:
             origin_id: the origin concerned
             visit_id: The visit step for that origin
 
         Returns:
             The origin_visit information
 
         """
         cur = self._cursor(cur)
 
         query = """\
             SELECT %s
             FROM origin_visit ov
             INNER JOIN origin o ON o.id = ov.origin
             INNER JOIN origin_visit_status ovs
             ON ov.origin = ovs.origin AND ov.visit = ovs.visit
             WHERE o.url = %%s AND ov.visit = %%s
             ORDER BY ovs.date DESC
             LIMIT 1
             """ % (
             ", ".join(self.origin_visit_select_cols)
         )
 
         cur.execute(query, (origin_id, visit_id))
         r = cur.fetchall()
         if not r:
             return None
         return r[0]
 
     def origin_visit_find_by_date(self, origin, visit_date, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             "SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date)
         )
         rows = cur.fetchall()
         if rows:
             visit = dict(zip(self.origin_visit_get_cols, rows[0]))
             visit["origin"] = origin
             return visit
 
     def origin_visit_exists(self, origin_id, visit_id, cur=None):
         """Check whether an origin visit with the given ids exists"""
         cur = self._cursor(cur)
 
         query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s"
 
         cur.execute(query, (origin_id, visit_id))
 
         return bool(cur.fetchone())
 
     def origin_visit_get_latest(
         self,
         origin_id: str,
         type: Optional[str],
         allowed_statuses: Optional[Iterable[str]],
         require_snapshot: bool,
         cur=None,
     ):
         """Retrieve the most recent origin_visit of the given origin,
         with optional filters.
 
         Args:
             origin_id: the origin's URL
             type: Optional visit type to filter on
             allowed_statuses: the visit statuses allowed for the returned visit
             require_snapshot (bool): If True, only a visit with a known
                 snapshot will be returned.
 
         Returns:
             The origin_visit information, or None if no visit matches.
         """
         cur = self._cursor(cur)
 
         query_parts = [
             "SELECT %s" % ", ".join(self.origin_visit_select_cols),
             "FROM origin_visit ov ",
             "INNER JOIN origin o ON o.id = ov.origin",
             "INNER JOIN origin_visit_status ovs ",
             "ON o.id = ovs.origin AND ov.visit = ovs.visit ",
         ]
         query_parts.append("WHERE o.url = %s")
         query_params: List[Any] = [origin_id]
 
         if type is not None:
             query_parts.append("AND ov.type = %s")
             query_params.append(type)
 
         if require_snapshot:
             query_parts.append("AND ovs.snapshot is not null")
 
         if allowed_statuses:
             query_parts.append("AND ovs.status IN %s")
             query_params.append(tuple(allowed_statuses))
 
         query_parts.append(
             "ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1"
         )
 
         query = "\n".join(query_parts)
 
         cur.execute(query, tuple(query_params))
         r = cur.fetchone()
         if not r:
             return None
         return r
 
     def origin_visit_get_random(self, type, cur=None):
         """Randomly select one origin visit that was full and in the last 3
            months
 
         """
         cur = self._cursor(cur)
         columns = ",".join(self.origin_visit_select_cols)
         query = f"""select {columns}
                     from origin_visit ov
                     inner join origin o on ov.origin=o.id
                     inner join origin_visit_status ovs
                       on ov.origin = ovs.origin and ov.visit = ovs.visit
                     where ovs.status='full'
                       and ov.type=%s
                       and ov.date > now() - '3 months'::interval
                       and random() < 0.1
                     limit 1
                  """
         cur.execute(query, (type,))
         return cur.fetchone()
 
     @staticmethod
     def mangle_query_key(key, main_table):
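+        """Map a result column name to the SQL expression selecting it:
+        "id" is read from the t(sortkey, id) values alias, "parents" is
+        gathered from revision_history, "author_*"/"committer_*" come from
+        the joined person aliases, and other columns from ``main_table``."""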
         if key == "id":
             return "t.id"
         if key == "parents":
             return """
             ARRAY(
             SELECT rh.parent_id::bytea
             FROM revision_history rh
             WHERE rh.id = t.id
             ORDER BY rh.parent_rank
             )"""
         if "_" not in key:
             return "%s.%s" % (main_table, key)
 
         head, tail = key.split("_", 1)
         if head in ("author", "committer") and tail in (
             "name",
             "email",
             "id",
             "fullname",
         ):
             return "%s.%s" % (head, tail)
 
         return "%s.%s" % (main_table, key)
 
     def revision_get_from_list(self, revisions, cur=None):
         cur = self._cursor(cur)
 
         query_keys = ", ".join(
             self.mangle_query_key(k, "revision") for k in self.revision_get_cols
         )
 
         yield from execute_values_generator(
             cur,
             """
             SELECT %s FROM (VALUES %%s) as t(sortkey, id)
             LEFT JOIN revision ON t.id = revision.id
             LEFT JOIN person author ON revision.author = author.id
             LEFT JOIN person committer ON revision.committer = committer.id
             ORDER BY sortkey
             """
             % query_keys,
             ((sortkey, id) for sortkey, id in enumerate(revisions)),
         )
 
     def revision_log(self, root_revisions, limit=None, cur=None):
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM swh_revision_log(%%s, %%s)
                 """ % ", ".join(
             self.revision_get_cols
         )
 
         cur.execute(query, (root_revisions, limit))
         yield from cur
 
     revision_shortlog_cols = ["id", "parents"]
 
     def revision_shortlog(self, root_revisions, limit=None, cur=None):
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM swh_revision_list(%%s, %%s)
                 """ % ", ".join(
             self.revision_shortlog_cols
         )
 
         cur.execute(query, (root_revisions, limit))
         yield from cur
 
     def revision_get_random(self, cur=None):
         return self._get_random_row_from_table("revision", ["id"], "id", cur)
 
     def release_missing_from_list(self, releases, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM release r WHERE r.id = t.id
             )
             """,
             ((id,) for id in releases),
         )
 
     object_find_by_sha1_git_cols = ["sha1_git", "type"]
 
     def object_find_by_sha1_git(self, ids, cur=None):
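+        """Resolve each given sha1_git to the object type(s) it identifies
+        (release, revision, directory or content); hashes that match nothing
+        are still returned, with a NULL type."""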
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
             WITH t (sha1_git) AS (VALUES %s),
             known_objects as ((
                 select
                   id as sha1_git,
                   'release'::object_type as type,
                   object_id
                 from release r
                 where exists (select 1 from t where t.sha1_git = r.id)
             ) union all (
                 select
                   id as sha1_git,
                   'revision'::object_type as type,
                   object_id
                 from revision r
                 where exists (select 1 from t where t.sha1_git = r.id)
             ) union all (
                 select
                   id as sha1_git,
                   'directory'::object_type as type,
                   object_id
                 from directory d
                 where exists (select 1 from t where t.sha1_git = d.id)
             ) union all (
                 select
                   sha1_git as sha1_git,
                   'content'::object_type as type,
                   object_id
                 from content c
                 where exists (select 1 from t where t.sha1_git = c.sha1_git)
             ))
             select t.sha1_git as sha1_git, k.type
             from t
             left join known_objects k on t.sha1_git = k.sha1_git
             """,
             ((id,) for id in ids),
         )
 
     def stat_counters(self, cur=None):
         cur = self._cursor(cur)
         cur.execute("SELECT * FROM swh_stat_counters()")
         yield from cur
 
     def origin_add(self, url, cur=None):
         """Insert a new origin and return its URL."""
         cur = self._cursor(cur)
         insert = """INSERT INTO origin (url) values (%s)
                     RETURNING url"""
 
         cur.execute(insert, (url,))
         return cur.fetchone()[0]
 
     origin_cols = ["url"]
 
     def origin_get_by_url(self, origins, cur=None):
         """Retrieve origin `(type, url)` from urls if found."""
         cur = self._cursor(cur)
 
         query = """SELECT %s FROM (VALUES %%s) as t(url)
                    LEFT JOIN origin ON t.url = origin.url
                 """ % ",".join(
             "origin." + col for col in self.origin_cols
         )
 
         yield from execute_values_generator(cur, query, ((url,) for url in origins))
 
     def origin_get_by_sha1(self, sha1s, cur=None):
         """Retrieve origin urls from sha1s if found."""
         cur = self._cursor(cur)
 
         query = """SELECT %s FROM (VALUES %%s) as t(sha1)
                    LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1')
                 """ % ",".join(
             "origin." + col for col in self.origin_cols
         )
 
         yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s))
 
     def origin_id_get_by_url(self, origins, cur=None):
         """Retrieve origin `(type, url)` from urls if found."""
         cur = self._cursor(cur)
 
         query = """SELECT id FROM (VALUES %s) as t(url)
                    LEFT JOIN origin ON t.url = origin.url
                 """
 
         for row in execute_values_generator(cur, query, ((url,) for url in origins)):
             yield row[0]
 
     origin_get_range_cols = ["id", "url"]
 
     def origin_get_range(self, origin_from=1, origin_count=100, cur=None):
         """Retrieve ``origin_count`` origins whose ids are greater
         than or equal to ``origin_from``.
 
         Origins are sorted by id before retrieving them.
 
         Args:
             origin_from (int): the minimum id of origins to retrieve
             origin_count (int): the maximum number of origins to retrieve
         """
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM origin WHERE id >= %%s
                    ORDER BY id LIMIT %%s
                 """ % ",".join(
             self.origin_get_range_cols
         )
 
         cur.execute(query, (origin_from, origin_count))
         yield from cur
 
     def _origin_query(
         self,
         url_pattern,
         count=False,
         offset=0,
         limit=50,
         regexp=False,
         with_visit=False,
         cur=None,
     ):
         """
         Shared helper that builds and executes the query used both for
         searching and for counting origins.
         """
         cur = self._cursor(cur)
 
         if count:
             origin_cols = "COUNT(*)"
         else:
             origin_cols = ",".join(self.origin_cols)
 
         query = """SELECT %s
                    FROM origin o
                    WHERE """
         if with_visit:
             query += """
                    EXISTS (
                      SELECT 1
                      FROM origin_visit ov
                      INNER JOIN origin_visit_status ovs
                        ON ov.origin = ovs.origin AND ov.visit = ovs.visit
                      INNER JOIN snapshot ON ovs.snapshot=snapshot.id
                      WHERE ov.origin=o.id
                      )
                    AND """
         query += "url %s %%s "
         if not count:
             query += "ORDER BY id OFFSET %%s LIMIT %%s"
 
         if not regexp:
             query = query % (origin_cols, "ILIKE")
             query_params = ("%" + url_pattern + "%", offset, limit)
         else:
             query = query % (origin_cols, "~*")
             query_params = (url_pattern, offset, limit)
 
         if count:
             query_params = (query_params[0],)
 
         cur.execute(query, query_params)
 
     def origin_search(
         self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False, cur=None
     ):
         """Search for origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The search is performed in a case-insensitive way.
 
         Args:
             url_pattern (str): the string pattern to search for in origin urls
             offset (int): number of found origins to skip before returning
                 results
             limit (int): the maximum number of found origins to return
             regexp (bool): if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
             with_visit (bool): if True, filter out origins with no visit
 
         """
         self._origin_query(
             url_pattern,
             offset=offset,
             limit=limit,
             regexp=regexp,
             with_visit=with_visit,
             cur=cur,
         )
         yield from cur
 
     def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None):
         """Count origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The pattern search in origin urls is performed in a case-insensitive
         way.
 
         Args:
             url_pattern (str): the string pattern to search for in origin urls
             regexp (bool): if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
             with_visit (bool): if True, filter out origins with no visit
         """
         self._origin_query(
             url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur
         )
         return cur.fetchone()[0]
 
     release_add_cols = [
         "id",
         "target",
         "target_type",
         "date",
         "date_offset",
         "date_neg_utc_offset",
         "name",
         "comment",
         "synthetic",
         "author_fullname",
         "author_name",
         "author_email",
     ]
     release_get_cols = release_add_cols
 
     def release_get_from_list(self, releases, cur=None):
         cur = self._cursor(cur)
         query_keys = ", ".join(
             self.mangle_query_key(k, "release") for k in self.release_get_cols
         )
 
         yield from execute_values_generator(
             cur,
             """
             SELECT %s FROM (VALUES %%s) as t(sortkey, id)
             LEFT JOIN release ON t.id = release.id
             LEFT JOIN person author ON release.author = author.id
             ORDER BY sortkey
             """
             % query_keys,
             ((sortkey, id) for sortkey, id in enumerate(releases)),
         )
 
     def release_get_random(self, cur=None):
         return self._get_random_row_from_table("release", ["id"], "id", cur)
 
-    origin_metadata_get_cols = [
-        "origin.url",
+    _object_metadata_insert_cols = [
+        "type",
+        "id",
+        "authority_id",
+        "fetcher_id",
+        "discovery_date",
+        "format",
+        "metadata",
+    ]
+    """List of columns of the object_metadata table, used when writing
+    metadata."""
+
+    _object_metadata_insert_query = f"""
+        INSERT INTO object_metadata
+            ({', '.join(_object_metadata_insert_cols)})
+        VALUES ({', '.join('%s' for _ in _object_metadata_insert_cols)})
+        ON CONFLICT (id, authority_id, discovery_date, fetcher_id)
+        DO UPDATE SET
+            format=EXCLUDED.format,
+            metadata=EXCLUDED.metadata
+    """
+
+    object_metadata_get_cols = [
+        "id",
         "discovery_date",
         "metadata_authority.type",
         "metadata_authority.url",
         "metadata_fetcher.id",
         "metadata_fetcher.name",
         "metadata_fetcher.version",
         "format",
         "metadata",
     ]
+    """List of columns of the object_metadata, metadata_authority,
+    and metadata_fetcher tables, used when reading object metadata."""
+
+    _object_metadata_select_query = f"""
+        SELECT
+            object_metadata.id AS id,
+            {', '.join(object_metadata_get_cols[1:-1])},
+            object_metadata.metadata AS metadata
+        FROM object_metadata
+        INNER JOIN metadata_authority
+            ON (metadata_authority.id=authority_id)
+        INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id)
+        WHERE object_metadata.id=%s AND authority_id=%s
+    """
 
-    def origin_metadata_add(
+    def object_metadata_add(
         self,
-        origin: str,
+        object_type: str,
+        id: str,
         discovery_date: datetime.datetime,
-        authority: int,
-        fetcher: int,
+        authority_id: int,
+        fetcher_id: int,
         format: str,
         metadata: bytes,
-        cur=None,
-    ) -> None:
-        """ Add an origin_metadata for the origin at ts with provider, tool and
-        metadata.
-
-        Args:
-            origin: the origin's id for which the metadata is added
-            discovery_date: time when the metadata was found
-            authority: the metadata provider identifier
-            fetcher: the tool's identifier used to extract metadata
-            format: the format of the metadata
-            metadata: the metadata retrieved at the time and location
-        """
-        cur = self._cursor(cur)
-        insert = """INSERT INTO origin_metadata (origin_id, discovery_date,
-                    authority_id, fetcher_id, format, metadata)
-                    SELECT id, %s, %s, %s, %s, %s FROM origin WHERE url = %s
-                    ON CONFLICT (origin_id, authority_id, discovery_date, fetcher_id)
-                    DO UPDATE SET
-                        format=EXCLUDED.format,
-                        metadata=EXCLUDED.metadata
-                 """
-        cur.execute(
-            insert, (discovery_date, authority, fetcher, format, metadata, origin),
+        cur,
+    ):
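+        """Add a row to the object_metadata table, overwriting the format and
+        metadata values if a row already exists for the same
+        (id, authority_id, discovery_date, fetcher_id)."""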
+        query = self._object_metadata_insert_query
+        args: Dict[str, Any] = dict(
+            type=object_type,
+            id=id,
+            authority_id=authority_id,
+            fetcher_id=fetcher_id,
+            discovery_date=discovery_date,
+            format=format,
+            metadata=metadata,
         )
+        params = [args[col] for col in self._object_metadata_insert_cols]
 
-    def origin_metadata_get(
+        cur.execute(query, params)
+
+    def object_metadata_get(
         self,
-        origin_url: str,
-        authority: int,
+        object_type: str,
+        id: str,
+        authority_id: int,
         after_time: Optional[datetime.datetime],
         after_fetcher: Optional[int],
-        limit: Optional[int],
-        cur=None,
+        limit: int,
+        cur,
     ):
-        cur = self._cursor(cur)
-        assert self.origin_metadata_get_cols[-1] == "metadata"
-        query_parts = [
-            f"SELECT {', '.join(self.origin_metadata_get_cols[0:-1])}, "
-            f"  origin_metadata.metadata AS metadata "
-            f"FROM origin_metadata "
-            f"INNER JOIN metadata_authority "
-            f"  ON (metadata_authority.id=authority_id) "
-            f"INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id) "
-            f"INNER JOIN origin ON (origin.id=origin_metadata.origin_id) "
-            f"WHERE origin.url=%s AND authority_id=%s "
-        ]
-        args = [origin_url, authority]
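+        """Iterate over the metadata rows stored for the object ``id`` under
+        the given authority, ordered by (discovery_date, fetcher_id) and
+        optionally resuming after the (after_time, after_fetcher) page
+        token."""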
+        query_parts = [self._object_metadata_select_query]
+        args = [id, authority_id]
 
         if after_fetcher is not None:
             assert after_time
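+            # Row-wise comparison: resume strictly after the
+            # (after_time, after_fetcher) page token, without skipping rows
+            # that share the same discovery_date.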
             query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)")
             args.extend([after_time, after_fetcher])
         elif after_time is not None:
             query_parts.append("AND discovery_date > %s")
             args.append(after_time)
 
         query_parts.append("ORDER BY discovery_date, fetcher_id")
 
         if limit:
             query_parts.append("LIMIT %s")
             args.append(limit)
 
         cur.execute(" ".join(query_parts), args)
         yield from cur
 
     metadata_fetcher_cols = ["name", "version", "metadata"]
 
     def metadata_fetcher_add(
         self, name: str, version: str, metadata: bytes, cur=None
     ) -> None:
         cur = self._cursor(cur)
         cur.execute(
             "INSERT INTO metadata_fetcher (name, version, metadata) "
             "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING",
             (name, version, jsonize(metadata)),
         )
 
     def metadata_fetcher_get(self, name: str, version: str, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             f"SELECT {', '.join(self.metadata_fetcher_cols)} "
             f"FROM metadata_fetcher "
             f"WHERE name=%s AND version=%s",
             (name, version),
         )
         return cur.fetchone()
 
     def metadata_fetcher_get_id(
         self, name: str, version: str, cur=None
     ) -> Optional[int]:
         cur = self._cursor(cur)
         cur.execute(
             "SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s",
             (name, version),
         )
         row = cur.fetchone()
         if row:
             return row[0]
         else:
             return None
 
     metadata_authority_cols = ["type", "url", "metadata"]
 
     def metadata_authority_add(
         self, type: str, url: str, metadata: bytes, cur=None
     ) -> None:
         cur = self._cursor(cur)
         cur.execute(
             "INSERT INTO metadata_authority (type, url, metadata) "
             "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING",
             (type, url, jsonize(metadata)),
         )
 
     def metadata_authority_get(self, type: str, url: str, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             f"SELECT {', '.join(self.metadata_authority_cols)} "
             f"FROM metadata_authority "
             f"WHERE type=%s AND url=%s",
             (type, url),
         )
         return cur.fetchone()
 
     def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]:
         cur = self._cursor(cur)
         cur.execute(
             "SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url)
         )
         row = cur.fetchone()
         if row:
             return row[0]
         else:
             return None
 
     def _get_random_row_from_table(self, table_name, cols, id_col, cur=None):
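+        """Pick a random row from ``table_name``: draw a random 160-bit value
+        and return the first selected column of the nearest row at or above
+        it, falling back to the nearest row below it, so the selection wraps
+        around the id space."""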
         random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE))
         cur = self._cursor(cur)
         query = """
             (SELECT {cols} FROM {table} WHERE {id_col} >= %s
              ORDER BY {id_col} LIMIT 1)
             UNION
             (SELECT {cols} FROM {table} WHERE {id_col} < %s
              ORDER BY {id_col} DESC LIMIT 1)
             LIMIT 1
             """.format(
             cols=", ".join(cols), table=table_name, id_col=id_col
         )
         cur.execute(query, (random_sha1, random_sha1))
         row = cur.fetchone()
         if row:
             return row[0]
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index ea1cb044..91a30f28 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,1212 +1,1254 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import re
 import bisect
 import collections
 import copy
 import datetime
 import itertools
 import random
 
 from collections import defaultdict
 from datetime import timedelta
 from typing import (
     Any,
     Callable,
     Dict,
     Generic,
     Hashable,
     Iterable,
     Iterator,
     List,
     Optional,
     Tuple,
     TypeVar,
 )
 
 import attr
 
 from deprecated import deprecated
 
 from swh.core.api.serializers import msgpack_loads, msgpack_dumps
 from swh.model.model import (
     BaseContent,
     Content,
     SkippedContent,
     Directory,
     Revision,
     Release,
     Snapshot,
     OriginVisit,
     OriginVisitStatus,
     Origin,
     SHA1_SIZE,
 )
 from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex
 from swh.storage.objstorage import ObjStorage
 from swh.storage.utils import now
 
 from .exc import StorageArgumentException, HashCollision
 
 from .converters import origin_url_to_sha1
 from .utils import get_partition_bounds_bytes
 from .writer import JournalWriter
 
 # Max block size of contents to return
 BULK_BLOCK_CONTENT_LEN_MAX = 10000
 
 
 SortedListItem = TypeVar("SortedListItem")
 SortedListKey = TypeVar("SortedListKey")
 
 FetcherKey = Tuple[str, str]
 
 
 class SortedList(collections.UserList, Generic[SortedListKey, SortedListItem]):
     data: List[Tuple[SortedListKey, SortedListItem]]
 
     # https://github.com/python/mypy/issues/708
     # key: Callable[[SortedListItem], SortedListKey]
 
     def __init__(
         self,
         data: Optional[List[SortedListItem]] = None,
         key: Optional[Callable[[SortedListItem], SortedListKey]] = None,
     ):
         if key is None:
 
             def key(item):
                 return item
 
         assert key is not None  # for mypy
         super().__init__(sorted((key(x), x) for x in data or []))
 
         self.key: Callable[[SortedListItem], SortedListKey] = key
 
     def add(self, item: SortedListItem):
         k = self.key(item)
         bisect.insort(self.data, (k, item))
 
     def __iter__(self) -> Iterator[SortedListItem]:
         for (k, item) in self.data:
             yield item
 
     def iter_from(self, start_key: Any) -> Iterator[SortedListItem]:
         """Returns an iterator over all the elements whose key is greater
         than or equal to `start_key`.
         (This is an efficient equivalent to:
         `(x for x in L if key(x) >= start_key)`)
         """
         from_index = bisect.bisect_left(self.data, (start_key,))
         for (k, item) in itertools.islice(self.data, from_index, None):
             yield item
 
     def iter_after(self, start_key: Any) -> Iterator[SortedListItem]:
         """Same as iter_from, but using a strict inequality."""
         it = self.iter_from(start_key)
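+        # Skip the leading items whose key equals start_key, then yield the
+        # rest of the iterator unchanged.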
         for item in it:
             if self.key(item) > start_key:  # type: ignore
                 yield item
                 break
 
         yield from it
 
 
 class InMemoryStorage:
     def __init__(self, journal_writer=None):
 
         self.reset()
         self.journal_writer = JournalWriter(journal_writer)
 
     def reset(self):
         self._contents = {}
         self._content_indexes = defaultdict(lambda: defaultdict(set))
         self._skipped_contents = {}
         self._skipped_content_indexes = defaultdict(lambda: defaultdict(set))
         self._directories = {}
         self._revisions = {}
         self._releases = {}
         self._snapshots = {}
         self._origins = {}
         self._origins_by_id = []
         self._origins_by_sha1 = {}
         self._origin_visits = {}
         self._origin_visit_statuses: Dict[Tuple[str, int], List[OriginVisitStatus]] = {}
         self._persons = {}
 
-        # {origin_url: {authority: [metadata]}}
-        self._origin_metadata: Dict[
+        # {id: {authority: [metadata]}}
+        self._object_metadata: Dict[
             str,
             Dict[
                 Hashable,
                 SortedList[Tuple[datetime.datetime, FetcherKey], Dict[str, Any]],
             ],
         ] = defaultdict(
             lambda: defaultdict(
                 lambda: SortedList(key=lambda x: (x["discovery_date"], x["fetcher"]))
             )
         )  # noqa
 
         self._metadata_fetchers: Dict[FetcherKey, Dict[str, Any]] = {}
         self._metadata_authorities: Dict[Hashable, Dict[str, Any]] = {}
         self._objects = defaultdict(list)
         self._sorted_sha1s = SortedList[bytes, bytes]()
 
         self.objstorage = ObjStorage({"cls": "memory", "args": {}})
 
     def check_config(self, *, check_write):
         return True
 
     def _content_add(self, contents: Iterable[Content], with_data: bool) -> Dict:
         self.journal_writer.content_add(contents)
 
         content_add = 0
         if with_data:
             summary = self.objstorage.content_add(
                 c for c in contents if c.status != "absent"
             )
             content_add_bytes = summary["content:add:bytes"]
 
         for content in contents:
             key = self._content_key(content)
             if key in self._contents:
                 continue
             for algorithm in DEFAULT_ALGORITHMS:
                 hash_ = content.get_hash(algorithm)
                 if hash_ in self._content_indexes[algorithm] and (
                     algorithm not in {"blake2s256", "sha256"}
                 ):
                     colliding_content_hashes = []
                     # Add the already stored contents
                     for content_hashes_set in self._content_indexes[algorithm][hash_]:
                         hashes = dict(content_hashes_set)
                         colliding_content_hashes.append(hashes)
                     # Add the new colliding content
                     colliding_content_hashes.append(content.hashes())
                     raise HashCollision(algorithm, hash_, colliding_content_hashes)
             for algorithm in DEFAULT_ALGORITHMS:
                 hash_ = content.get_hash(algorithm)
                 self._content_indexes[algorithm][hash_].add(key)
             self._objects[content.sha1_git].append(("content", content.sha1))
             self._contents[key] = content
             self._sorted_sha1s.add(content.sha1)
             self._contents[key] = attr.evolve(self._contents[key], data=None)
             content_add += 1
 
         summary = {
             "content:add": content_add,
         }
         if with_data:
             summary["content:add:bytes"] = content_add_bytes
 
         return summary
 
     def content_add(self, content: Iterable[Content]) -> Dict:
         content = [attr.evolve(c, ctime=now()) for c in content]
         return self._content_add(content, with_data=True)
 
     def content_update(self, content, keys=[]):
         self.journal_writer.content_update(content)
 
         for cont_update in content:
             cont_update = cont_update.copy()
             sha1 = cont_update.pop("sha1")
             for old_key in self._content_indexes["sha1"][sha1]:
                 old_cont = self._contents.pop(old_key)
 
                 for algorithm in DEFAULT_ALGORITHMS:
                     hash_ = old_cont.get_hash(algorithm)
                     self._content_indexes[algorithm][hash_].remove(old_key)
 
                 new_cont = attr.evolve(old_cont, **cont_update)
                 new_key = self._content_key(new_cont)
 
                 self._contents[new_key] = new_cont
 
                 for algorithm in DEFAULT_ALGORITHMS:
                     hash_ = new_cont.get_hash(algorithm)
                     self._content_indexes[algorithm][hash_].add(new_key)
 
     def content_add_metadata(self, content: Iterable[Content]) -> Dict:
         return self._content_add(content, with_data=False)
 
     def content_get(self, content):
         # FIXME: Make this method support slicing the `data`.
         if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
             raise StorageArgumentException(
                 "Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX
             )
         yield from self.objstorage.content_get(content)
 
     def content_get_range(self, start, end, limit=1000):
         if limit is None:
             raise StorageArgumentException("limit should not be None")
         sha1s = (
             (sha1, content_key)
             for sha1 in self._sorted_sha1s.iter_from(start)
             for content_key in self._content_indexes["sha1"][sha1]
         )
         matched = []
         next_content = None
         for sha1, key in sha1s:
             if sha1 > end:
                 break
             if len(matched) >= limit:
                 next_content = sha1
                 break
             matched.append(self._contents[key].to_dict())
         return {
             "contents": matched,
             "next": next_content,
         }
 
     def content_get_partition(
         self,
         partition_id: int,
         nb_partitions: int,
         limit: int = 1000,
         page_token: str = None,
     ):
         if limit is None:
             raise StorageArgumentException("limit should not be None")
         (start, end) = get_partition_bounds_bytes(
             partition_id, nb_partitions, SHA1_SIZE
         )
         if page_token:
             start = hash_to_bytes(page_token)
         if end is None:
             end = b"\xff" * SHA1_SIZE
         result = self.content_get_range(start, end, limit)
         result2 = {
             "contents": result["contents"],
             "next_page_token": None,
         }
         if result["next"]:
             result2["next_page_token"] = hash_to_hex(result["next"])
         return result2
 
     def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
         result: Dict = {sha1: [] for sha1 in contents}
         for sha1 in contents:
             if sha1 in self._content_indexes["sha1"]:
                 objs = self._content_indexes["sha1"][sha1]
                 # only 1 element as content_add_metadata would have raised a
                 # hash collision otherwise
                 for key in objs:
                     d = self._contents[key].to_dict()
                     del d["ctime"]
                     if "data" in d:
                         del d["data"]
                     result[sha1].append(d)
         return result
 
     def content_find(self, content):
         if not set(content).intersection(DEFAULT_ALGORITHMS):
             raise StorageArgumentException(
                 "content keys must contain at least one of: %s"
                 % ", ".join(sorted(DEFAULT_ALGORITHMS))
             )
         found = []
         for algo in DEFAULT_ALGORITHMS:
             hash = content.get(algo)
             if hash and hash in self._content_indexes[algo]:
                 found.append(self._content_indexes[algo][hash])
 
         if not found:
             return []
 
         keys = list(set.intersection(*found))
         return [self._contents[key].to_dict() for key in keys]
 
     def content_missing(self, content, key_hash="sha1"):
         for cont in content:
             for (algo, hash_) in cont.items():
                 if algo not in DEFAULT_ALGORITHMS:
                     continue
                 if hash_ not in self._content_indexes.get(algo, []):
                     yield cont[key_hash]
                     break
             else:
                 for result in self.content_find(cont):
                     if result["status"] == "missing":
                         yield cont[key_hash]
 
     def content_missing_per_sha1(self, contents):
         for content in contents:
             if content not in self._content_indexes["sha1"]:
                 yield content
 
     def content_missing_per_sha1_git(self, contents):
         for content in contents:
             if content not in self._content_indexes["sha1_git"]:
                 yield content
 
     def content_get_random(self):
         return random.choice(list(self._content_indexes["sha1_git"]))
 
     def _skipped_content_add(self, contents: List[SkippedContent]) -> Dict:
         self.journal_writer.skipped_content_add(contents)
 
         summary = {"skipped_content:add": 0}
 
         missing_contents = self.skipped_content_missing([c.hashes() for c in contents])
         missing = {self._content_key(c) for c in missing_contents}
         contents = [c for c in contents if self._content_key(c) in missing]
         for content in contents:
             key = self._content_key(content)
             for algo in DEFAULT_ALGORITHMS:
                 if content.get_hash(algo):
                     self._skipped_content_indexes[algo][content.get_hash(algo)].add(key)
             self._skipped_contents[key] = content
             summary["skipped_content:add"] += 1
 
         return summary
 
     def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict:
         content = [attr.evolve(c, ctime=now()) for c in content]
         return self._skipped_content_add(content)
 
     def skipped_content_missing(self, contents):
         for content in contents:
             matches = list(self._skipped_contents.values())
             for (algorithm, key) in self._content_key(content):
                 if algorithm == "blake2s256":
                     continue
                 # Filter out skipped contents with the same hash
                 matches = [
                     match for match in matches if match.get_hash(algorithm) == key
                 ]
             # if none of the contents match
             if not matches:
                 yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS}
 
     def directory_add(self, directories: Iterable[Directory]) -> Dict:
         directories = [dir_ for dir_ in directories if dir_.id not in self._directories]
         self.journal_writer.directory_add(directories)
 
         count = 0
         for directory in directories:
             count += 1
             self._directories[directory.id] = directory
             self._objects[directory.id].append(("directory", directory.id))
 
         return {"directory:add": count}
 
     def directory_missing(self, directories):
         for id in directories:
             if id not in self._directories:
                 yield id
 
     def _join_dentry_to_content(self, dentry):
         keys = (
             "status",
             "sha1",
             "sha1_git",
             "sha256",
             "length",
         )
         ret = dict.fromkeys(keys)
         ret.update(dentry)
         if ret["type"] == "file":
             # TODO: Make it able to handle more than one content
             content = self.content_find({"sha1_git": ret["target"]})
             if content:
                 content = content[0]
                 for key in keys:
                     ret[key] = content[key]
         return ret
 
     def _directory_ls(self, directory_id, recursive, prefix=b""):
         if directory_id in self._directories:
             for entry in self._directories[directory_id].entries:
                 ret = self._join_dentry_to_content(entry.to_dict())
                 ret["name"] = prefix + ret["name"]
                 ret["dir_id"] = directory_id
                 yield ret
                 if recursive and ret["type"] == "dir":
                     yield from self._directory_ls(
                         ret["target"], True, prefix + ret["name"] + b"/"
                     )
 
     def directory_ls(self, directory, recursive=False):
         yield from self._directory_ls(directory, recursive)
 
     def directory_entry_get_by_path(self, directory, paths):
         return self._directory_entry_get_by_path(directory, paths, b"")
 
     def directory_get_random(self):
         if not self._directories:
             return None
         return random.choice(list(self._directories))
 
     def _directory_entry_get_by_path(self, directory, paths, prefix):
         if not paths:
             return
 
         contents = list(self.directory_ls(directory))
 
         if not contents:
             return
 
         def _get_entry(entries, name):
             for entry in entries:
                 if entry["name"] == name:
                     entry = entry.copy()
                     entry["name"] = prefix + entry["name"]
                     return entry
 
         first_item = _get_entry(contents, paths[0])
 
         if len(paths) == 1:
             return first_item
 
         if not first_item or first_item["type"] != "dir":
             return
 
         return self._directory_entry_get_by_path(
             first_item["target"], paths[1:], prefix + paths[0] + b"/"
         )
 
     def revision_add(self, revisions: Iterable[Revision]) -> Dict:
         revisions = [rev for rev in revisions if rev.id not in self._revisions]
         self.journal_writer.revision_add(revisions)
 
         count = 0
         for revision in revisions:
             revision = attr.evolve(
                 revision,
                 committer=self._person_add(revision.committer),
                 author=self._person_add(revision.author),
             )
             self._revisions[revision.id] = revision
             self._objects[revision.id].append(("revision", revision.id))
             count += 1
 
         return {"revision:add": count}
 
     def revision_missing(self, revisions):
         for id in revisions:
             if id not in self._revisions:
                 yield id
 
     def revision_get(self, revisions):
         for id in revisions:
             if id in self._revisions:
                 yield self._revisions.get(id).to_dict()
             else:
                 yield None
 
     def _get_parent_revs(self, rev_id, seen, limit):
         if limit and len(seen) >= limit:
             return
         if rev_id in seen or rev_id not in self._revisions:
             return
         seen.add(rev_id)
         yield self._revisions[rev_id].to_dict()
         for parent in self._revisions[rev_id].parents:
             yield from self._get_parent_revs(parent, seen, limit)
 
     def revision_log(self, revisions, limit=None):
         seen = set()
         for rev_id in revisions:
             yield from self._get_parent_revs(rev_id, seen, limit)
 
     def revision_shortlog(self, revisions, limit=None):
         yield from (
             (rev["id"], rev["parents"]) for rev in self.revision_log(revisions, limit)
         )
 
     def revision_get_random(self):
         return random.choice(list(self._revisions))
 
     def release_add(self, releases: Iterable[Release]) -> Dict:
         to_add = []
         for rel in releases:
             if rel.id not in self._releases and rel not in to_add:
                 to_add.append(rel)
         self.journal_writer.release_add(to_add)
 
         for rel in to_add:
             if rel.author:
                 self._person_add(rel.author)
             self._objects[rel.id].append(("release", rel.id))
             self._releases[rel.id] = rel
 
         return {"release:add": len(to_add)}
 
     def release_missing(self, releases):
         yield from (rel for rel in releases if rel not in self._releases)
 
     def release_get(self, releases):
         for rel_id in releases:
             if rel_id in self._releases:
                 yield self._releases[rel_id].to_dict()
             else:
                 yield None
 
     def release_get_random(self):
         return random.choice(list(self._releases))
 
     def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict:
         count = 0
         snapshots = (snap for snap in snapshots if snap.id not in self._snapshots)
         for snapshot in snapshots:
             self.journal_writer.snapshot_add([snapshot])
             self._snapshots[snapshot.id] = snapshot
             self._objects[snapshot.id].append(("snapshot", snapshot.id))
             count += 1
 
         return {"snapshot:add": count}
 
     def snapshot_missing(self, snapshots):
         for id in snapshots:
             if id not in self._snapshots:
                 yield id
 
     def snapshot_get(self, snapshot_id):
         return self.snapshot_get_branches(snapshot_id)
 
     def snapshot_get_by_origin_visit(self, origin, visit):
         origin_url = self._get_origin_url(origin)
         if not origin_url:
             return
 
         if origin_url not in self._origins or visit > len(
             self._origin_visits[origin_url]
         ):
             return None
 
         visit = self._origin_visit_get_updated(origin_url, visit)
         snapshot_id = visit.snapshot
         if snapshot_id:
             return self.snapshot_get(snapshot_id)
         else:
             return None
 
     def snapshot_count_branches(self, snapshot_id):
         snapshot = self._snapshots[snapshot_id]
         return collections.Counter(
             branch.target_type.value if branch else None
             for branch in snapshot.branches.values()
         )
 
     def snapshot_get_branches(
         self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None
     ):
         snapshot = self._snapshots.get(snapshot_id)
         if snapshot is None:
             return None
         sorted_branch_names = sorted(snapshot.branches)
         from_index = bisect.bisect_left(sorted_branch_names, branches_from)
         if target_types:
             next_branch = None
             branches = {}
             for branch_name in sorted_branch_names[from_index:]:
                 branch = snapshot.branches[branch_name]
                 if branch and branch.target_type.value in target_types:
                     if len(branches) < branches_count:
                         branches[branch_name] = branch
                     else:
                         next_branch = branch_name
                         break
         else:
             # As there is no 'target_types', we can do that much faster
             to_index = from_index + branches_count
             returned_branch_names = sorted_branch_names[from_index:to_index]
             branches = {
                 branch_name: snapshot.branches[branch_name]
                 for branch_name in returned_branch_names
             }
             if to_index >= len(sorted_branch_names):
                 next_branch = None
             else:
                 next_branch = sorted_branch_names[to_index]
 
         branches = {
             name: branch.to_dict() if branch else None
             for (name, branch) in branches.items()
         }
 
         return {
             "id": snapshot_id,
             "branches": branches,
             "next_branch": next_branch,
         }
 
     def snapshot_get_random(self):
         return random.choice(list(self._snapshots))
 
     def object_find_by_sha1_git(self, ids):
         ret = {}
         for id_ in ids:
             objs = self._objects.get(id_, [])
             ret[id_] = [{"sha1_git": id_, "type": obj[0],} for obj in objs]
         return ret
 
     def _convert_origin(self, t):
         if t is None:
             return None
 
         return t.to_dict()
 
     def origin_get(self, origins):
         if isinstance(origins, dict):
             # Old API
             return_single = True
             origins = [origins]
         else:
             return_single = False
 
         # Sanity check to be error-compatible with the pgsql backend
         if any("id" in origin for origin in origins) and not all(
             "id" in origin for origin in origins
         ):
             raise StorageArgumentException(
                 'Either all origins or none at all should have an "id".'
             )
         if any("url" in origin for origin in origins) and not all(
             "url" in origin for origin in origins
         ):
             raise StorageArgumentException(
                 "Either all origins or none at all should have " 'an "url" key.'
             )
 
         results = []
         for origin in origins:
             result = None
             if "url" in origin:
                 if origin["url"] in self._origins:
                     result = self._origins[origin["url"]]
             else:
                 raise StorageArgumentException("Origin must have an url.")
             results.append(self._convert_origin(result))
 
         if return_single:
             assert len(results) == 1
             return results[0]
         else:
             return results
 
     def origin_get_by_sha1(self, sha1s):
         return [self._convert_origin(self._origins_by_sha1.get(sha1)) for sha1 in sha1s]
 
     def origin_get_range(self, origin_from=1, origin_count=100):
         origin_from = max(origin_from, 1)
         if origin_from <= len(self._origins_by_id):
             max_idx = origin_from + origin_count - 1
             if max_idx > len(self._origins_by_id):
                 max_idx = len(self._origins_by_id)
             for idx in range(origin_from - 1, max_idx):
                 origin = self._convert_origin(self._origins[self._origins_by_id[idx]])
                 yield {"id": idx + 1, **origin}
 
     def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict:
         origin_urls = sorted(self._origins)
         if page_token:
             from_ = bisect.bisect_left(origin_urls, page_token)
         else:
             from_ = 0
 
         result = {
             "origins": [
                 {"url": origin_url} for origin_url in origin_urls[from_ : from_ + limit]
             ]
         }
 
         if from_ + limit < len(origin_urls):
             result["next_page_token"] = origin_urls[from_ + limit]
 
         return result
 
     def origin_search(
         self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False
     ):
         origins = map(self._convert_origin, self._origins.values())
         if regexp:
             pat = re.compile(url_pattern)
             origins = [orig for orig in origins if pat.search(orig["url"])]
         else:
             origins = [orig for orig in origins if url_pattern in orig["url"]]
         if with_visit:
             filtered_origins = []
             for orig in origins:
                 visits = (
                     self._origin_visit_get_updated(ov.origin, ov.visit)
                     for ov in self._origin_visits[orig["url"]]
                 )
                 for ov in visits:
                     if ov.snapshot and ov.snapshot in self._snapshots:
                         filtered_origins.append(orig)
                         break
         else:
             filtered_origins = origins
 
         return filtered_origins[offset : offset + limit]
 
     def origin_count(self, url_pattern, regexp=False, with_visit=False):
         return len(
             self.origin_search(
                 url_pattern,
                 regexp=regexp,
                 with_visit=with_visit,
                 limit=len(self._origins),
             )
         )
 
     def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]:
         origins = list(origins)
         added = 0
         for origin in origins:
             if origin.url not in self._origins:
                 self.origin_add_one(origin)
                 added += 1
 
         return {"origin:add": added}
 
     @deprecated("Use origin_add([origin]) instead")
     def origin_add_one(self, origin: Origin) -> str:
         if origin.url not in self._origins:
             self.journal_writer.origin_add([origin])
             # generate an origin_id because it is needed by origin_get_range.
             # TODO: remove this when we remove origin_get_range
             origin_id = len(self._origins) + 1
             self._origins_by_id.append(origin.url)
             assert len(self._origins_by_id) == origin_id
 
             self._origins[origin.url] = origin
             self._origins_by_sha1[origin_url_to_sha1(origin.url)] = origin
             self._origin_visits[origin.url] = []
             self._objects[origin.url].append(("origin", origin.url))
 
         return origin.url
 
     def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]:
         for visit in visits:
             origin = self.origin_get({"url": visit.origin})
             if not origin:  # Cannot add a visit without an origin
                 raise StorageArgumentException("Unknown origin %s", visit.origin)
 
         all_visits = []
         for visit in visits:
             origin_url = visit.origin
             if origin_url in self._origins:
                 origin = self._origins[origin_url]
                 if visit.visit:
                     self.journal_writer.origin_visit_add([visit])
                     while len(self._origin_visits[origin_url]) < visit.visit:
                         self._origin_visits[origin_url].append(None)
                     self._origin_visits[origin_url][visit.visit - 1] = visit
                 else:
                     # visit ids are in the range [1, +inf[
                     visit_id = len(self._origin_visits[origin_url]) + 1
                     visit = attr.evolve(visit, visit=visit_id)
                     self.journal_writer.origin_visit_add([visit])
                     self._origin_visits[origin_url].append(visit)
                     visit_key = (origin_url, visit.visit)
                     self._objects[visit_key].append(("origin_visit", None))
                 assert visit.visit is not None
                 self._origin_visit_status_add_one(
                     OriginVisitStatus(
                         origin=visit.origin,
                         visit=visit.visit,
                         date=visit.date,
                         status="created",
                         snapshot=None,
                     )
                 )
                 all_visits.append(visit)
 
         return all_visits
 
     def _origin_visit_status_add_one(self, visit_status: OriginVisitStatus) -> None:
         """Add an origin visit status without checks. If already present, do nothing.
 
         """
         self.journal_writer.origin_visit_status_add([visit_status])
         visit_key = (visit_status.origin, visit_status.visit)
         self._origin_visit_statuses.setdefault(visit_key, [])
         visit_statuses = self._origin_visit_statuses[visit_key]
         if visit_status not in visit_statuses:
             visit_statuses.append(visit_status)
 
     def origin_visit_status_add(
         self, visit_statuses: Iterable[OriginVisitStatus],
     ) -> None:
         # First round to check existence (fail early if any is ko)
         for visit_status in visit_statuses:
             origin_url = self.origin_get({"url": visit_status.origin})
             if not origin_url:
                 raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
 
         for visit_status in visit_statuses:
             self._origin_visit_status_add_one(visit_status)
 
     def _origin_visit_get_updated(self, origin: str, visit_id: int) -> OriginVisit:
         """Merge origin visit and latest origin visit status
 
         """
         assert visit_id >= 1
         visit = self._origin_visits[origin][visit_id - 1]
         assert visit is not None
         visit_key = (origin, visit_id)
 
         visit_update = max(self._origin_visit_statuses[visit_key], key=lambda v: v.date)
         return OriginVisit.from_dict(
             {
                 # default to the values in visit
                 **visit.to_dict(),
                 # override with the last update
                 **visit_update.to_dict(),
                 # but keep the date of the creation of the origin visit
                 "date": visit.date,
             }
         )
 
     def origin_visit_get(
         self,
         origin: str,
         last_visit: Optional[int] = None,
         limit: Optional[int] = None,
         order: str = "asc",
     ) -> Iterable[Dict[str, Any]]:
         order = order.lower()
         assert order in ["asc", "desc"]
         origin_url = self._get_origin_url(origin)
         if origin_url in self._origin_visits:
             visits = self._origin_visits[origin_url]
             visits = sorted(visits, key=lambda v: v.visit, reverse=(order == "desc"))
             if last_visit is not None:
                 if order == "asc":
                     visits = [v for v in visits if v.visit > last_visit]
                 else:
                     visits = [v for v in visits if v.visit < last_visit]
             if limit is not None:
                 visits = visits[:limit]
             for visit in visits:
                 if not visit:
                     continue
                 visit_id = visit.visit
 
                 visit_update = self._origin_visit_get_updated(origin_url, visit_id)
                 assert visit_update is not None
                 yield visit_update.to_dict()
 
     def origin_visit_find_by_date(
         self, origin: str, visit_date: datetime.datetime
     ) -> Optional[Dict[str, Any]]:
         origin_url = self._get_origin_url(origin)
         if origin_url in self._origin_visits:
             visits = self._origin_visits[origin_url]
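             # Pick the visit whose date is closest to the requested date;
             # ties are broken in favor of the largest visit id (hence the
             # negated id in the sort key).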
             visit = min(visits, key=lambda v: (abs(v.date - visit_date), -v.visit))
             visit_update = self._origin_visit_get_updated(origin_url, visit.visit)
             assert visit_update is not None
             return visit_update.to_dict()
         return None
 
     def origin_visit_get_by(self, origin: str, visit: int) -> Optional[Dict[str, Any]]:
         origin_url = self._get_origin_url(origin)
         if origin_url in self._origin_visits and visit <= len(
             self._origin_visits[origin_url]
         ):
             visit_update = self._origin_visit_get_updated(origin_url, visit)
             assert visit_update is not None
             return visit_update.to_dict()
         return None
 
     def origin_visit_get_latest(
         self,
         origin: str,
         type: Optional[str] = None,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[Dict[str, Any]]:
         ori = self._origins.get(origin)
         if not ori:
             return None
         visits = self._origin_visits[ori.url]
 
         visits = [
             self._origin_visit_get_updated(visit.origin, visit.visit)
             for visit in visits
             if visit is not None
         ]
 
         if type is not None:
             visits = [visit for visit in visits if visit.type == type]
         if allowed_statuses is not None:
             visits = [visit for visit in visits if visit.status in allowed_statuses]
         if require_snapshot:
             visits = [visit for visit in visits if visit.snapshot]
 
         visit = max(visits, key=lambda v: (v.date, v.visit), default=None)
         if visit is None:
             return None
         return visit.to_dict()
 
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[OriginVisitStatus]:
         ori = self._origins.get(origin_url)
         if not ori:
             return None
 
         visit_key = (origin_url, visit)
         visits = self._origin_visit_statuses.get(visit_key)
         if not visits:
             return None
 
         if allowed_statuses is not None:
             visits = [visit for visit in visits if visit.status in allowed_statuses]
         if require_snapshot:
             visits = [visit for visit in visits if visit.snapshot]
 
         visit_status = max(visits, key=lambda v: (v.date, v.visit), default=None)
         return visit_status
 
     def _select_random_origin_visit_by_type(self, type: str) -> str:
         while True:
             url = random.choice(list(self._origin_visits.keys()))
             random_origin_visits = self._origin_visits[url]
             if random_origin_visits[0].type == type:
                 return url
 
     def origin_visit_get_random(self, type: str) -> Optional[Dict[str, Any]]:
         url = self._select_random_origin_visit_by_type(type)
         random_origin_visits = copy.deepcopy(self._origin_visits[url])
         random_origin_visits.reverse()
         back_in_the_day = now() - timedelta(weeks=12)  # 3 months back
         # This should be enough for tests
         for visit in random_origin_visits:
             updated_visit = self._origin_visit_get_updated(url, visit.visit)
             assert updated_visit is not None
             if updated_visit.date > back_in_the_day and updated_visit.status == "full":
                 return updated_visit.to_dict()
         return None
 
     def stat_counters(self):
         keys = (
             "content",
             "directory",
             "origin",
             "origin_visit",
             "person",
             "release",
             "revision",
             "skipped_content",
             "snapshot",
         )
         stats = {key: 0 for key in keys}
         stats.update(
             collections.Counter(
                 obj_type
                 for (obj_type, obj_id) in itertools.chain(*self._objects.values())
             )
         )
         return stats
 
     def refresh_stat_counters(self):
         pass
 
+    def content_metadata_add(
+        self,
+        id: str,
+        discovery_date: datetime.datetime,
+        authority: Dict[str, Any],
+        fetcher: Dict[str, Any],
+        format: str,
+        metadata: bytes,
+    ) -> None:
+        self._object_metadata_add(
+            "content", id, discovery_date, authority, fetcher, format, metadata,
+        )
+
     def origin_metadata_add(
         self,
         origin_url: str,
         discovery_date: datetime.datetime,
         authority: Dict[str, Any],
         fetcher: Dict[str, Any],
         format: str,
         metadata: bytes,
     ) -> None:
         if not isinstance(origin_url, str):
             raise StorageArgumentException(
-                "origin_id must be str, not %r" % (origin_url,)
+                "origin_url must be str, not %r" % (origin_url,)
             )
+        self._object_metadata_add(
+            "origin", origin_url, discovery_date, authority, fetcher, format, metadata,
+        )
+
+    def _object_metadata_add(
+        self,
+        object_type: str,
+        id: str,
+        discovery_date: datetime.datetime,
+        authority: Dict[str, Any],
+        fetcher: Dict[str, Any],
+        format: str,
+        metadata: bytes,
+    ) -> None:
         if not isinstance(metadata, bytes):
             raise StorageArgumentException(
                 "metadata must be bytes, not %r" % (metadata,)
             )
         authority_key = self._metadata_authority_key(authority)
         if authority_key not in self._metadata_authorities:
             raise StorageArgumentException(f"Unknown authority {authority}")
         fetcher_key = self._metadata_fetcher_key(fetcher)
         if fetcher_key not in self._metadata_fetchers:
             raise StorageArgumentException(f"Unknown fetcher {fetcher}")
 
-        origin_metadata_list = self._origin_metadata[origin_url][authority_key]
+        object_metadata_list = self._object_metadata[id][authority_key]
 
-        origin_metadata = {
-            "origin_url": origin_url,
+        object_metadata: Dict[str, Any] = {
+            "id": id,
             "discovery_date": discovery_date,
             "authority": authority_key,
             "fetcher": fetcher_key,
             "format": format,
             "metadata": metadata,
         }
 
-        for existing_origin_metadata in origin_metadata_list:
+        for existing_object_metadata in object_metadata_list:
             if (
-                existing_origin_metadata["fetcher"] == fetcher_key
-                and existing_origin_metadata["discovery_date"] == discovery_date
+                existing_object_metadata["fetcher"] == fetcher_key
+                and existing_object_metadata["discovery_date"] == discovery_date
             ):
                 # Duplicate of an existing one; replace it.
-                existing_origin_metadata.update(origin_metadata)
+                existing_object_metadata.update(object_metadata)
                 break
         else:
-            origin_metadata_list.add(origin_metadata)
-        return None
+            object_metadata_list.add(object_metadata)
 
     def origin_metadata_get(
         self,
         origin_url: str,
         authority: Dict[str, str],
         after: Optional[datetime.datetime] = None,
         page_token: Optional[bytes] = None,
         limit: int = 1000,
     ) -> Dict[str, Any]:
         if not isinstance(origin_url, str):
             raise TypeError("origin_url must be str, not %r" % (origin_url,))
 
+        res = self._object_metadata_get(
+            "origin", origin_url, authority, after, page_token, limit
+        )
+        res["results"] = copy.deepcopy(res["results"])
+        for result in res["results"]:
+            result["origin_url"] = result.pop("id")
+
+        return res
+
+    def _object_metadata_get(
+        self,
+        object_type: str,
+        id: str,
+        authority: Dict[str, str],
+        after: Optional[datetime.datetime] = None,
+        page_token: Optional[bytes] = None,
+        limit: int = 1000,
+    ) -> Dict[str, Any]:
         authority_key = self._metadata_authority_key(authority)
 
         if page_token is not None:
             (after_time, after_fetcher) = msgpack_loads(page_token)
             after_fetcher = tuple(after_fetcher)
             if after is not None and after > after_time:
                 raise StorageArgumentException(
                     "page_token is inconsistent with the value of 'after'."
                 )
-            entries = self._origin_metadata[origin_url][authority_key].iter_after(
+            entries = self._object_metadata[id][authority_key].iter_after(
                 (after_time, after_fetcher)
             )
         elif after is not None:
-            entries = self._origin_metadata[origin_url][authority_key].iter_from(
-                (after,)
-            )
+            entries = self._object_metadata[id][authority_key].iter_from((after,))
             entries = (entry for entry in entries if entry["discovery_date"] > after)
         else:
-            entries = iter(self._origin_metadata[origin_url][authority_key])
+            entries = iter(self._object_metadata[id][authority_key])
 
         if limit:
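             # Take one extra entry beyond `limit`, so we can tell further down
             # whether there is a next page.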
             entries = itertools.islice(entries, 0, limit + 1)
 
         results = []
         for entry in entries:
             authority = self._metadata_authorities[entry["authority"]]
             fetcher = self._metadata_fetchers[entry["fetcher"]]
             if after:
                 assert entry["discovery_date"] > after
             results.append(
                 {
                     **entry,
                     "authority": {"type": authority["type"], "url": authority["url"],},
                     "fetcher": {
                         "name": fetcher["name"],
                         "version": fetcher["version"],
                     },
                 }
             )
 
         if len(results) > limit:
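             # We fetched one entry beyond `limit`: there is at least one more
             # page. Drop the extra entry and use (discovery_date, fetcher) of
             # the last kept entry as the opaque page token.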
             results.pop()
             assert len(results) == limit
             last_result = results[-1]
             next_page_token: Optional[bytes] = msgpack_dumps(
                 (
                     last_result["discovery_date"],
                     self._metadata_fetcher_key(last_result["fetcher"]),
                 )
             )
         else:
             next_page_token = None
 
         return {
             "next_page_token": next_page_token,
             "results": results,
         }
 
     def metadata_fetcher_add(
         self, name: str, version: str, metadata: Dict[str, Any]
     ) -> None:
         fetcher = {
             "name": name,
             "version": version,
             "metadata": metadata,
         }
         key = self._metadata_fetcher_key(fetcher)
         if key not in self._metadata_fetchers:
             self._metadata_fetchers[key] = fetcher
 
     def metadata_fetcher_get(self, name: str, version: str) -> Optional[Dict[str, Any]]:
         return self._metadata_fetchers.get(
             self._metadata_fetcher_key({"name": name, "version": version})
         )
 
     def metadata_authority_add(
         self, type: str, url: str, metadata: Dict[str, Any]
     ) -> None:
         authority = {
             "type": type,
             "url": url,
             "metadata": metadata,
         }
         key = self._metadata_authority_key(authority)
         self._metadata_authorities[key] = authority
 
     def metadata_authority_get(self, type: str, url: str) -> Optional[Dict[str, Any]]:
         return self._metadata_authorities.get(
             self._metadata_authority_key({"type": type, "url": url})
         )
 
     def _get_origin_url(self, origin):
         if isinstance(origin, str):
             return origin
         else:
             raise TypeError("origin must be a string.")
 
     def _person_add(self, person):
         key = ("person", person.fullname)
         if key not in self._objects:
             self._persons[person.fullname] = person
             self._objects[key].append(key)
 
         return self._persons[person.fullname]
 
     @staticmethod
     def _content_key(content):
         """ A stable key and the algorithm for a content"""
         if isinstance(content, BaseContent):
             content = content.to_dict()
         return tuple((key, content.get(key)) for key in sorted(DEFAULT_ALGORITHMS))
 
     @staticmethod
     def _metadata_fetcher_key(fetcher: Dict) -> FetcherKey:
         return (fetcher["name"], fetcher["version"])
 
     @staticmethod
     def _metadata_authority_key(authority: Dict) -> Hashable:
         return (authority["type"], authority["url"])
 
     def diff_directories(self, from_dir, to_dir, track_renaming=False):
         raise NotImplementedError("InMemoryStorage.diff_directories")
 
     def diff_revisions(self, from_rev, to_rev, track_renaming=False):
         raise NotImplementedError("InMemoryStorage.diff_revisions")
 
     def diff_revision(self, revision, track_renaming=False):
         raise NotImplementedError("InMemoryStorage.diff_revision")
 
     def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
         """Do nothing
 
         """
         return None
 
     def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
         return {}
diff --git a/swh/storage/interface.py b/swh/storage/interface.py
index 68a759db..58629280 100644
--- a/swh/storage/interface.py
+++ b/swh/storage/interface.py
@@ -1,1299 +1,1299 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import datetime
 
 from typing import Any, Dict, Iterable, List, Optional
 
 from swh.core.api import remote_api_endpoint
 from swh.model.model import (
     Content,
     Directory,
     Origin,
     OriginVisit,
     OriginVisitStatus,
     Revision,
     Release,
     Snapshot,
     SkippedContent,
 )
 
 
 def deprecated(f):
     f.deprecated_endpoint = True
     return f
 
 
 class StorageInterface:
     @remote_api_endpoint("check_config")
     def check_config(self, *, check_write):
         """Check that the storage is configured and ready to go."""
         ...
 
     @remote_api_endpoint("content/add")
     def content_add(self, content: Iterable[Content]) -> Dict:
         """Add content blobs to the storage
 
         Args:
             contents (iterable): iterable of dictionaries representing
                 individual pieces of content to add. Each dictionary has the
                 following keys:
 
                 - data (bytes): the actual content
                 - length (int): content length
                 - one key for each checksum algorithm in
                   :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
                   corresponding checksum
                 - status (str): one of visible, hidden
 
         Raises:
 
             The following exceptions can occur:
 
             - HashCollision in case of collision
             - Any other exceptions raised by the db
 
             In case of errors, some of the content may have been stored in
             the DB and in the objstorage.
             Since additions to both are idempotent, that should not be a problem.
 
         Returns:
             Summary dict with the following keys and associated values:
 
                 content:add: New contents added
                 content:add:bytes: Sum of the contents' length data
         """
         ...
 
     @remote_api_endpoint("content/update")
     def content_update(self, content, keys=[]):
         """Update content blobs to the storage. Does nothing for unknown
         contents or skipped ones.
 
         Args:
             content (iterable): iterable of dictionaries representing
                 individual pieces of content to update. Each dictionary has the
                 following keys:
 
                 - data (bytes): the actual content
                 - length (int): content length (default: -1)
                 - one key for each checksum algorithm in
                   :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
                   corresponding checksum
                 - status (str): one of visible, hidden, absent
 
             keys (list): List of keys (str) whose values needs an update, e.g.,
                 new hash column
 
         """
         ...
 
     @remote_api_endpoint("content/add_metadata")
     def content_add_metadata(self, content: Iterable[Content]) -> Dict:
         """Add content metadata to the storage (like `content_add`, but
         without inserting to the objstorage).
 
         Args:
             content (iterable): iterable of dictionaries representing
                 individual pieces of content to add. Each dictionary has the
                 following keys:
 
                 - length (int): content length (default: -1)
                 - one key for each checksum algorithm in
                   :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
                   corresponding checksum
                 - status (str): one of visible, hidden, absent
                 - reason (str): if status = absent, the reason why
                 - origin (int): if status = absent, the origin we saw the
                   content in
                 - ctime (datetime): time of insertion in the archive
 
         Returns:
             Summary dict with the following key and associated values:
 
                 content:add: New contents added
                 skipped_content:add: New skipped contents (no data) added
         """
         ...
 
     @remote_api_endpoint("content/data")
     def content_get(self, content):
         """Retrieve in bulk contents and their data.
 
         This generator yields exactly as many items as provided sha1
         identifiers, but callers should not assume this will always be true.
 
         It may also yield `None` values in case an object was not found.
 
         Args:
             content: iterables of sha1
 
         Yields:
             Dict[str, bytes]: Generates streams of contents as dict with their
                 raw data:
 
                 - sha1 (bytes): content id
                 - data (bytes): content's raw data
 
         Raises:
             ValueError in case too many contents are required.
             cf. BULK_BLOCK_CONTENT_LEN_MAX
 
         """
         ...
 
     @deprecated
     @remote_api_endpoint("content/range")
     def content_get_range(self, start, end, limit=1000):
         """Retrieve contents within range [start, end] bound by limit.
 
         Note that this function may return more than one blob per hash. The
         limit is enforced with multiplicity (i.e. two blobs with the same hash
         will count twice toward the limit).
 
         Args:
             **start** (bytes): Starting identifier range (expected smaller
                            than end)
             **end** (bytes): Ending identifier range (expected larger
                              than start)
             **limit** (int): Limit result (default to 1000)
 
         Returns:
             a dict with keys:
             - contents [dict]: iterable of contents in between the range.
             - next (bytes): if set, more content remains in the range,
               starting from this next sha1
 
         """
         ...
 
     @remote_api_endpoint("content/partition")
     def content_get_partition(
         self,
         partition_id: int,
         nb_partitions: int,
         limit: int = 1000,
         page_token: str = None,
     ):
         """Splits contents into nb_partitions, and returns one of these based on
         partition_id (which must be in [0, nb_partitions-1])
 
         There is no guarantee on how the partitioning is done, or the
         result order.
 
         Args:
             partition_id (int): index of the partition to fetch
             nb_partitions (int): total number of partitions to split into
             limit (int): Limit result (default to 1000)
             page_token (Optional[str]): opaque token used for pagination.
 
         Returns:
             a dict with keys:
               - contents (List[dict]): iterable of contents in the partition.
               - **next_page_token** (Optional[str]): opaque token to be used as
                   `page_token` for retrieving the next page. If absent, there are
                   no more pages to gather.
         """
         ...
 
     @remote_api_endpoint("content/metadata")
     def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
         """Retrieve content metadata in bulk
 
         Args:
             content: iterable of content identifiers (sha1)
 
         Returns:
             a dict with the contents' sha1 checksums as keys and, as associated
             value, either the existing content's metadata or None if the
             content does not exist.
 
         """
         ...
 
     @remote_api_endpoint("content/missing")
     def content_missing(self, content, key_hash="sha1"):
         """List content missing from storage
 
         Args:
             content ([dict]): iterable of dictionaries whose keys are
                               either 'length' or an item of
                               :data:`swh.model.hashutil.ALGORITHMS`;
                               mapped to the corresponding checksum
                               (or length).
 
             key_hash (str): name of the column to use as hash id
                             result (default: 'sha1')
 
         Returns:
             iterable ([bytes]): missing content ids (as per the
             key_hash column)
 
         Raises:
             TODO: an exception when we get a hash collision.
 
         """
         ...
 
     @remote_api_endpoint("content/missing/sha1")
     def content_missing_per_sha1(self, contents):
         """List content missing from storage based only on sha1.
 
         Args:
             contents: Iterable of sha1 to check for absence.
 
         Returns:
             iterable: missing ids
 
         Raises:
             TODO: an exception when we get a hash collision.
 
         """
         ...
 
     @remote_api_endpoint("content/missing/sha1_git")
     def content_missing_per_sha1_git(self, contents):
         """List content missing from storage based only on sha1_git.
 
         Args:
             contents (Iterable): An iterable of content id (sha1_git)
 
         Yields:
             missing contents sha1_git
         """
         ...
 
     @remote_api_endpoint("content/present")
     def content_find(self, content):
         """Find a content hash in db.
 
         Args:
             content: a dictionary representing one content hash, mapping
                 checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to
                 checksum values
 
         Returns:
             a triplet (sha1, sha1_git, sha256) if the content exists
             or None otherwise.
 
         Raises:
             ValueError: in case the key of the dictionary is not sha1, sha1_git
                 nor sha256.
 
         """
         ...
 
     @remote_api_endpoint("content/get_random")
     def content_get_random(self):
         """Finds a random content id.
 
         Returns:
             a sha1_git
         """
         ...
 
     @remote_api_endpoint("content/skipped/add")
     def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict:
         """Add contents to the skipped_content list, which contains
         (partial) information about content missing from the archive.
 
         Args:
             contents (iterable): iterable of dictionaries representing
                 individual pieces of content to add. Each dictionary has the
                 following keys:
 
                 - length (Optional[int]): content length (default: -1)
                 - one key for each checksum algorithm in
                   :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
                   corresponding checksum; each is optional
                 - status (str): must be "absent"
                 - reason (str): the reason why the content is absent
                 - origin (int): if status = absent, the origin we saw the
                   content in
 
         Raises:
 
             The following exceptions can occur:
 
             - HashCollision in case of collision
             - Any other exceptions raised by the backend
 
             In case of errors, some content may have been stored in
             the DB and in the objstorage.
             Since additions to both are idempotent, that should not be a problem.
 
         Returns:
             Summary dict with the following key and associated values:
 
                 skipped_content:add: New skipped contents (no data) added
         """
         ...
 
     @remote_api_endpoint("content/skipped/missing")
     def skipped_content_missing(self, contents):
         """List skipped_content missing from storage
 
         Args:
             content: iterable of dictionaries containing the data for each
                 checksum algorithm.
 
         Returns:
             iterable: missing signatures
 
         """
         ...
 
     @remote_api_endpoint("directory/add")
     def directory_add(self, directories: Iterable[Directory]) -> Dict:
         """Add directories to the storage
 
         Args:
             directories (iterable): iterable of dictionaries representing the
                 individual directories to add. Each dict has the following
                 keys:
 
                 - id (sha1_git): the id of the directory to add
                 - entries (list): list of dicts for each entry in the
                       directory.  Each dict has the following keys:
 
                       - name (bytes)
                       - type (one of 'file', 'dir', 'rev'): type of the
                         directory entry (file, directory, revision)
                       - target (sha1_git): id of the object pointed at by the
                         directory entry
                       - perms (int): entry permissions
 
         Returns:
             Summary dict of keys with associated count as values:
 
                 directory:add: Number of directories actually added
 
         """
         ...
 
     @remote_api_endpoint("directory/missing")
     def directory_missing(self, directories):
         """List directories missing from storage
 
         Args:
             directories (iterable): an iterable of directory ids
 
         Yields:
             missing directory ids
 
         """
         ...
 
     @remote_api_endpoint("directory/ls")
     def directory_ls(self, directory, recursive=False):
         """Get entries for one directory.
 
         Args:
             - directory: the directory to list entries from.
             - recursive: if the flag is on, the listing is done recursively from this directory.
 
         Returns:
             List of entries for such directory.
 
         If `recursive=True`, names in the path of a dir/file not at the
         root are concatenated with a slash (`/`).
 
         """
         ...
 
     @remote_api_endpoint("directory/path")
     def directory_entry_get_by_path(self, directory, paths):
         """Get the directory entry (either file or dir) from directory with path.
 
         Args:
             - directory: sha1 of the top level directory
             - paths: path to lookup from the top level directory. From left
               (top) to right (bottom).
 
         Returns:
             The corresponding directory entry if found, None otherwise.
 
         """
         ...
 
     @remote_api_endpoint("directory/get_random")
     def directory_get_random(self):
         """Finds a random directory id.
 
         Returns:
             a sha1_git
         """
         ...
 
     @remote_api_endpoint("revision/add")
     def revision_add(self, revisions: Iterable[Revision]) -> Dict:
         """Add revisions to the storage
 
         Args:
             revisions (Iterable[dict]): iterable of dictionaries representing
                 the individual revisions to add. Each dict has the following
                 keys:
 
                 - **id** (:class:`sha1_git`): id of the revision to add
                 - **date** (:class:`dict`): date the revision was written
                 - **committer_date** (:class:`dict`): date the revision got
                   added to the origin
                 - **type** (one of 'git', 'tar'): type of the
                   revision added
                 - **directory** (:class:`sha1_git`): the directory the
                   revision points at
                 - **message** (:class:`bytes`): the message associated with
                   the revision
                 - **author** (:class:`Dict[str, bytes]`): dictionary with
                   keys: name, fullname, email
                 - **committer** (:class:`Dict[str, bytes]`): dictionary with
                   keys: name, fullname, email
                 - **metadata** (:class:`jsonb`): extra information as
                   dictionary
                 - **synthetic** (:class:`bool`): revision's nature (a tarball or
                   directory import creates a synthetic revision)
                 - **parents** (:class:`list[sha1_git]`): the parents of
                   this revision
 
         date dictionaries have the form defined in :mod:`swh.model`.
 
         Returns:
             Summary dict of keys with associated count as values
 
                 revision:add: New objects actually stored in db
 
         """
         ...
 
     @remote_api_endpoint("revision/missing")
     def revision_missing(self, revisions):
         """List revisions missing from storage
 
         Args:
             revisions (iterable): revision ids
 
         Yields:
             missing revision ids
 
         """
         ...
 
     @remote_api_endpoint("revision")
     def revision_get(self, revisions):
         """Get all revisions from storage
 
         Args:
             revisions: an iterable of revision ids
 
         Returns:
             iterable: an iterable of revisions as dictionaries (or None if the
                 revision doesn't exist)
 
         """
         ...
 
     @remote_api_endpoint("revision/log")
     def revision_log(self, revisions, limit=None):
         """Fetch revision entry from the given root revisions.
 
         Args:
             revisions: array of root revision to lookup
             limit: limitation on the output result. Default to None.
 
         Yields:
             List of revision log from such revisions root.
 
         """
         ...
 
     @remote_api_endpoint("revision/shortlog")
     def revision_shortlog(self, revisions, limit=None):
         """Fetch the shortlog for the given revisions
 
         Args:
             revisions: list of root revisions to lookup
             limit: depth limitation for the output
 
         Yields:
             a list of (id, parents) tuples.
 
         """
         ...
 
     @remote_api_endpoint("revision/get_random")
     def revision_get_random(self):
         """Finds a random revision id.
 
         Returns:
             a sha1_git
         """
         ...
 
     @remote_api_endpoint("release/add")
     def release_add(self, releases: Iterable[Release]) -> Dict:
         """Add releases to the storage
 
         Args:
             releases (Iterable[dict]): iterable of dictionaries representing
                 the individual releases to add. Each dict has the following
                 keys:
 
                 - **id** (:class:`sha1_git`): id of the release to add
                 - **revision** (:class:`sha1_git`): id of the revision the
                   release points to
                 - **date** (:class:`dict`): the date the release was made
                 - **name** (:class:`bytes`): the name of the release
                 - **comment** (:class:`bytes`): the comment associated with
                   the release
                 - **author** (:class:`Dict[str, bytes]`): dictionary with
                   keys: name, fullname, email
 
         the date dictionary has the form defined in :mod:`swh.model`.
 
         Returns:
             Summary dict of keys with associated count as values
 
                 release:add: New objects actually stored in db
 
         """
         ...
 
     @remote_api_endpoint("release/missing")
     def release_missing(self, releases):
         """List releases missing from storage
 
         Args:
             releases: an iterable of release ids
 
         Returns:
             a list of missing release ids
 
         """
         ...
 
     @remote_api_endpoint("release")
     def release_get(self, releases):
         """Given a list of sha1, return the releases's information
 
         Args:
             releases: list of sha1s
 
         Yields:
             dicts with the same keys as those given to `release_add`
             (or ``None`` if a release does not exist)
 
         """
         ...
 
     @remote_api_endpoint("release/get_random")
     def release_get_random(self):
         """Finds a random release id.
 
         Returns:
             a sha1_git
         """
         ...
 
     @remote_api_endpoint("snapshot/add")
     def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict:
         """Add snapshots to the storage.
 
         Args:
             snapshot ([dict]): the snapshots to add, containing the
               following keys:
 
               - **id** (:class:`bytes`): id of the snapshot
               - **branches** (:class:`dict`): branches the snapshot contains,
                 mapping the branch name (:class:`bytes`) to the branch target,
                 itself a :class:`dict` (or ``None`` if the branch points to an
                 unknown object)
 
                 - **target_type** (:class:`str`): one of ``content``,
                   ``directory``, ``revision``, ``release``,
                   ``snapshot``, ``alias``
                 - **target** (:class:`bytes`): identifier of the target
                   (currently a ``sha1_git`` for all object kinds, or the name
                   of the target branch for aliases)
 
         Raises:
             ValueError: if the origin or visit id does not exist.
 
         Returns:
 
             Summary dict of keys with associated count as values
 
                 snapshot:add: Count of objects actually stored in db
 
         """
         ...
 
     @remote_api_endpoint("snapshot/missing")
     def snapshot_missing(self, snapshots):
         """List snapshots missing from storage
 
         Args:
             snapshots (iterable): an iterable of snapshot ids
 
         Yields:
             missing snapshot ids
 
         """
         ...
 
     @remote_api_endpoint("snapshot")
     def snapshot_get(self, snapshot_id):
         """Get the content, possibly partial, of a snapshot with the given id
 
         The branches of the snapshot are iterated in the lexicographical
         order of their names.
 
         .. warning:: At most 1000 branches contained in the snapshot will be
             returned for performance reasons. In order to browse the whole
             set of branches, the method :meth:`snapshot_get_branches`
             should be used instead.
 
         Args:
             snapshot_id (bytes): identifier of the snapshot
         Returns:
             dict: a dict with three keys:
                 * **id**: identifier of the snapshot
                 * **branches**: a dict of branches contained in the snapshot
                   whose keys are the branches' names.
                 * **next_branch**: the name of the first branch not returned
                   or :const:`None` if the snapshot has less than 1000
                   branches.
         """
         ...
 
     @remote_api_endpoint("snapshot/by_origin_visit")
     def snapshot_get_by_origin_visit(self, origin, visit):
         """Get the content, possibly partial, of a snapshot for the given origin visit
 
         The branches of the snapshot are iterated in the lexicographical
         order of their names.
 
         .. warning:: At most 1000 branches contained in the snapshot will be
             returned for performance reasons. In order to browse the whole
             set of branches, the method :meth:`snapshot_get_branches`
             should be used instead.
 
         Args:
             origin (int): the origin identifier
             visit (int): the visit identifier
         Returns:
             dict: None if the snapshot does not exist;
               a dict with three keys otherwise:
                 * **id**: identifier of the snapshot
                 * **branches**: a dict of branches contained in the snapshot
                   whose keys are the branches' names.
                 * **next_branch**: the name of the first branch not returned
                   or :const:`None` if the snapshot has less than 1000
                   branches.
 
         """
         ...
 
     @remote_api_endpoint("snapshot/count_branches")
     def snapshot_count_branches(self, snapshot_id):
         """Count the number of branches in the snapshot with the given id
 
         Args:
             snapshot_id (bytes): identifier of the snapshot
 
         Returns:
             dict: A dict whose keys are the target types of branches and
             values their corresponding amount
         """
         ...
 
     @remote_api_endpoint("snapshot/get_branches")
     def snapshot_get_branches(
         self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None
     ):
         """Get the content, possibly partial, of a snapshot with the given id
 
         The branches of the snapshot are iterated in the lexicographical
         order of their names.
 
         Args:
             snapshot_id (bytes): identifier of the snapshot
             branches_from (bytes): optional parameter used to skip branches
                 whose name is lexicographically lower than it before returning them
             branches_count (int): optional parameter used to restrict
                 the number of returned branches
             target_types (list): optional parameter used to filter the
                 target types of branch to return (possible values that can be
                 contained in that list are `'content', 'directory',
                 'revision', 'release', 'snapshot', 'alias'`)
         Returns:
             dict: None if the snapshot does not exist;
               a dict with three keys otherwise:
                 * **id**: identifier of the snapshot
                 * **branches**: a dict of branches contained in the snapshot
                   whose keys are the branches' names.
                 * **next_branch**: the name of the first branch not returned
                   or :const:`None` if the snapshot has less than
                   `branches_count` branches after `branches_from` included.
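
         For instance, a minimal sketch collecting all branches page by page
         (``storage`` is assumed to be any object implementing this interface,
         and ``snapshot_id`` the id of a stored snapshot):

         .. code-block:: python

             branches = {}
             branches_from = b""
             while True:
                 partial = storage.snapshot_get_branches(
                     snapshot_id, branches_from=branches_from
                 )
                 branches.update(partial["branches"])
                 if partial["next_branch"] is None:
                     break
                 branches_from = partial["next_branch"]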
         """
         ...
 
     @remote_api_endpoint("snapshot/get_random")
     def snapshot_get_random(self):
         """Finds a random snapshot id.
 
         Returns:
             a sha1_git
         """
         ...
 
     @remote_api_endpoint("origin/visit/add")
     def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]:
         """Add visits to storage. If the visits have no id, they will be created and assigned
         one. The resulted visits are visits with their visit id set.
 
         Args:
             visits: Iterable of OriginVisit objects to add
 
         Raises:
             StorageArgumentException if some origin visits reference unknown origins
 
         Returns:
             Iterable[OriginVisit] stored
 
         """
         ...
 
     @remote_api_endpoint("origin/visit_status/add")
     def origin_visit_status_add(
         self, visit_statuses: Iterable[OriginVisitStatus],
     ) -> None:
         """Add origin visit statuses.
 
         Args:
             visit_statuses: origin visit statuses to add
 
         Raises: StorageArgumentException if the origin of the visit status is unknown
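
         For instance, a minimal sketch recording that visit ``1`` of an origin
         completed successfully (``storage`` is assumed to be any object
         implementing this interface, and ``snp_id`` the id of a snapshot
         previously added with :meth:`snapshot_add`):

         .. code-block:: python

             storage.origin_visit_status_add(
                 [
                     OriginVisitStatus(
                         origin="https://example.org/repo.git",
                         visit=1,
                         date=datetime.datetime.now(tz=datetime.timezone.utc),
                         status="full",
                         snapshot=snp_id,
                     )
                 ]
             )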
 
         """
         ...
 
     @remote_api_endpoint("origin/visit/get")
     def origin_visit_get(
         self,
         origin: str,
         last_visit: Optional[int] = None,
         limit: Optional[int] = None,
         order: str = "asc",
     ) -> Iterable[Dict[str, Any]]:
         """Retrieve all the origin's visit's information.
 
         Args:
             origin: The visited origin
             last_visit: Starting point from which to list the next visits.
                 Defaults to None
             limit: Number of results to return from the last visit.
                 Defaults to None
             order: Order on the visit id field to list origin visits (defaults to asc)
 
         Yields:
             List of visits.
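
         For instance, a minimal paging sketch using ``last_visit`` as a cursor
         (``storage`` is assumed to be any object implementing this interface):

         .. code-block:: python

             last_visit = None
             while True:
                 visits = list(
                     storage.origin_visit_get(
                         "https://example.org/repo.git",
                         last_visit=last_visit,
                         limit=100,
                     )
                 )
                 if not visits:
                     break
                 # ... process the visit dicts here ...
                 last_visit = visits[-1]["visit"]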
 
         """
         ...
 
     @remote_api_endpoint("origin/visit/find_by_date")
     def origin_visit_find_by_date(
         self, origin: str, visit_date: datetime.datetime
     ) -> Optional[Dict[str, Any]]:
         """Retrieves the origin visit whose date is closest to the provided
         timestamp.
         In case of a tie, the visit with the largest id is selected.
 
         Args:
             origin: origin (URL)
             visit_date: expected visit date
 
         Returns:
             A visit
 
         """
         ...
 
     @remote_api_endpoint("origin/visit/getby")
     def origin_visit_get_by(self, origin: str, visit: int) -> Optional[Dict[str, Any]]:
         """Retrieve origin visit's information.
 
         Args:
             origin: origin (URL)
             visit: visit id
 
         Returns:
             The information on that particular (origin, visit) or None if
             it does not exist
 
         """
         ...
 
     @remote_api_endpoint("origin/visit/get_latest")
     def origin_visit_get_latest(
         self,
         origin: str,
         type: Optional[str] = None,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[Dict[str, Any]]:
         """Get the latest origin visit for the given origin, optionally
         looking only for those with one of the given allowed_statuses
         or for those with a snapshot.
 
         Args:
             origin: origin URL
             type: Optional visit type to filter on (e.g. git, tar, dsc, svn,
                 hg, npm, pypi, ...)
             allowed_statuses: list of visit statuses considered
                 to find the latest visit. For instance,
                 ``allowed_statuses=['full']`` will only consider visits that
                 have successfully run to completion.
             require_snapshot: If True, only a visit with a snapshot
                 will be returned.
 
         Returns:
             dict: a dict with the following keys:
 
                 - **origin**: the URL of the origin
                 - **visit**: origin visit id
                 - **type**: type of loader used for the visit
                 - **date**: timestamp of such visit
                 - **status**: Visit's new status
                 - **metadata**: Data associated to the visit
                 - **snapshot** (Optional[sha1_git]): identifier of the snapshot
                     associated to the visit
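
         For instance, to get the latest visit of an origin that ran to
         completion and produced a snapshot (a minimal sketch, ``storage``
         being any object implementing this interface):

         .. code-block:: python

             visit = storage.origin_visit_get_latest(
                 "https://example.org/repo.git",
                 allowed_statuses=["full"],
                 require_snapshot=True,
             )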
 
         """
         ...
 
     @remote_api_endpoint("origin/visit_status/get_latest")
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[OriginVisitStatus]:
         """Get the latest origin visit status for the given origin visit, optionally
         looking only for those with one of the given allowed_statuses or with a
         snapshot.
 
         Args:
             origin_url: origin URL
             visit: visit id
             allowed_statuses: list of visit statuses considered to find the latest
                 visit. Possible values are {created, ongoing, partial, full}. For
                 instance, ``allowed_statuses=['full']`` will only consider visits that
                 have successfully run to completion.
             require_snapshot: If True, only a visit with a snapshot
                 will be returned.
 
         Returns:
             The OriginVisitStatus matching the criteria
 
         """
         ...
 
     @remote_api_endpoint("origin/visit/get_random")
     def origin_visit_get_random(self, type: str) -> Optional[Dict[str, Any]]:
         """Randomly select one successful origin visit with <type>
         made in the last 3 months.
 
         Returns:
             dict representing an origin visit, in the same format as
             :py:meth:`origin_visit_get`.
 
         """
         ...
 
     @remote_api_endpoint("object/find_by_sha1_git")
     def object_find_by_sha1_git(self, ids):
         """Return the objects found with the given ids.
 
         Args:
             ids: a generator of sha1_gits
 
         Returns:
             dict: a mapping from id to the list of objects found. Each object
             found is itself a dict with keys:
 
             - sha1_git: the input id
             - type: the type of object found
 
         """
         ...
 
     @remote_api_endpoint("origin/get")
     def origin_get(self, origins):
         """Return origins, either all identified by their ids or all
         identified by tuples (type, url).
 
         If the url is given and the type is omitted, one of the origins with
         that url is returned.
 
         Args:
             origin: a list of dictionaries representing the individual
                 origins to find.
                 These dicts have the key url:
 
                 - url (bytes): the url the origin points to
 
         Returns:
             dict: the origin dictionary with the keys:
 
             - id: origin's id
             - url: origin's url
 
         Raises:
             ValueError: if the url or the id does not exist.
 
         """
         ...
 
     @remote_api_endpoint("origin/get_sha1")
     def origin_get_by_sha1(self, sha1s):
         """Return origins, identified by the sha1 of their URLs.
 
         Args:
             sha1s (list[bytes]): a list of sha1s
 
         Yields:
             dicts containing origin information as returned
             by :meth:`swh.storage.storage.Storage.origin_get`, or None if an
             origin matching the sha1 is not found.
 
         """
         ...
 
     @deprecated
     @remote_api_endpoint("origin/get_range")
     def origin_get_range(self, origin_from=1, origin_count=100):
         """Retrieve ``origin_count`` origins whose ids are greater
         or equal than ``origin_from``.
 
         Origins are sorted by id before retrieving them.
 
         Args:
             origin_from (int): the minimum id of origins to retrieve
             origin_count (int): the maximum number of origins to retrieve
 
         Yields:
             dicts containing origin information as returned
             by :meth:`swh.storage.storage.Storage.origin_get`.
         """
         ...
 
     @remote_api_endpoint("origin/list")
     def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict:
         """Returns the list of origins
 
         Args:
             page_token: opaque token used for pagination.
             limit: the maximum number of results to return
 
         Returns:
             dict: dict with the following keys:
               - **next_page_token** (str, optional): opaque token to be used as
                   `page_token` for retrieving the next page. If absent, there are
                   no more pages to gather.
               - **origins** (List[dict]): list of origins, as returned by
                 `origin_get`.
         """
         ...
 
     @remote_api_endpoint("origin/search")
     def origin_search(
         self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False
     ):
         """Search for origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The search is performed in a case insensitive way.
 
         Args:
             url_pattern (str): the string pattern to search for in origin urls
             offset (int): number of found origins to skip before returning
                 results
             limit (int): the maximum number of found origins to return
             regexp (bool): if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
             with_visit (bool): if True, filter out origins with no visit
 
         Yields:
             dicts containing origin information as returned
             by :meth:`swh.storage.storage.Storage.origin_get`.
         """
         ...
 
     @deprecated
     @remote_api_endpoint("origin/count")
     def origin_count(self, url_pattern, regexp=False, with_visit=False):
         """Count origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The pattern search in origin urls is performed in a case insensitive
         way.
 
         Args:
             url_pattern (str): the string pattern to search for in origin urls
             regexp (bool): if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
             with_visit (bool): if True, filter out origins with no visit
 
         Returns:
             int: The number of origins matching the search criterion.
         """
         ...
 
     @remote_api_endpoint("origin/add_multi")
     def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]:
         """Add origins to the storage
 
         Args:
             origins: list of dictionaries representing the individual origins,
                 with the following keys:
 
                 - type: the origin type ('git', 'svn', 'deb', ...)
                 - url (bytes): the url the origin points to
 
         Returns:
             Summary dict of keys with associated count as values
 
                 origin:add: Count of objects actually stored in db
 
         """
         ...
 
     @deprecated
     @remote_api_endpoint("origin/add")
     def origin_add_one(self, origin: Origin) -> str:
         """Add origin to the storage
 
         Args:
             origin: dictionary representing the individual origin to add. This
                 dict has the following keys:
 
                 - type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
                 - url (bytes): the url the origin points to
 
         Returns:
             the id of the added origin, or of the identical one that already
             exists.
 
         """
         ...
 
     def stat_counters(self):
         """compute statistics about the number of tuples in various tables
 
         Returns:
             dict: a dictionary mapping textual labels (e.g., content) to
             integer values (e.g., the number of tuples in table content)
 
         """
         ...
 
     def refresh_stat_counters(self):
         """Recomputes the statistics for `stat_counters`."""
         ...
 
     @remote_api_endpoint("origin/metadata/add")
     def origin_metadata_add(
         self,
         origin_url: str,
         discovery_date: datetime.datetime,
         authority: Dict[str, Any],
         fetcher: Dict[str, Any],
         format: str,
         metadata: bytes,
     ) -> None:
         """Add an origin_metadata for the origin at discovery_date,
         obtained using the `fetcher` from the `authority`.
 
         The authority and fetcher must be known to the storage before
         using this endpoint.
 
         If there is already origin metadata for the same origin, authority,
         fetcher, and at the same date, it will be replaced by this one.
 
         Args:
             discovery_date: when the metadata was fetched.
             authority: a dict containing keys `type` and `url`.
             fetcher: a dict containing keys `name` and `version`.
             format: text field indicating the format of the content of the
                 metadata
             metadata: blob of raw metadata
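
         For instance, a minimal sketch (``storage`` is assumed to be any
         object implementing this interface; the authority, fetcher and
         metadata values are illustrative only):

         .. code-block:: python

             storage.metadata_authority_add("forge", "https://example.org", {})
             storage.metadata_fetcher_add("swh-example-loader", "1.0.0", {})
             storage.origin_metadata_add(
                 "https://example.org/repo.git",
                 datetime.datetime.now(tz=datetime.timezone.utc),
                 {"type": "forge", "url": "https://example.org"},
                 {"name": "swh-example-loader", "version": "1.0.0"},
                 "application/json",
                 b'{"license": "GPL-3.0-or-later"}',
             )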
         """
         ...
 
     @remote_api_endpoint("origin/metadata/get")
     def origin_metadata_get(
         self,
         origin_url: str,
         authority: Dict[str, str],
         after: Optional[datetime.datetime] = None,
         page_token: Optional[bytes] = None,
         limit: int = 1000,
     ) -> Dict[str, Any]:
-        """Retrieve list of all origin_metadata entries for the origin_id
+        """Retrieve list of all origin_metadata entries for the origin_url
 
         Args:
             origin_url: the origin's URL
             authority: a dict containing keys `type` and `url`.
             after: minimum discovery_date for a result to be returned
             page_token: opaque token, used to get the next page of results
             limit: maximum number of results to be returned
 
         Returns:
             dict with keys `next_page_token` and `results`.
             `next_page_token` is an opaque token that is used to get the
             next page of results, or `None` if there are no more results.
             `results` is a list of dicts in the format:
 
             .. code-block:: python
 
                 {
                     'authority': {'type': ..., 'url': ...},
                     'fetcher': {'name': ..., 'version': ...},
                     'discovery_date': ...,
                     'format': '...',
                     'metadata': b'...'
                 }
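
         For instance, a minimal sketch collecting every page of results
         (``storage`` is assumed to be any object implementing this interface):

         .. code-block:: python

             results = []
             page_token = None
             while True:
                 page = storage.origin_metadata_get(
                     "https://example.org/repo.git",
                     {"type": "forge", "url": "https://example.org"},
                     page_token=page_token,
                 )
                 results.extend(page["results"])
                 page_token = page["next_page_token"]
                 if page_token is None:
                     break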
 
         """
         ...
 
     @remote_api_endpoint("fetcher/add")
     def metadata_fetcher_add(
         self, name: str, version: str, metadata: Dict[str, Any]
     ) -> None:
         """Add a new metadata fetcher to the storage.
 
         `name` and `version` together are a unique identifier of this
         fetcher; and `metadata` is an arbitrary dict of JSONable data
         with information about this fetcher.
 
         Args:
             name: the name of the fetcher
             version: version of the fetcher
 
         """
         ...
 
     @remote_api_endpoint("fetcher/get")
     def metadata_fetcher_get(self, name: str, version: str) -> Optional[Dict[str, Any]]:
         """Retrieve information about a fetcher
 
         Args:
             name: the name of the fetcher
             version: version of the fetcher
 
         Returns:
             dictionary with keys `name`, `version`, and `metadata`; or None
             if the fetcher is not known
 
         """
         ...
 
     @remote_api_endpoint("authority/add")
     def metadata_authority_add(
         self, type: str, url: str, metadata: Dict[str, Any]
     ) -> None:
         """Add a metadata authority
 
         Args:
             type: one of "deposit", "forge", or "registry"
             url: unique URI identifying the authority
             metadata: JSON-encodable object
         """
         ...
 
     @remote_api_endpoint("authority/get")
     def metadata_authority_get(self, type: str, url: str) -> Optional[Dict[str, Any]]:
         """Retrieve information about an authority
 
         Args:
             type: one of "deposit", "forge", or "registry"
             url: unique URI identifying the authority
 
         Returns:
             dictionary with keys `type`, `url`, and `metadata`; or None
             if the authority is not known
         """
         ...
 
     @deprecated
     @remote_api_endpoint("algos/diff_directories")
     def diff_directories(self, from_dir, to_dir, track_renaming=False):
         """Compute the list of file changes introduced between two arbitrary
         directories (insertion / deletion / modification / renaming of files).
 
         Args:
             from_dir (bytes): identifier of the directory to compare from
             to_dir (bytes): identifier of the directory to compare to
             track_renaming (bool): whether or not to track file renaming
 
         Returns:
             A list of dict describing the introduced file changes
             (see :func:`swh.storage.algos.diff.diff_directories`
             for more details).
         """
         ...
 
     @deprecated
     @remote_api_endpoint("algos/diff_revisions")
     def diff_revisions(self, from_rev, to_rev, track_renaming=False):
         """Compute the list of file changes introduced between two arbitrary
         revisions (insertion / deletion / modification / renaming of files).
 
         Args:
             from_rev (bytes): identifier of the revision to compare from
             to_rev (bytes): identifier of the revision to compare to
             track_renaming (bool): whether or not to track file renaming
 
         Returns:
             A list of dict describing the introduced file changes
             (see :func:`swh.storage.algos.diff.diff_directories`
             for more details).
         """
         ...
 
     @deprecated
     @remote_api_endpoint("algos/diff_revision")
     def diff_revision(self, revision, track_renaming=False):
         """Compute the list of file changes introduced by a specific revision
         (insertion / deletion / modification / renaming of files) by comparing
         it against its first parent.
 
         Args:
             revision (bytes): identifier of the revision from which to
                 compute the list of files changes
             track_renaming (bool): whether or not to track file renaming
 
         Returns:
             A list of dict describing the introduced file changes
             (see :func:`swh.storage.algos.diff.diff_directories`
             for more details).
         """
         ...
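
A brief sketch of calling these (deprecated) diff endpoints; `storage` is assumed to be a configured instance, and the two directory identifiers are placeholder sha1_git values:

.. code-block:: python

    # Compare two directories by their sha1_git identifiers (placeholders).
    from_dir = bytes.fromhex("4b825dc642cb6eb9a060e54bf8d69288fbee4904")
    to_dir = bytes.fromhex("8f4cf2d5c7ed3c8a58a27a3b6b292487fe3b0d92")

    for change in storage.diff_directories(from_dir, to_dir, track_renaming=True):
        # Each dict describes one insertion / deletion / modification / renaming.
        print(change)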
 
     @remote_api_endpoint("clear/buffer")
     def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
         """For backend storages (pg, storage, in-memory), this is a noop operation. For proxy
         storages (especially filter, buffer), this is an operation which cleans internal
         state.
 
         """
 
     @remote_api_endpoint("flush")
     def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
         """For backend storages (pg, storage, in-memory), this is expected to be a noop
         operation. For proxy storages (especially buffer), this is expected to trigger
         actual writes to the backend.
         """
         ...
diff --git a/swh/storage/sql/30-swh-schema.sql b/swh/storage/sql/30-swh-schema.sql
index e9e91cad..28bae81a 100644
--- a/swh/storage/sql/30-swh-schema.sql
+++ b/swh/storage/sql/30-swh-schema.sql
@@ -1,482 +1,488 @@
 ---
 --- SQL implementation of the Software Heritage data model
 ---
 
 -- schema versions
 create table dbversion
 (
   version     int primary key,
   release     timestamptz,
   description text
 );
 
 comment on table dbversion is 'Details of current db version';
 comment on column dbversion.version is 'SQL schema version';
 comment on column dbversion.release is 'Version deployment timestamp';
 comment on column dbversion.description is 'Release description';
 
 -- latest schema version
 insert into dbversion(version, release, description)
-      values(156, now(), 'Work In Progress');
+      values(157, now(), 'Work In Progress');
 
 -- a SHA1 checksum
 create domain sha1 as bytea check (length(value) = 20);
 
 -- a Git object ID, i.e., a Git-style salted SHA1 checksum
 create domain sha1_git as bytea check (length(value) = 20);
 
 -- a SHA256 checksum
 create domain sha256 as bytea check (length(value) = 32);
 
 -- a blake2 checksum
 create domain blake2s256 as bytea check (length(value) = 32);
 
 -- UNIX path (absolute, relative, individual path component, etc.)
 create domain unix_path as bytea;
 
 -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod
 create domain file_perms as int;
 
+-- an SWHID
+create domain swhid as text check (value ~ '^swh:[0-9]+:.*');
+
 
 -- Checksums about actual file content. Note that the content itself is not
 -- stored in the DB, but on external (key-value) storage. A single checksum is
 -- used as key there, but the others can be used to verify that we do not
 -- unknowingly inject content collisions.
 create table content
 (
   sha1       sha1 not null,
   sha1_git   sha1_git not null,
   sha256     sha256 not null,
   blake2s256 blake2s256 not null,
   length     bigint not null,
   ctime      timestamptz not null default now(),
              -- creation time, i.e. time of (first) injection into the storage
   status     content_status not null default 'visible',
   object_id  bigserial
 );
 
 comment on table content is 'Checksums of file content which is actually stored externally';
 comment on column content.sha1 is 'Content sha1 hash';
 comment on column content.sha1_git is 'Git object sha1 hash';
 comment on column content.sha256 is 'Content Sha256 hash';
 comment on column content.blake2s256 is 'Content blake2s hash';
 comment on column content.length is 'Content length';
 comment on column content.ctime is 'First seen time';
 comment on column content.status is 'Content status (absent, visible, hidden)';
 comment on column content.object_id is 'Content identifier';
 
 
 -- An origin is a place, identified by a URL, where software source code
 -- artifacts can be found. We support different kinds of origins, e.g., git and
 -- other VCS repositories, web pages that list tarballs URLs (e.g.,
 -- http://www.kernel.org), indirect tarball URLs (e.g.,
 -- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is
 -- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to
 -- retrieve all the contained software.
 create table origin
 (
   id       bigserial not null,
   url      text not null
 );
 
 comment on column origin.id is 'Artifact origin id';
 comment on column origin.url is 'URL of origin';
 
 
 -- Content blobs observed somewhere, but not ingested into the archive for
 -- whatever reason. This table is separate from the content table as we might
 -- not have the sha1 checksum of skipped contents (for instance when we inject
 -- git repositories, objects that are too big will be skipped here, and we will
 -- only know their sha1_git). 'reason' contains the reason the content was
 -- skipped. 'origin' is a nullable column allowing one to find out which
 -- origin contains that skipped content.
 create table skipped_content
 (
   sha1       sha1,
   sha1_git   sha1_git,
   sha256     sha256,
   blake2s256 blake2s256,
   length     bigint not null,
   ctime      timestamptz not null default now(),
   status     content_status not null default 'absent',
   reason     text not null,
   origin     bigint,
   object_id  bigserial
 );
 
 comment on table skipped_content is 'Content blobs observed, but not ingested in the archive';
 comment on column skipped_content.sha1 is 'Skipped content sha1 hash';
 comment on column skipped_content.sha1_git is 'Git object sha1 hash';
 comment on column skipped_content.sha256 is 'Skipped content sha256 hash';
 comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash';
 comment on column skipped_content.length is 'Skipped content length';
 comment on column skipped_content.ctime is 'First seen time';
 comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)';
 comment on column skipped_content.reason is 'Reason for skipping';
 comment on column skipped_content.origin is 'Origin table identifier';
 comment on column skipped_content.object_id is 'Skipped content identifier';
 
 
 -- A file-system directory.  A directory is a list of directory entries (see
 -- tables: directory_entry_{dir,file}).
 --
 -- To list the contents of a directory:
 -- 1. list the contained directory_entry_dir using array dir_entries
 -- 2. list the contained directory_entry_file using array file_entries
 -- 3. list the contained directory_entry_rev using array rev_entries
 -- 4. UNION
 --
 -- Synonyms/mappings:
 -- * git: tree
 create table directory
 (
   id            sha1_git not null,
   dir_entries   bigint[],  -- sub-directories, reference directory_entry_dir
   file_entries  bigint[],  -- contained files, reference directory_entry_file
   rev_entries   bigint[],  -- mounted revisions, reference directory_entry_rev
   object_id     bigserial  -- short object identifier
 );
 
 comment on table directory is 'Contents of a directory, synonymous to tree (git)';
 comment on column directory.id is 'Git object sha1 hash';
 comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir';
 comment on column directory.file_entries is 'Contained files, reference directory_entry_file';
 comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev';
 comment on column directory.object_id is 'Short object identifier';
 
 
 -- A directory entry pointing to a (sub-)directory.
 create table directory_entry_dir
 (
   id      bigserial,
   target  sha1_git not null,   -- id of target directory
   name    unix_path not null,  -- path name, relative to containing dir
   perms   file_perms not null  -- unix-like permissions
 );
 
 comment on table directory_entry_dir is 'Directory entry for directory';
 comment on column directory_entry_dir.id is 'Directory identifier';
 comment on column directory_entry_dir.target is 'Target directory identifier';
 comment on column directory_entry_dir.name is 'Path name, relative to containing directory';
 comment on column directory_entry_dir.perms is 'Unix-like permissions';
 
 
 -- A directory entry pointing to a file content.
 create table directory_entry_file
 (
   id      bigserial,
   target  sha1_git not null,   -- id of target file
   name    unix_path not null,  -- path name, relative to containing dir
   perms   file_perms not null  -- unix-like permissions
 );
 
 comment on table directory_entry_file is 'Directory entry for file';
 comment on column directory_entry_file.id is 'File identifier';
 comment on column directory_entry_file.target is 'Target file identifier';
 comment on column directory_entry_file.name is 'Path name, relative to containing directory';
 comment on column directory_entry_file.perms is 'Unix-like permissions';
 
 
 -- A directory entry pointing to a revision.
 create table directory_entry_rev
 (
   id      bigserial,
   target  sha1_git not null,   -- id of target revision
   name    unix_path not null,  -- path name, relative to containing dir
   perms   file_perms not null  -- unix-like permissions
 );
 
 comment on table directory_entry_rev is 'Directory entry for revision';
 comment on column directory_entry_rev.id is 'Revision identifier';
 comment on column directory_entry_rev.target is 'Target revision identifier';
 comment on column directory_entry_rev.name is 'Path name, relative to containing directory';
 comment on column directory_entry_rev.perms is 'Unix-like permissions';
 
 
 -- A person referenced by some source code artifacts, e.g., a VCS revision or
 -- release metadata.
 create table person
 (
   id        bigserial,
   name      bytea,          -- advisory: not null if we managed to parse a name
   email     bytea,          -- advisory: not null if we managed to parse an email
   fullname  bytea not null  -- freeform specification; what is actually used in the checksums
                             --     will usually be of the form 'name <email>'
 );
 
 comment on table person is 'Person referenced in code artifact release metadata';
 comment on column person.id is 'Person identifier';
 comment on column person.name is 'Name';
 comment on column person.email is 'Email';
 comment on column person.fullname is 'Full name (raw name)';
 
 
 -- The state of a source code tree at a specific point in time.
 --
 -- Synonyms/mappings:
 -- * git / subversion / etc: commit
 -- * tarball: a specific tarball
 --
 -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in
 -- case of merges) parent revisions. Each revision points to a directory, i.e.,
 -- a file-system tree containing files and directories.
 create table revision
 (
   id                    sha1_git not null,
   date                  timestamptz,
   date_offset           smallint,
   committer_date        timestamptz,
   committer_date_offset smallint,
   type                  revision_type not null,
   directory             sha1_git,  -- source code 'root' directory
   message               bytea,
   author                bigint,
   committer             bigint,
   synthetic             boolean not null default false,  -- true iff revision has been created by Software Heritage
   metadata              jsonb,  -- extra metadata (tarball checksums, extra commit information, etc...)
   object_id             bigserial,
   date_neg_utc_offset   boolean,
   committer_date_neg_utc_offset boolean
 );
 
 comment on table revision is 'A revision represents the state of a source code tree at a specific point in time';
 comment on column revision.id is 'Git-style SHA1 commit identifier';
 comment on column revision.date is 'Author timestamp as UNIX epoch';
 comment on column revision.date_offset is 'Author timestamp timezone, as minute offsets from UTC';
 comment on column revision.date_neg_utc_offset is 'True indicates a -0 UTC offset on author timestamp';
 comment on column revision.committer_date is 'Committer timestamp as UNIX epoch';
 comment on column revision.committer_date_offset is 'Committer timestamp timezone, as minute offsets from UTC';
 comment on column revision.committer_date_neg_utc_offset is 'True indicates a -0 UTC offset on committer timestamp';
 comment on column revision.type is 'Type of revision';
 comment on column revision.directory is 'Directory identifier';
 comment on column revision.message is 'Commit message';
 comment on column revision.author is 'Author identity';
 comment on column revision.committer is 'Committer identity';
 comment on column revision.synthetic is 'True iff revision has been synthesized by Software Heritage';
 comment on column revision.metadata is 'Extra revision metadata';
 comment on column revision.object_id is 'Non-intrinsic, sequential object identifier';
 
 
 -- either this table or the sha1_git[] column on the revision table
 create table revision_history
 (
   id           sha1_git not null,
   parent_id    sha1_git not null,
   parent_rank  int not null default 0
     -- parent position in merge commits, 0-based
 );
 
 comment on table revision_history is 'Sequence of revision history with parent and position in history';
 comment on column revision_history.id is 'Revision history git object sha1 checksum';
 comment on column revision_history.parent_id is 'Parent revision git object identifier';
 comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based';
 
 
 -- Crawling history of software origins visited by Software Heritage. Each
 -- visit is a 3-way mapping between a software origin, a timestamp, and a
 -- snapshot object capturing the full-state of the origin at visit time.
 create table origin_visit
 (
   origin       bigint not null,
   visit        bigint not null,
   date         timestamptz not null,
   type         text not null
 );
 
 comment on column origin_visit.origin is 'Visited origin';
 comment on column origin_visit.visit is 'Sequential visit number for the origin';
 comment on column origin_visit.date is 'Visit timestamp';
 comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)';
 
 
 -- Crawling history of software origin visits by Software Heritage. Each
 -- visit sees its history change through new origin visit status updates.
 create table origin_visit_status
 (
   origin   bigint not null,
   visit    bigint not null,
   date     timestamptz not null,
   status   origin_visit_state not null,
   metadata jsonb,
   snapshot sha1_git
 );
 
 comment on column origin_visit_status.origin is 'Origin concerned by the visit update';
 comment on column origin_visit_status.visit is 'Visit concerned by the visit update';
 comment on column origin_visit_status.date is 'Visit update timestamp';
 comment on column origin_visit_status.status is 'Visit status (ongoing, failed, full)';
 comment on column origin_visit_status.metadata is 'Optional origin visit metadata';
 comment on column origin_visit_status.snapshot is 'Optional, possibly partial, snapshot of the origin visit';
 
 
 -- A snapshot represents the entire state of a software origin as crawled by
 -- Software Heritage. This table is a simple mapping between (public) intrinsic
 -- snapshot identifiers and (private) numeric sequential identifiers.
 create table snapshot
 (
   object_id  bigserial not null,  -- PK internal object identifier
   id         sha1_git not null    -- snapshot intrinsic identifier
 );
 
 comment on table snapshot is 'State of a software origin as crawled by Software Heritage';
 comment on column snapshot.object_id is 'Internal object identifier';
 comment on column snapshot.id is 'Intrinsic snapshot identifier';
 
 
 -- Each snapshot associates "branch" names to other objects in the Software
 -- Heritage Merkle DAG. This table describes branches as mappings between names
 -- and target typed objects.
 create table snapshot_branch
 (
   object_id    bigserial not null,  -- PK internal object identifier
   name         bytea not null,      -- branch name, e.g., "master" or "feature/drag-n-drop"
   target       bytea,               -- target object identifier, e.g., a revision identifier
   target_type  snapshot_target      -- target object type, e.g., "revision"
 );
 
 comment on table snapshot_branch is 'Associates branches with objects in Heritage Merkle DAG';
 comment on column snapshot_branch.object_id is 'Internal object identifier';
 comment on column snapshot_branch.name is 'Branch name';
 comment on column snapshot_branch.target is 'Target object identifier';
 comment on column snapshot_branch.target_type is 'Target object type';
 
 
 -- Mapping between snapshots and their branches.
 create table snapshot_branches
 (
   snapshot_id  bigint not null,  -- snapshot identifier, ref. snapshot.object_id
   branch_id    bigint not null   -- branch identifier, ref. snapshot_branch.object_id
 );
 
 comment on table snapshot_branches is 'Mapping between snapshots and their branches';
 comment on column snapshot_branches.snapshot_id is 'Snapshot identifier';
 comment on column snapshot_branches.branch_id is 'Branch identifier';
 
 
 -- A "memorable" point in time in the development history of a software
 -- project.
 --
 -- Synonyms/mappings:
 -- * git: tag (of the annotated kind, otherwise they are just references)
 -- * tarball: the release version number
 create table release
 (
   id          sha1_git not null,
   target      sha1_git,
   date        timestamptz,
   date_offset smallint,
   name        bytea,
   comment     bytea,
   author      bigint,
   synthetic   boolean not null default false,  -- true iff release has been created by Software Heritage
   object_id   bigserial,
   target_type object_type not null,
   date_neg_utc_offset  boolean
 );
 
 comment on table release is 'Details of a software release, synonymous with
  a tag (git) or version number (tarball)';
 comment on column release.id is 'Release git identifier';
 comment on column release.target is 'Target git identifier';
 comment on column release.date is 'Release timestamp';
 comment on column release.date_offset is 'Timestamp offset from UTC';
 comment on column release.name is 'Name';
 comment on column release.comment is 'Comment';
 comment on column release.author is 'Author';
 comment on column release.synthetic is 'Indicates if created by Software Heritage';
 comment on column release.object_id is 'Object identifier';
 comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'',
  ''release'', ''snapshot'')';
 comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp';
 
 -- Tools
 create table metadata_fetcher
 (
   id            serial  not null,
   name          text    not null,
   version       text    not null,
   metadata      jsonb   not null
 );
 
 comment on table metadata_fetcher is 'Tools used to retrieve metadata';
 comment on column metadata_fetcher.id is 'Internal identifier of the fetcher';
 comment on column metadata_fetcher.name is 'Fetcher name';
 comment on column metadata_fetcher.version is 'Fetcher version';
 comment on column metadata_fetcher.metadata is 'Extra information about the fetcher';
 
 
 create table metadata_authority
 (
   id            serial  not null,
   type          text    not null,
   url           text    not null,
   metadata      jsonb   not null
 );
 
 comment on table metadata_authority is 'Metadata authority information';
 comment on column metadata_authority.id is 'Internal identifier of the authority';
 comment on column metadata_authority.type is 'Type of authority (deposit/forge/registry)';
 comment on column metadata_authority.url is 'Authority''s uri';
 comment on column metadata_authority.metadata is 'Other metadata about authority';
 
 
--- Discovery of metadata during a listing, loading, deposit or external_catalog of an origin
--- also provides a translation to a defined json schema using a translation tool (tool_id)
-create table origin_metadata
+-- Extrinsic metadata on DAG objects and origins.
+create table object_metadata
 (
-  id             bigserial     not null,  -- PK internal object identifier
-  origin_id      bigint        not null,  -- references origin(id)
-  discovery_date timestamptz   not null,  -- when it was extracted
+  type           text          not null,
+  id             text          not null,
+
+  -- metadata source
   authority_id   bigint        not null,
   fetcher_id     bigint        not null,
-  format         text          not null default 'sword-v2-atom-codemeta-v2-in-json',
+  discovery_date timestamptz   not null,
+
+  -- metadata itself
+  format         text          not null,
   metadata       bytea         not null
 );
 
-comment on table origin_metadata is 'keeps all metadata found concerning an origin';
-comment on column origin_metadata.id is 'the origin_metadata object''s id';
-comment on column origin_metadata.origin_id is 'the origin id for which the metadata was found';
-comment on column origin_metadata.discovery_date is 'the date of retrieval';
-comment on column origin_metadata.authority_id is 'the metadata provider: github, openhub, deposit, etc.';
-comment on column origin_metadata.fetcher_id is 'the tool used for extracting metadata: loaders, crawlers, etc.';
-comment on column origin_metadata.format is 'name of the format of metadata, used by readers to interpret it.';
-comment on column origin_metadata.metadata is 'original metadata in opaque format';
+comment on table object_metadata is 'keeps all metadata found concerning an object';
+comment on column object_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on';
+comment on column object_metadata.id is 'the SWHID or origin URL for which the metadata was found';
+comment on column object_metadata.discovery_date is 'the date of retrieval';
+comment on column object_metadata.authority_id is 'the metadata provider: github, openhub, deposit, etc.';
+comment on column object_metadata.fetcher_id is 'the tool used for extracting metadata: loaders, crawlers, etc.';
+comment on column object_metadata.format is 'name of the format of metadata, used by readers to interpret it.';
+comment on column object_metadata.metadata is 'original metadata in opaque format';
 
 
 -- Keep a cache of object counts
 create table object_counts
 (
   object_type text,             -- table for which we're counting objects (PK)
   value bigint,                 -- count of objects in the table
   last_update timestamptz,      -- last update for the object count in this table
   single_update boolean         -- whether we update this table standalone (true) or through bucketed counts (false)
 );
 
 comment on table object_counts is 'Cache of object counts';
 comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'',
  ''release'', ''snapshot'')';
 comment on column object_counts.value is 'Count of objects in the table';
 comment on column object_counts.last_update is 'Last update for object count';
 comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)';
 
 
 create table object_counts_bucketed
 (
     line serial not null,       -- PK
     object_type text not null,  -- table for which we're counting objects
     identifier text not null,   -- identifier across which we're bucketing objects
     bucket_start bytea,         -- lower bound (inclusive) for the bucket
     bucket_end bytea,           -- upper bound (exclusive) for the bucket
     value bigint,               -- count of objects in the bucket
     last_update timestamptz     -- last update for the object count in this bucket
 );
 
 comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type';
 comment on column object_counts_bucketed.line is 'Auto-incremented identifier value';
 comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'',
  ''release'', ''snapshot'')';
 comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects';
 comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket';
 comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket';
 comment on column object_counts_bucketed.value is 'Count of objects in the bucket';
 comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket';
diff --git a/swh/storage/sql/60-swh-indexes.sql b/swh/storage/sql/60-swh-indexes.sql
index 2b85586d..669d8230 100644
--- a/swh/storage/sql/60-swh-indexes.sql
+++ b/swh/storage/sql/60-swh-indexes.sql
@@ -1,192 +1,186 @@
 -- content
 
 create unique index concurrently content_pkey on content(sha1);
 create unique index concurrently on content(sha1_git);
 create index concurrently on content(sha256);
 create index concurrently on content(blake2s256);
 create index concurrently on content(ctime);  -- TODO use a BRIN index here (postgres >= 9.5)
 create unique index concurrently on content(object_id);
 
 alter table content add primary key using index content_pkey;
 
 
 -- origin
 
 create unique index concurrently origin_pkey on origin(id);
 create unique index concurrently on origin using btree(url);
 create index concurrently on origin using gin (url gin_trgm_ops);
 create index concurrently on origin using btree(digest(url, 'sha1'));
 
 alter table origin add primary key using index origin_pkey;
 
 
 -- skipped_content
 
 alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256);
 
 create index concurrently on skipped_content(sha1);
 create index concurrently on skipped_content(sha1_git);
 create index concurrently on skipped_content(sha256);
 create index concurrently on skipped_content(blake2s256);
 create unique index concurrently on skipped_content(object_id);
 
 alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table skipped_content validate constraint skipped_content_origin_fkey;
 
 -- directory
 
 create unique index concurrently directory_pkey on directory(id);
 alter table directory add primary key using index directory_pkey;
 
 create index concurrently on directory using gin (dir_entries);
 create index concurrently on directory using gin (file_entries);
 create index concurrently on directory using gin (rev_entries);
 create unique index concurrently on directory(object_id);
 
 -- directory_entry_dir
 
 create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id);
 alter table directory_entry_dir add primary key using index directory_entry_dir_pkey;
 
 create unique index concurrently on directory_entry_dir(target, name, perms);
 
 -- directory_entry_file
 
 create unique index concurrently directory_entry_file_pkey on directory_entry_file(id);
 alter table directory_entry_file add primary key using index directory_entry_file_pkey;
 
 create unique index concurrently on directory_entry_file(target, name, perms);
 
 -- directory_entry_rev
 
 create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id);
 alter table directory_entry_rev add primary key using index directory_entry_rev_pkey;
 
 create unique index concurrently on directory_entry_rev(target, name, perms);
 
 -- person
 create unique index concurrently person_pkey on person(id);
 alter table person add primary key using index person_pkey;
 
 create unique index concurrently on person(fullname);
 create index concurrently on person(name);
 create index concurrently on person(email);
 
 -- revision
 create unique index concurrently revision_pkey on revision(id);
 alter table revision add primary key using index revision_pkey;
 
 alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid;
 alter table revision validate constraint revision_author_fkey;
 alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid;
 alter table revision validate constraint revision_committer_fkey;
 
 create index concurrently on revision(directory);
 create unique index concurrently on revision(object_id);
 
 -- revision_history
 create unique index concurrently revision_history_pkey on revision_history(id, parent_rank);
 alter table revision_history add primary key using index revision_history_pkey;
 
 create index concurrently on revision_history(parent_id);
 
 alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid;
 alter table revision_history validate constraint revision_history_id_fkey;
 
 -- snapshot
 create unique index concurrently snapshot_pkey on snapshot(object_id);
 alter table snapshot add primary key using index snapshot_pkey;
 
 create unique index concurrently on snapshot(id);
 
 -- snapshot_branch
 create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id);
 alter table snapshot_branch add primary key using index snapshot_branch_pkey;
 
 create unique index concurrently on snapshot_branch (target_type, target, name);
 alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid;
 alter table snapshot_branch validate constraint snapshot_branch_target_check;
 alter table snapshot_branch add constraint snapshot_target_check check (target_type not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid;
 alter table snapshot_branch validate constraint snapshot_target_check;
 
 create unique index concurrently on snapshot_branch (name) where target_type is null and target is null;
 
 -- snapshot_branches
 create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id);
 alter table snapshot_branches add primary key using index snapshot_branches_pkey;
 
 alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid;
 alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey;
 
 alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid;
 alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey;
 
 -- origin_visit
 create unique index concurrently origin_visit_pkey on origin_visit(origin, visit);
 alter table origin_visit add primary key using index origin_visit_pkey;
 
 create index concurrently on origin_visit(date);
 create index concurrently origin_visit_type_date on origin_visit(type, date);
 
 alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table origin_visit validate constraint origin_visit_origin_fkey;
 
 -- origin_visit_status
 
 create unique index concurrently origin_visit_status_pkey on origin_visit_status(origin, visit, date);
 alter table origin_visit_status add primary key using index origin_visit_status_pkey;
 
 alter table origin_visit_status
   add constraint origin_visit_status_origin_visit_fkey
   foreign key (origin, visit)
   references origin_visit(origin, visit) not valid;
 alter table origin_visit_status validate constraint origin_visit_status_origin_visit_fkey;
 
 -- release
 create unique index concurrently release_pkey on release(id);
 alter table release add primary key using index release_pkey;
 
 create index concurrently on release(target, target_type);
 create unique index concurrently on release(object_id);
 
 alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid;
 alter table release validate constraint release_author_fkey;
 
 -- if the author is null, then the date must be null
 alter table release add constraint release_author_date_check check ((date is null) or (author is not null)) not valid;
 alter table release validate constraint release_author_date_check;
 
 -- metadata_fetcher
 create unique index metadata_fetcher_pkey on metadata_fetcher(id);
 alter table metadata_fetcher add primary key using index metadata_fetcher_pkey;
 
 create unique index metadata_fetcher_name_version on metadata_fetcher(name, version);
 
 -- metadata_authority
 create unique index concurrently metadata_authority_pkey on metadata_authority(id);
 alter table metadata_authority add primary key using index metadata_authority_pkey;
 
 create unique index metadata_authority_type_url on metadata_authority(type, url);
 
--- origin_metadata
-create unique index concurrently origin_metadata_pkey on origin_metadata(id);
-alter table origin_metadata add primary key using index origin_metadata_pkey;
+-- object_metadata
+create unique index concurrently object_metadata_content_authority_date_fetcher on object_metadata(id, authority_id, discovery_date, fetcher_id);
 
-create unique index concurrently origin_metadata_origin_authority_date_fetcher on origin_metadata(origin_id, authority_id, discovery_date, fetcher_id);
+alter table object_metadata add constraint object_metadata_authority_fkey foreign key (authority_id) references metadata_authority(id) not valid;
+alter table object_metadata validate constraint object_metadata_authority_fkey;
 
-alter table origin_metadata add constraint origin_metadata_origin_fkey foreign key (origin_id) references origin(id) not valid;
-alter table origin_metadata validate constraint origin_metadata_origin_fkey;
-
-alter table origin_metadata add constraint origin_metadata_authority_fkey foreign key (authority_id) references metadata_authority(id) not valid;
-alter table origin_metadata validate constraint origin_metadata_authority_fkey;
-
-alter table origin_metadata add constraint origin_metadata_fetcher_fkey foreign key (fetcher_id) references metadata_fetcher(id) not valid;
-alter table origin_metadata validate constraint origin_metadata_fetcher_fkey;
+alter table object_metadata add constraint object_metadata_fetcher_fkey foreign key (fetcher_id) references metadata_fetcher(id) not valid;
+alter table object_metadata validate constraint object_metadata_fetcher_fkey;
 
 -- object_counts
 create unique index concurrently object_counts_pkey on object_counts(object_type);
 alter table object_counts add primary key using index object_counts_pkey;
 
 -- object_counts_bucketed
 create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line);
 alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey;
diff --git a/swh/storage/storage.py b/swh/storage/storage.py
index 14f0884d..31d47745 100644
--- a/swh/storage/storage.py
+++ b/swh/storage/storage.py
@@ -1,1316 +1,1376 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import contextlib
 import datetime
 import itertools
 
 from collections import defaultdict
 from contextlib import contextmanager
 from deprecated import deprecated
 from typing import Any, Dict, Iterable, List, Optional
 
 import attr
 import psycopg2
 import psycopg2.pool
 import psycopg2.errors
 
 from swh.core.api.serializers import msgpack_loads, msgpack_dumps
 from swh.model.model import (
     Content,
     Directory,
     Origin,
     OriginVisit,
     OriginVisitStatus,
     Revision,
     Release,
     SkippedContent,
     Snapshot,
     SHA1_SIZE,
 )
 from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex
 from swh.storage.objstorage import ObjStorage
 from swh.storage.validate import VALIDATION_EXCEPTIONS
 from swh.storage.utils import now
 
 from . import converters
 from .common import db_transaction_generator, db_transaction
 from .db import Db
 from .exc import StorageArgumentException, StorageDBError, HashCollision
 from .algos import diff
 from .metrics import timed, send_metric, process_metrics
 from .utils import get_partition_bounds_bytes, extract_collision_hash
 from .writer import JournalWriter
 
 
 # Max block size of contents to return
 BULK_BLOCK_CONTENT_LEN_MAX = 10000
 
 EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e")
 """Identifier for the empty snapshot"""
 
 
 VALIDATION_EXCEPTIONS = VALIDATION_EXCEPTIONS + [
     psycopg2.errors.CheckViolation,
     psycopg2.errors.IntegrityError,
     psycopg2.errors.InvalidTextRepresentation,
     psycopg2.errors.NotNullViolation,
     psycopg2.errors.NumericValueOutOfRange,
     psycopg2.errors.UndefinedFunction,  # (raised on wrong argument types)
 ]
 """Exceptions raised by postgresql when validation of the arguments
 failed."""
 
 
 @contextlib.contextmanager
 def convert_validation_exceptions():
     """Catches postgresql errors related to invalid arguments, and
     re-raises a StorageArgumentException."""
     try:
         yield
     except tuple(VALIDATION_EXCEPTIONS) as e:
         raise StorageArgumentException(str(e))
 
 
 class Storage:
     """SWH storage proxy, encompassing DB and object storage
 
     """
 
     def __init__(
         self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None
     ):
         """
         Args:
             db: either a libpq connection string, or a psycopg2 connection
             objstorage: configuration of the object storage backend
 
         """
         try:
             if isinstance(db, psycopg2.extensions.connection):
                 self._pool = None
                 self._db = Db(db)
             else:
                 self._pool = psycopg2.pool.ThreadedConnectionPool(
                     min_pool_conns, max_pool_conns, db
                 )
                 self._db = None
         except psycopg2.OperationalError as e:
             raise StorageDBError(e)
 
         self.journal_writer = JournalWriter(journal_writer)
         self.objstorage = ObjStorage(objstorage)
 
     def get_db(self):
         if self._db:
             return self._db
         else:
             return Db.from_pool(self._pool)
 
     def put_db(self, db):
         if db is not self._db:
             db.put_conn()
 
     @contextmanager
     def db(self):
         db = None
         try:
             db = self.get_db()
             yield db
         finally:
             if db:
                 self.put_db(db)
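
A small illustrative sketch of the connection-borrowing pattern above, for internal callers; it assumes `storage` is a `Storage` instance and reuses the `Db.transaction()` context manager seen elsewhere in this module:

.. code-block:: python

    # Borrow a Db wrapper (a pooled connection, unless a fixed connection was
    # passed to __init__), run one transaction, then hand it back via put_db().
    with storage.db() as db:
        with db.transaction() as cur:
            cur.execute("select count(*) from content")
            (count,) = cur.fetchone()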
 
     @timed
     @db_transaction()
     def check_config(self, *, check_write, db=None, cur=None):
 
         if not self.objstorage.check_config(check_write=check_write):
             return False
 
         # Check permissions on one of the tables
         if check_write:
             check = "INSERT"
         else:
             check = "SELECT"
 
         cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,))
         return cur.fetchone()[0]
 
     def _content_unique_key(self, hash, db):
         """Given a hash (tuple or dict), return a unique key from the
            aggregation of keys.
 
         """
         keys = db.content_hash_keys
         if isinstance(hash, tuple):
             return hash
         return tuple([hash[k] for k in keys])
 
     def _content_add_metadata(self, db, cur, content):
         """Add content to the postgresql database but not the object storage.
         """
         # create temporary table for metadata injection
         db.mktemp("content", cur)
 
         db.copy_to(
             (c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur
         )
 
         # move metadata in place
         try:
             db.content_add_from_temp(cur)
         except psycopg2.IntegrityError as e:
             if e.diag.sqlstate == "23505" and e.diag.table_name == "content":
                 message_detail = e.diag.message_detail
                 if message_detail:
                     hash_name, hash_id = extract_collision_hash(message_detail)
                     collision_contents_hashes = [
                         c.hashes() for c in content if c.get_hash(hash_name) == hash_id
                     ]
                 else:
                     constraint_to_hash_name = {
                         "content_pkey": "sha1",
                         "content_sha1_git_idx": "sha1_git",
                         "content_sha256_idx": "sha256",
                     }
                     hash_name = constraint_to_hash_name.get(e.diag.constraint_name)
                     hash_id = None
                     collision_contents_hashes = None
 
                 raise HashCollision(
                     hash_name, hash_id, collision_contents_hashes
                 ) from None
             else:
                 raise
 
     @timed
     @process_metrics
     def content_add(self, content: Iterable[Content]) -> Dict:
         ctime = now()
 
         contents = [attr.evolve(c, ctime=ctime) for c in content]
 
         objstorage_summary = self.objstorage.content_add(contents)
 
         with self.db() as db:
             with db.transaction() as cur:
                 missing = list(
                     self.content_missing(
                         map(Content.to_dict, contents),
                         key_hash="sha1_git",
                         db=db,
                         cur=cur,
                     )
                 )
                 contents = [c for c in contents if c.sha1_git in missing]
 
                 self.journal_writer.content_add(contents)
                 self._content_add_metadata(db, cur, contents)
 
         return {
             "content:add": len(contents),
             "content:add:bytes": objstorage_summary["content:add:bytes"],
         }
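
A minimal usage sketch for `content_add`, assuming `storage` is a configured `Storage` instance; the sample bytes are arbitrary, and `Content.from_data` (a constructor in `swh.model.model` that computes hashes and length from raw bytes) is used for convenience:

.. code-block:: python

    from swh.model.model import Content

    # Build a Content object from raw bytes and insert it; the returned summary
    # reports how many contents and bytes were actually added.
    content = Content.from_data(b"hello, archive\n")
    summary = storage.content_add([content])
    print(summary["content:add"], summary["content:add:bytes"])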
 
     @timed
     @db_transaction()
     def content_update(self, content, keys=[], db=None, cur=None):
         # TODO: Add a check on input keys. How to properly implement
         # this? We don't know yet the new columns.
         self.journal_writer.content_update(content)
 
         db.mktemp("content", cur)
         select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
         with convert_validation_exceptions():
             db.copy_to(content, "tmp_content", select_keys, cur)
             db.content_update_from_temp(keys_to_update=keys, cur=cur)
 
     @timed
     @process_metrics
     @db_transaction()
     def content_add_metadata(
         self, content: Iterable[Content], db=None, cur=None
     ) -> Dict:
         contents = list(content)
         missing = self.content_missing(
             (c.to_dict() for c in contents), key_hash="sha1_git", db=db, cur=cur,
         )
         contents = [c for c in contents if c.sha1_git in missing]
 
         self.journal_writer.content_add_metadata(contents)
         self._content_add_metadata(db, cur, contents)
 
         return {
             "content:add": len(contents),
         }
 
     @timed
     def content_get(self, content):
         # FIXME: Make this method support slicing the `data`.
         if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
             raise StorageArgumentException(
                 "Send at maximum %s contents." % BULK_BLOCK_CONTENT_LEN_MAX
             )
         yield from self.objstorage.content_get(content)
 
     @timed
     @db_transaction()
     def content_get_range(self, start, end, limit=1000, db=None, cur=None):
         if limit is None:
             raise StorageArgumentException("limit should not be None")
         contents = []
         next_content = None
         for counter, content_row in enumerate(
             db.content_get_range(start, end, limit + 1, cur)
         ):
             content = dict(zip(db.content_get_metadata_keys, content_row))
             if counter >= limit:
                 # take the last content for the next page, starting from this one
                 next_content = content["sha1"]
                 break
             contents.append(content)
         return {
             "contents": contents,
             "next": next_content,
         }
 
     @timed
     def content_get_partition(
         self,
         partition_id: int,
         nb_partitions: int,
         limit: int = 1000,
         page_token: str = None,
     ):
         if limit is None:
             raise StorageArgumentException("limit should not be None")
         (start, end) = get_partition_bounds_bytes(
             partition_id, nb_partitions, SHA1_SIZE
         )
         if page_token:
             start = hash_to_bytes(page_token)
         if end is None:
             end = b"\xff" * SHA1_SIZE
         result = self.content_get_range(start, end, limit)
         result2 = {
             "contents": result["contents"],
             "next_page_token": None,
         }
         if result["next"]:
             result2["next_page_token"] = hash_to_hex(result["next"])
         return result2
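
A sketch of walking one partition page by page, using the `next_page_token` scheme implemented above; `storage` is assumed to be a configured instance, the partition parameters are arbitrary, and `process` is a placeholder callback:

.. code-block:: python

    # Enumerate the contents of partition 3 out of 16, 1000 rows at a time.
    page_token = None
    while True:
        result = storage.content_get_partition(
            partition_id=3, nb_partitions=16, limit=1000, page_token=page_token
        )
        for row in result["contents"]:
            process(row["sha1"])  # placeholder callback
        page_token = result["next_page_token"]
        if page_token is None:
            break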
 
     @timed
     @db_transaction(statement_timeout=500)
     def content_get_metadata(
         self, contents: List[bytes], db=None, cur=None
     ) -> Dict[bytes, List[Dict]]:
         result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents}
         for row in db.content_get_metadata_from_sha1s(contents, cur):
             content_meta = dict(zip(db.content_get_metadata_keys, row))
             result[content_meta["sha1"]].append(content_meta)
         return result
 
     @timed
     @db_transaction_generator()
     def content_missing(self, content, key_hash="sha1", db=None, cur=None):
         keys = db.content_hash_keys
 
         if key_hash not in keys:
             raise StorageArgumentException("key_hash should be one of %s" % keys)
 
         key_hash_idx = keys.index(key_hash)
 
         if not content:
             return
 
         for obj in db.content_missing_from_list(content, cur):
             yield obj[key_hash_idx]
 
     @timed
     @db_transaction_generator()
     def content_missing_per_sha1(self, contents, db=None, cur=None):
         for obj in db.content_missing_per_sha1(contents, cur):
             yield obj[0]
 
     @timed
     @db_transaction_generator()
     def content_missing_per_sha1_git(self, contents, db=None, cur=None):
         for obj in db.content_missing_per_sha1_git(contents, cur):
             yield obj[0]
 
     @timed
     @db_transaction()
     def content_find(self, content, db=None, cur=None):
         if not set(content).intersection(DEFAULT_ALGORITHMS):
             raise StorageArgumentException(
                 "content keys must contain at least one of: "
                 "sha1, sha1_git, sha256, blake2s256"
             )
 
         contents = db.content_find(
             sha1=content.get("sha1"),
             sha1_git=content.get("sha1_git"),
             sha256=content.get("sha256"),
             blake2s256=content.get("blake2s256"),
             cur=cur,
         )
         return [dict(zip(db.content_find_cols, content)) for content in contents]
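
A short sketch of looking up content rows by one of their hashes; `storage` is assumed to be a configured instance and the hash value is a placeholder:

.. code-block:: python

    # Any subset of sha1 / sha1_git / sha256 / blake2s256 may be provided.
    matches = storage.content_find(
        {"sha1_git": bytes.fromhex("94a9ed024d3859793618152ea559a168bbcbb5e2")}
    )
    for row in matches:
        print(row)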
 
     @timed
     @db_transaction()
     def content_get_random(self, db=None, cur=None):
         return db.content_get_random(cur)
 
     @staticmethod
     def _skipped_content_normalize(d):
         d = d.copy()
 
         if d.get("status") is None:
             d["status"] = "absent"
 
         if d.get("length") is None:
             d["length"] = -1
 
         return d
 
     @staticmethod
     def _skipped_content_validate(d):
         """Sanity checks on status / reason / length, that postgresql
         doesn't enforce."""
         if d["status"] != "absent":
             raise StorageArgumentException(
                 "Invalid content status: {}".format(d["status"])
             )
 
         if d.get("reason") is None:
             raise StorageArgumentException(
                 "Must provide a reason if content is absent."
             )
 
         if d["length"] < -1:
             raise StorageArgumentException("Content length must be positive or -1.")
 
     def _skipped_content_add_metadata(self, db, cur, content: Iterable[SkippedContent]):
         origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur)
         content = [
             attr.evolve(c, origin=origin_id)
             for (c, origin_id) in zip(content, origin_ids)
         ]
         db.mktemp("skipped_content", cur)
         db.copy_to(
             [c.to_dict() for c in content],
             "tmp_skipped_content",
             db.skipped_content_keys,
             cur,
         )
 
         # move metadata in place
         db.skipped_content_add_from_temp(cur)
 
     @timed
     @process_metrics
     @db_transaction()
     def skipped_content_add(
         self, content: Iterable[SkippedContent], db=None, cur=None
     ) -> Dict:
         ctime = now()
         content = [attr.evolve(c, ctime=ctime) for c in content]
 
         missing_contents = self.skipped_content_missing(
             (c.to_dict() for c in content), db=db, cur=cur,
         )
         content = [
             c
             for c in content
             if any(
                 all(
                     c.get_hash(algo) == missing_content.get(algo)
                     for algo in DEFAULT_ALGORITHMS
                 )
                 for missing_content in missing_contents
             )
         ]
 
         self.journal_writer.skipped_content_add(content)
         self._skipped_content_add_metadata(db, cur, content)
 
         return {
             "skipped_content:add": len(content),
         }
 
     @timed
     @db_transaction_generator()
     def skipped_content_missing(self, contents, db=None, cur=None):
         contents = list(contents)
         for content in db.skipped_content_missing(contents, cur):
             yield dict(zip(db.content_hash_keys, content))
 
     @timed
     @process_metrics
     @db_transaction()
     def directory_add(
         self, directories: Iterable[Directory], db=None, cur=None
     ) -> Dict:
         directories = list(directories)
         summary = {"directory:add": 0}
 
         dirs = set()
         dir_entries: Dict[str, defaultdict] = {
             "file": defaultdict(list),
             "dir": defaultdict(list),
             "rev": defaultdict(list),
         }
 
         for cur_dir in directories:
             dir_id = cur_dir.id
             dirs.add(dir_id)
             for src_entry in cur_dir.entries:
                 entry = src_entry.to_dict()
                 entry["dir_id"] = dir_id
                 dir_entries[entry["type"]][dir_id].append(entry)
 
         dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur))
         if not dirs_missing:
             return summary
 
         self.journal_writer.directory_add(
             dir_ for dir_ in directories if dir_.id in dirs_missing
         )
 
         # Copy directory ids
         dirs_missing_dict = ({"id": dir} for dir in dirs_missing)
         db.mktemp("directory", cur)
         db.copy_to(dirs_missing_dict, "tmp_directory", ["id"], cur)
 
         # Copy entries
         for entry_type, entry_list in dir_entries.items():
             entries = itertools.chain.from_iterable(
                 entries_for_dir
                 for dir_id, entries_for_dir in entry_list.items()
                 if dir_id in dirs_missing
             )
 
             db.mktemp_dir_entry(entry_type)
 
             db.copy_to(
                 entries,
                 "tmp_directory_entry_%s" % entry_type,
                 ["target", "name", "perms", "dir_id"],
                 cur,
             )
 
         # Do the final copy
         db.directory_add_from_temp(cur)
         summary["directory:add"] = len(dirs_missing)
 
         return summary
 
     @timed
     @db_transaction_generator()
     def directory_missing(self, directories, db=None, cur=None):
         for obj in db.directory_missing_from_list(directories, cur):
             yield obj[0]
 
     @timed
     @db_transaction_generator(statement_timeout=20000)
     def directory_ls(self, directory, recursive=False, db=None, cur=None):
         if recursive:
             res_gen = db.directory_walk(directory, cur=cur)
         else:
             res_gen = db.directory_walk_one(directory, cur=cur)
 
         for line in res_gen:
             yield dict(zip(db.directory_ls_cols, line))
 
     @timed
     @db_transaction(statement_timeout=2000)
     def directory_entry_get_by_path(self, directory, paths, db=None, cur=None):
         res = db.directory_entry_get_by_path(directory, paths, cur)
         if res:
             return dict(zip(db.directory_ls_cols, res))
 
     @timed
     @db_transaction()
     def directory_get_random(self, db=None, cur=None):
         return db.directory_get_random(cur)
 
     @timed
     @process_metrics
     @db_transaction()
     def revision_add(self, revisions: Iterable[Revision], db=None, cur=None) -> Dict:
         revisions = list(revisions)
         summary = {"revision:add": 0}
 
         revisions_missing = set(
             self.revision_missing(
                 set(revision.id for revision in revisions), db=db, cur=cur
             )
         )
 
         if not revisions_missing:
             return summary
 
         db.mktemp_revision(cur)
 
         revisions_filtered = [
             revision for revision in revisions if revision.id in revisions_missing
         ]
 
         self.journal_writer.revision_add(revisions_filtered)
 
         revisions_filtered = list(map(converters.revision_to_db, revisions_filtered))
 
         parents_filtered: List[bytes] = []
 
         with convert_validation_exceptions():
             db.copy_to(
                 revisions_filtered,
                 "tmp_revision",
                 db.revision_add_cols,
                 cur,
                 lambda rev: parents_filtered.extend(rev["parents"]),
             )
 
             db.revision_add_from_temp(cur)
 
             db.copy_to(
                 parents_filtered,
                 "revision_history",
                 ["id", "parent_id", "parent_rank"],
                 cur,
             )
 
         return {"revision:add": len(revisions_missing)}
 
     @timed
     @db_transaction_generator()
     def revision_missing(self, revisions, db=None, cur=None):
         if not revisions:
             return
 
         for obj in db.revision_missing_from_list(revisions, cur):
             yield obj[0]
 
     @timed
     @db_transaction_generator(statement_timeout=1000)
     def revision_get(self, revisions, db=None, cur=None):
         for line in db.revision_get_from_list(revisions, cur):
             data = converters.db_to_revision(dict(zip(db.revision_get_cols, line)))
             if not data["type"]:
                 yield None
                 continue
             yield data
 
     @timed
     @db_transaction_generator(statement_timeout=2000)
     def revision_log(self, revisions, limit=None, db=None, cur=None):
         for line in db.revision_log(revisions, limit, cur):
             data = converters.db_to_revision(dict(zip(db.revision_get_cols, line)))
             if not data["type"]:
                 yield None
                 continue
             yield data
 
     @timed
     @db_transaction_generator(statement_timeout=2000)
     def revision_shortlog(self, revisions, limit=None, db=None, cur=None):
 
         yield from db.revision_shortlog(revisions, limit, cur)
 
     @timed
     @db_transaction()
     def revision_get_random(self, db=None, cur=None):
         return db.revision_get_random(cur)
 
     @timed
     @process_metrics
     @db_transaction()
     def release_add(self, releases: Iterable[Release], db=None, cur=None) -> Dict:
         releases = list(releases)
         summary = {"release:add": 0}
 
         release_ids = set(release.id for release in releases)
         releases_missing = set(self.release_missing(release_ids, db=db, cur=cur))
 
         if not releases_missing:
             return summary
 
         db.mktemp_release(cur)
 
         releases_filtered = [
             release for release in releases if release.id in releases_missing
         ]
 
         self.journal_writer.release_add(releases_filtered)
 
         releases_filtered = list(map(converters.release_to_db, releases_filtered))
 
         with convert_validation_exceptions():
             db.copy_to(releases_filtered, "tmp_release", db.release_add_cols, cur)
 
             db.release_add_from_temp(cur)
 
         return {"release:add": len(releases_missing)}
 
     @timed
     @db_transaction_generator()
     def release_missing(self, releases, db=None, cur=None):
         if not releases:
             return
 
         for obj in db.release_missing_from_list(releases, cur):
             yield obj[0]
 
     @timed
     @db_transaction_generator(statement_timeout=500)
     def release_get(self, releases, db=None, cur=None):
         for release in db.release_get_from_list(releases, cur):
             data = converters.db_to_release(dict(zip(db.release_get_cols, release)))
             yield data if data["target_type"] else None
 
     @timed
     @db_transaction()
     def release_get_random(self, db=None, cur=None):
         return db.release_get_random(cur)
 
     @timed
     @process_metrics
     @db_transaction()
     def snapshot_add(self, snapshots: Iterable[Snapshot], db=None, cur=None) -> Dict:
         created_temp_table = False
 
         count = 0
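+        # Branches of each new snapshot are bulk-copied into a temporary
+        # table before the snapshot itself is registered.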
         for snapshot in snapshots:
             if not db.snapshot_exists(snapshot.id, cur):
                 if not created_temp_table:
                     db.mktemp_snapshot_branch(cur)
                     created_temp_table = True
 
                 with convert_validation_exceptions():
                     db.copy_to(
                         (
                             {
                                 "name": name,
                                 "target": info.target if info else None,
                                 "target_type": (
                                     info.target_type.value if info else None
                                 ),
                             }
                             for name, info in snapshot.branches.items()
                         ),
                         "tmp_snapshot_branch",
                         ["name", "target", "target_type"],
                         cur,
                     )
 
                 self.journal_writer.snapshot_add([snapshot])
 
                 db.snapshot_add(snapshot.id, cur)
                 count += 1
 
         return {"snapshot:add": count}
 
     @timed
     @db_transaction_generator()
     def snapshot_missing(self, snapshots, db=None, cur=None):
         for obj in db.snapshot_missing_from_list(snapshots, cur):
             yield obj[0]
 
     @timed
     @db_transaction(statement_timeout=2000)
     def snapshot_get(self, snapshot_id, db=None, cur=None):
 
         return self.snapshot_get_branches(snapshot_id, db=db, cur=cur)
 
     @timed
     @db_transaction(statement_timeout=2000)
     def snapshot_get_by_origin_visit(self, origin, visit, db=None, cur=None):
         snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur)
 
         if snapshot_id:
             return self.snapshot_get(snapshot_id, db=db, cur=cur)
 
         return None
 
     @timed
     @db_transaction(statement_timeout=2000)
     def snapshot_count_branches(self, snapshot_id, db=None, cur=None):
         return dict([bc for bc in db.snapshot_count_branches(snapshot_id, cur)])
 
     @timed
     @db_transaction(statement_timeout=2000)
     def snapshot_get_branches(
         self,
         snapshot_id,
         branches_from=b"",
         branches_count=1000,
         target_types=None,
         db=None,
         cur=None,
     ):
         if snapshot_id == EMPTY_SNAPSHOT_ID:
             return {
                 "id": snapshot_id,
                 "branches": {},
                 "next_branch": None,
             }
 
         branches = {}
         next_branch = None
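+        # One branch more than requested is fetched: the extra row, if
+        # present, indicates that another page exists and provides the name
+        # to resume from.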
 
         fetched_branches = list(
             db.snapshot_get_by_id(
                 snapshot_id,
                 branches_from=branches_from,
                 branches_count=branches_count + 1,
                 target_types=target_types,
                 cur=cur,
             )
         )
         for branch in fetched_branches[:branches_count]:
             branch = dict(zip(db.snapshot_get_cols, branch))
             del branch["snapshot_id"]
             name = branch.pop("name")
             if branch == {"target": None, "target_type": None}:
                 branch = None
             branches[name] = branch
 
         if len(fetched_branches) > branches_count:
             branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1]))
             next_branch = branch["name"]
 
         if branches:
             return {
                 "id": snapshot_id,
                 "branches": branches,
                 "next_branch": next_branch,
             }
 
         return None
 
     @timed
     @db_transaction()
     def snapshot_get_random(self, db=None, cur=None):
         return db.snapshot_get_random(cur)
 
     @timed
     @db_transaction()
     def origin_visit_add(
         self, visits: Iterable[OriginVisit], db=None, cur=None
     ) -> Iterable[OriginVisit]:
+        # Materialize the iterable, as it is traversed twice below (existence
+        # check, then insertion).
+        visits = list(visits)
         for visit in visits:
             origin = self.origin_get({"url": visit.origin}, db=db, cur=cur)
             if not origin:  # Cannot add a visit without an origin
                 raise StorageArgumentException("Unknown origin %s", visit.origin)
 
         all_visits = []
         nb_visits = 0
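+        # Visits without an id get one allocated by the database; visits that
+        # already carry an id are upserted as-is.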
         for visit in visits:
             nb_visits += 1
             if not visit.visit:
                 with convert_validation_exceptions():
                     visit_id = db.origin_visit_add(
                         visit.origin, visit.date, visit.type, cur=cur
                     )
                 visit = attr.evolve(visit, visit=visit_id)
             else:
                 db.origin_visit_upsert(visit)
             assert visit.visit is not None
             all_visits.append(visit)
-            # Forced to write after for the case when the visit has no id
+            # The journal entry is written after id allocation, so it records
+            # the visit's final id even when none was provided.
             self.journal_writer.origin_visit_add([visit])
             visit_status = OriginVisitStatus(
                 origin=visit.origin,
                 visit=visit.visit,
                 date=visit.date,
                 status="created",
                 snapshot=None,
             )
             self._origin_visit_status_add(visit_status, db=db, cur=cur)
 
         send_metric("origin_visit:add", count=nb_visits, method_name="origin_visit")
         return all_visits
 
     def _origin_visit_status_add(
         self, visit_status: OriginVisitStatus, db, cur
     ) -> None:
         """Add an origin visit status"""
         self.journal_writer.origin_visit_status_add([visit_status])
         db.origin_visit_status_add(visit_status, cur=cur)
         send_metric(
             "origin_visit_status:add", count=1, method_name="origin_visit_status"
         )
 
     @timed
     @db_transaction()
     def origin_visit_status_add(
         self, visit_statuses: Iterable[OriginVisitStatus], db=None, cur=None,
     ) -> None:
-        # First round to check existence (fail early if any is ko)
+        # First round to check existence (fail early if any origin is unknown)
         for visit_status in visit_statuses:
             origin_url = self.origin_get({"url": visit_status.origin}, db=db, cur=cur)
             if not origin_url:
                 raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
 
         for visit_status in visit_statuses:
             self._origin_visit_status_add(visit_status, db, cur)
 
     @timed
     @db_transaction()
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
         db=None,
         cur=None,
     ) -> Optional[OriginVisitStatus]:
         row = db.origin_visit_status_get_latest(
             origin_url, visit, allowed_statuses, require_snapshot, cur=cur
         )
         if not row:
             return None
         return OriginVisitStatus.from_dict(row)
 
     def _origin_visit_get_updated(
         self, origin: str, visit_id: int, db, cur
     ) -> Optional[Dict[str, Any]]:
         """Retrieve origin visit and latest origin visit status and merge them
         into an origin visit.
 
         """
         row_visit = db.origin_visit_get(origin, visit_id)
         if row_visit is None:
             return None
         visit = dict(zip(db.origin_visit_get_cols, row_visit))
         return self._origin_visit_apply_update(visit, db=db, cur=cur)
 
     def _origin_visit_apply_update(
         self, visit: Dict[str, Any], db, cur=None
     ) -> Dict[str, Any]:
         """Retrieve the latest visit status information for the origin visit.
         Then merge it with the visit and return it.
 
         """
         visit_status = db.origin_visit_status_get_latest(
             visit["origin"], visit["visit"], cur=cur
         )
         return self._origin_visit_merge(visit, visit_status)
 
     def _origin_visit_merge(
         self, visit: Dict[str, Any], visit_status: Dict[str, Any]
     ) -> Dict[str, Any]:
         """Merge origin_visit and origin_visit_status together.
 
         """
         return OriginVisit.from_dict(
             {
                 # default to the values in visit
                 **visit,
                 # override with the last update
                 **visit_status,
                 # visit['origin'] is the URL (via a join), while
                 # visit_status['origin'] is only an id.
                 "origin": visit["origin"],
                 # but keep the date of the creation of the origin visit
                 "date": visit["date"],
             }
         ).to_dict()
 
     @timed
     @db_transaction_generator(statement_timeout=500)
     def origin_visit_get(
         self,
         origin: str,
         last_visit: Optional[int] = None,
         limit: Optional[int] = None,
         order: str = "asc",
         db=None,
         cur=None,
     ) -> Iterable[Dict[str, Any]]:
         assert order in ["asc", "desc"]
         lines = db.origin_visit_get_all(
             origin, last_visit=last_visit, limit=limit, order=order, cur=cur
         )
         for line in lines:
             visit = dict(zip(db.origin_visit_get_cols, line))
             yield self._origin_visit_apply_update(visit, db)
 
     @timed
     @db_transaction(statement_timeout=500)
     def origin_visit_find_by_date(
         self, origin: str, visit_date: datetime.datetime, db=None, cur=None
     ) -> Optional[Dict[str, Any]]:
         visit = db.origin_visit_find_by_date(origin, visit_date, cur=cur)
         if visit:
             return self._origin_visit_apply_update(visit, db)
         return None
 
     @timed
     @db_transaction(statement_timeout=500)
     def origin_visit_get_by(
         self, origin: str, visit: int, db=None, cur=None
     ) -> Optional[Dict[str, Any]]:
         row = db.origin_visit_get(origin, visit, cur)
         if row:
             visit_dict = dict(zip(db.origin_visit_get_cols, row))
             return self._origin_visit_apply_update(visit_dict, db)
         return None
 
     @timed
     @db_transaction(statement_timeout=4000)
     def origin_visit_get_latest(
         self,
         origin: str,
         type: Optional[str] = None,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
         db=None,
         cur=None,
     ) -> Optional[Dict[str, Any]]:
         row = db.origin_visit_get_latest(
             origin,
             type=type,
             allowed_statuses=allowed_statuses,
             require_snapshot=require_snapshot,
             cur=cur,
         )
         if row:
             visit = dict(zip(db.origin_visit_get_cols, row))
             return self._origin_visit_apply_update(visit, db)
         return None
 
     @timed
     @db_transaction()
     def origin_visit_get_random(
         self, type: str, db=None, cur=None
     ) -> Optional[Dict[str, Any]]:
         row = db.origin_visit_get_random(type, cur)
         if row:
             visit = dict(zip(db.origin_visit_get_cols, row))
             return self._origin_visit_apply_update(visit, db)
         return None
 
     @timed
     @db_transaction(statement_timeout=2000)
     def object_find_by_sha1_git(self, ids, db=None, cur=None):
         ret = {id: [] for id in ids}
 
         for retval in db.object_find_by_sha1_git(ids, cur=cur):
             if retval[1]:
                 ret[retval[0]].append(
                     dict(zip(db.object_find_by_sha1_git_cols, retval))
                 )
 
         return ret
 
     @timed
     @db_transaction(statement_timeout=500)
     def origin_get(self, origins, db=None, cur=None):
         if isinstance(origins, dict):
             # Old API
             return_single = True
             origins = [origins]
         elif len(origins) == 0:
             return []
         else:
             return_single = False
 
         origin_urls = [origin["url"] for origin in origins]
         results = db.origin_get_by_url(origin_urls, cur)
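+        # One row is returned per requested URL, in the same order, with null
+        # columns for origins that do not exist.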
 
         results = [dict(zip(db.origin_cols, result)) for result in results]
         if return_single:
             assert len(results) == 1
             if results[0]["url"] is not None:
                 return results[0]
             else:
                 return None
         else:
             return [None if res["url"] is None else res for res in results]
 
     @timed
     @db_transaction_generator(statement_timeout=500)
     def origin_get_by_sha1(self, sha1s, db=None, cur=None):
         for line in db.origin_get_by_sha1(sha1s, cur):
             if line[0] is not None:
                 yield dict(zip(db.origin_cols, line))
             else:
                 yield None
 
     @timed
     @db_transaction_generator()
     def origin_get_range(self, origin_from=1, origin_count=100, db=None, cur=None):
         for origin in db.origin_get_range(origin_from, origin_count, cur):
             yield dict(zip(db.origin_get_range_cols, origin))
 
     @timed
     @db_transaction()
     def origin_list(
         self, page_token: Optional[str] = None, limit: int = 100, *, db=None, cur=None
     ) -> dict:
         page_token = page_token or "0"
         if not isinstance(page_token, str):
             raise StorageArgumentException("page_token must be a string.")
         origin_from = int(page_token)
         result: Dict[str, Any] = {
             "origins": [
                 dict(zip(db.origin_get_range_cols, origin))
                 for origin in db.origin_get_range(origin_from, limit, cur)
             ],
         }
 
         assert len(result["origins"]) <= limit
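+        # The page token is the stringified internal origin id; when a full
+        # page was returned, the next token points just past the last id.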
         if len(result["origins"]) == limit:
             result["next_page_token"] = str(result["origins"][limit - 1]["id"] + 1)
 
         for origin in result["origins"]:
             del origin["id"]
 
         return result
 
     @timed
     @db_transaction_generator()
     def origin_search(
         self,
         url_pattern,
         offset=0,
         limit=50,
         regexp=False,
         with_visit=False,
         db=None,
         cur=None,
     ):
         for origin in db.origin_search(
             url_pattern, offset, limit, regexp, with_visit, cur
         ):
             yield dict(zip(db.origin_cols, origin))
 
     @timed
     @db_transaction()
     def origin_count(
         self, url_pattern, regexp=False, with_visit=False, db=None, cur=None
     ):
         return db.origin_count(url_pattern, regexp, with_visit, cur)
 
     @timed
     @db_transaction()
     def origin_add(
         self, origins: Iterable[Origin], db=None, cur=None
     ) -> Dict[str, int]:
         urls = [o.url for o in origins]
         known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur))
         # use lists here to keep origins sorted; some tests depend on this
         to_add = [url for url in urls if url not in known_origins]
 
         self.journal_writer.origin_add([Origin(url=url) for url in to_add])
         added = 0
         for url in to_add:
             if db.origin_add(url, cur):
                 added += 1
         return {"origin:add": added}
 
     @deprecated("Use origin_add([origin]) instead")
     @timed
     @db_transaction()
     def origin_add_one(self, origin: Origin, db=None, cur=None) -> str:
         stats = self.origin_add([origin])
         if stats.get("origin:add", 0):
             send_metric("origin:add", count=1, method_name="origin_add_one")
         return origin.url
 
     @db_transaction(statement_timeout=500)
     def stat_counters(self, db=None, cur=None):
         return {k: v for (k, v) in db.stat_counters()}
 
     @db_transaction()
     def refresh_stat_counters(self, db=None, cur=None):
         keys = [
             "content",
             "directory",
             "directory_entry_dir",
             "directory_entry_file",
             "directory_entry_rev",
             "origin",
             "origin_visit",
             "person",
             "release",
             "revision",
             "revision_history",
             "skipped_content",
             "snapshot",
         ]
 
         for key in keys:
             cur.execute("select * from swh_update_counter(%s)", (key,))
 
     @timed
     @db_transaction()
     def origin_metadata_add(
         self,
         origin_url: str,
         discovery_date: datetime.datetime,
         authority: Dict[str, Any],
         fetcher: Dict[str, Any],
         format: str,
         metadata: bytes,
         db=None,
         cur=None,
     ) -> None:
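+        """Add a metadata entry for the given origin; delegates to the
+        generic object-metadata helper with object_type "origin"."""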
-        authority_id = db.metadata_authority_get_id(
-            authority["type"], authority["url"], cur
-        )
-        if not authority_id:
-            raise StorageArgumentException(f"Unknown authority {authority}")
-        fetcher_id = db.metadata_fetcher_get_id(
-            fetcher["name"], fetcher["version"], cur
+        self._object_metadata_add(
+            "origin",
+            origin_url,
+            discovery_date,
+            authority,
+            fetcher,
+            format,
+            metadata,
+            db,
+            cur,
         )
-        if not fetcher_id:
-            raise StorageArgumentException(f"Unknown fetcher {fetcher}")
-        try:
-            db.origin_metadata_add(
-                origin_url,
-                discovery_date,
-                authority_id,
-                fetcher_id,
-                format,
-                metadata,
-                cur,
+
+    def _object_metadata_add(
+        self,
+        object_type: str,
+        id: str,
+        discovery_date: datetime.datetime,
+        authority: Dict[str, Any],
+        fetcher: Dict[str, Any],
+        format: str,
+        metadata: bytes,
+        db,
+        cur,
+    ) -> None:
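+        """Shared implementation behind origin_metadata_add: resolve the
+        authority and fetcher to their ids (raising StorageArgumentException
+        if either is unknown), check that metadata is bytes, then record the
+        entry with db.object_metadata_add()."""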
+        authority_id = self._get_authority_id(authority, db, cur)
+        fetcher_id = self._get_fetcher_id(fetcher, db, cur)
+        if not isinstance(metadata, bytes):
+            raise StorageArgumentException(
+                "metadata must be bytes, not %r" % (metadata,)
             )
-        except psycopg2.ProgrammingError as e:
-            raise StorageArgumentException(*e.args)
-        send_metric("origin_metadata:add", count=1, method_name="origin_metadata_add")
+
+        db.object_metadata_add(
+            object_type,
+            id,
+            discovery_date,
+            authority_id,
+            fetcher_id,
+            format,
+            metadata,
+            cur,
+        )
+
+        send_metric(
+            f"{object_type}_metadata:add",
+            count=1,
+            method_name=f"{object_type}_metadata_add",
+        )
 
     @timed
     @db_transaction(statement_timeout=500)
     def origin_metadata_get(
         self,
         origin_url: str,
         authority: Dict[str, str],
         after: Optional[datetime.datetime] = None,
         page_token: Optional[bytes] = None,
         limit: int = 1000,
         db=None,
         cur=None,
+    ) -> Dict[str, Any]:
+        result = self._object_metadata_get(
+            "origin", origin_url, authority, after, page_token, limit, db, cur
+        )
+
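+        # Expose the generic 'id' column under the legacy 'origin_url' key
+        # for this origin-specific endpoint.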
+        for res in result["results"]:
+            res.pop("id")
+            res["origin_url"] = origin_url
+
+        return result
+
+    def _object_metadata_get(
+        self,
+        object_type: str,
+        id: str,
+        authority: Dict[str, str],
+        after: Optional[datetime.datetime],
+        page_token: Optional[bytes],
+        limit: int,
+        db,
+        cur,
     ) -> Dict[str, Any]:
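+        """Shared implementation behind origin_metadata_get.
+
+        Pagination uses an opaque msgpack token encoding the (discovery_date,
+        fetcher id) pair of the last returned row; ``limit + 1`` rows are
+        fetched to detect whether a next page exists.
+        """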
         if page_token:
             (after_time, after_fetcher) = msgpack_loads(page_token)
             if after and after_time < after:
                 raise StorageArgumentException(
                     "page_token is inconsistent with the value of 'after'."
                 )
         else:
             after_time = after
             after_fetcher = None
 
         authority_id = db.metadata_authority_get_id(
             authority["type"], authority["url"], cur
         )
         if not authority_id:
             return {
                 "next_page_token": None,
                 "results": [],
             }
 
-        rows = db.origin_metadata_get(
-            origin_url, authority_id, after_time, after_fetcher, limit + 1, cur
+        rows = db.object_metadata_get(
+            object_type, id, authority_id, after_time, after_fetcher, limit + 1, cur
         )
-        rows = [dict(zip(db.origin_metadata_get_cols, row)) for row in rows]
+        rows = [dict(zip(db.object_metadata_get_cols, row)) for row in rows]
         results = []
         for row in rows:
             row = row.copy()
             row.pop("metadata_fetcher.id")
-            results.append(
-                {
-                    "origin_url": row.pop("origin.url"),
-                    "authority": {
-                        "type": row.pop("metadata_authority.type"),
-                        "url": row.pop("metadata_authority.url"),
-                    },
-                    "fetcher": {
-                        "name": row.pop("metadata_fetcher.name"),
-                        "version": row.pop("metadata_fetcher.version"),
-                    },
-                    **row,
-                }
-            )
+            result = {
+                "authority": {
+                    "type": row.pop("metadata_authority.type"),
+                    "url": row.pop("metadata_authority.url"),
+                },
+                "fetcher": {
+                    "name": row.pop("metadata_fetcher.name"),
+                    "version": row.pop("metadata_fetcher.version"),
+                },
+                **row,
+            }
+
+            results.append(result)
 
         if len(results) > limit:
             results.pop()
             assert len(results) == limit
             last_returned_row = rows[-2]  # rows[-1] corresponds to the popped result
             next_page_token: Optional[bytes] = msgpack_dumps(
                 (
                     last_returned_row["discovery_date"],
                     last_returned_row["metadata_fetcher.id"],
                 )
             )
         else:
             next_page_token = None
 
         return {
             "next_page_token": next_page_token,
             "results": results,
         }
 
     @timed
     @db_transaction()
     def metadata_fetcher_add(
         self, name: str, version: str, metadata: Dict[str, Any], db=None, cur=None
     ) -> None:
-        db.metadata_fetcher_add(name, version, metadata)
+        db.metadata_fetcher_add(name, version, metadata, cur)
         send_metric("metadata_fetcher:add", count=1, method_name="metadata_fetcher")
 
     @timed
     @db_transaction(statement_timeout=500)
     def metadata_fetcher_get(
         self, name: str, version: str, db=None, cur=None
     ) -> Optional[Dict[str, Any]]:
         row = db.metadata_fetcher_get(name, version, cur=cur)
         if not row:
             return None
         return dict(zip(db.metadata_fetcher_cols, row))
 
     @timed
     @db_transaction()
     def metadata_authority_add(
         self, type: str, url: str, metadata: Dict[str, Any], db=None, cur=None
     ) -> None:
         db.metadata_authority_add(type, url, metadata, cur)
         send_metric("metadata_authority:add", count=1, method_name="metadata_authority")
 
     @timed
     @db_transaction()
     def metadata_authority_get(
         self, type: str, url: str, db=None, cur=None
     ) -> Optional[Dict[str, Any]]:
         row = db.metadata_authority_get(type, url, cur=cur)
         if not row:
             return None
         return dict(zip(db.metadata_authority_cols, row))
 
     @timed
     def diff_directories(self, from_dir, to_dir, track_renaming=False):
         return diff.diff_directories(self, from_dir, to_dir, track_renaming)
 
     @timed
     def diff_revisions(self, from_rev, to_rev, track_renaming=False):
         return diff.diff_revisions(self, from_rev, to_rev, track_renaming)
 
     @timed
     def diff_revision(self, revision, track_renaming=False):
         return diff.diff_revision(self, revision, track_renaming)
 
     def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
         """Do nothing
 
         """
         return None
 
     def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
         return {}
+
+    def _get_authority_id(self, authority: Dict[str, Any], db, cur):
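+        """Return the id of the given metadata authority, raising
+        StorageArgumentException if it is not registered."""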
+        authority_id = db.metadata_authority_get_id(
+            authority["type"], authority["url"], cur
+        )
+        if not authority_id:
+            raise StorageArgumentException(f"Unknown authority {authority}")
+        return authority_id
+
+    def _get_fetcher_id(self, fetcher: Dict[str, Any], db, cur):
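+        """Return the id of the given metadata fetcher, raising
+        StorageArgumentException if it is not registered."""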
+        fetcher_id = db.metadata_fetcher_get_id(
+            fetcher["name"], fetcher["version"], cur
+        )
+        if not fetcher_id:
+            raise StorageArgumentException(f"Unknown fetcher {fetcher}")
+        return fetcher_id