diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py
index f8c4cc5..0fd3ad5 100644
--- a/swh/objstorage/api/client.py
+++ b/swh/objstorage/api/client.py
@@ -1,55 +1,55 @@
 # Copyright (C) 2015-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
-from typing import Iterator, Optional
+from typing import Any, Dict, Iterator, Optional
 
 import msgpack
 
 from swh.core.api import RPCClient
-from swh.model import hashutil
 from swh.objstorage.constants import DEFAULT_LIMIT
 from swh.objstorage.exc import Error, ObjNotFoundError, ObjStorageAPIError
 from swh.objstorage.interface import CompositeObjId, ObjId, ObjStorageInterface
+from swh.objstorage.objstorage import objid_to_default_hex
 
 
 class RemoteObjStorage(RPCClient):
     """Proxy to a remote object storage.
 
     This class allows connecting to an object storage server over the
     HTTP protocol.
 
     Attributes:
         url (string): The URL of the server to connect to. Must end
             with a '/'
         session: The session to send requests.
 
     """
 
     api_exception = ObjStorageAPIError
     reraise_exceptions = [ObjNotFoundError, Error]
     backend_class = ObjStorageInterface
 
     def restore(self: ObjStorageInterface, content: bytes, obj_id: ObjId) -> None:
         return self.add(content, obj_id, check_presence=False)
 
     def __iter__(self) -> Iterator[CompositeObjId]:
         yield from self.list_content()
 
     def list_content(
         self,
         last_obj_id: Optional[ObjId] = None,
         limit: int = DEFAULT_LIMIT,
     ) -> Iterator[CompositeObjId]:
-        params = {"limit": limit}
+        params: Dict[str, Any] = {"limit": limit}
         if last_obj_id:
-            params["last_obj_id"] = hashutil.hash_to_hex(last_obj_id)
+            params["last_obj_id"] = objid_to_default_hex(last_obj_id)
         response = self.raw_verb(
             "get",
             "content",
             headers={"accept": "application/x-msgpack"},
             params=params,
             stream=True,
         )
-        yield from msgpack.Unpacker(response.raw, raw=True)
+        yield from msgpack.Unpacker(response.raw, raw=False)
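With ``raw=False``, the streamed msgpack items are decoded with native ``str`` keys, so consumers of ``list_content`` receive the same ``CompositeObjId`` dicts that the backends below now yield. A minimal consumer-side sketch (the server URL is a placeholder):

from swh.objstorage.api.client import RemoteObjStorage

# Placeholder URL of an objstorage RPC server.
storage = RemoteObjStorage(url="http://localhost:5003/")

for composite_id in storage.list_content(limit=10):
    # Each item is a CompositeObjId dict, e.g. {"sha1": b"..."}
    print(composite_id)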
diff --git a/swh/objstorage/backends/azure.py b/swh/objstorage/backends/azure.py
index 23a2b12..5b897f1 100644
--- a/swh/objstorage/backends/azure.py
+++ b/swh/objstorage/backends/azure.py
@@ -1,410 +1,413 @@
 # Copyright (C) 2016-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import asyncio
 import contextlib
 import datetime
 from itertools import product
 import string
 from typing import Dict, Iterator, List, Optional, Union
 import warnings
 
 from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
 from azure.storage.blob import (
     ContainerClient,
     ContainerSasPermissions,
     generate_container_sas,
 )
 from azure.storage.blob.aio import ContainerClient as AsyncContainerClient
+from typing_extensions import Literal
 
 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
 )
 from swh.objstorage.utils import call_async
 
 
 def get_container_url(
     account_name: str,
     account_key: str,
     container_name: str,
     access_policy: str = "read_only",
     expiry: datetime.timedelta = datetime.timedelta(days=365),
     **kwargs,
 ) -> str:
     """Get the full URL for the given container on the given account, with a
     Shared Access Signature granting the specified access policy.
 
     Args:
       account_name: name of the storage account for which to generate the URL
       account_key: shared account key of the storage account used to generate the SAS
       container_name: name of the container for which to grant access in the storage
         account
       access_policy: one of ``read_only``, ``append_only``, ``full``
       expiry: the interval after which the signature will expire
 
     Returns:
       the full URL of the container, with the shared access signature.
     """
 
     access_policies = {
         "read_only": ContainerSasPermissions(
             read=True, list=True, delete=False, write=False
         ),
         "append_only": ContainerSasPermissions(
             read=True, list=True, delete=False, write=True
         ),
         "full": ContainerSasPermissions(read=True, list=True, delete=True, write=True),
     }
 
     current_time = datetime.datetime.utcnow()
 
     signature = generate_container_sas(
         account_name,
         container_name,
         account_key=account_key,
         permission=access_policies[access_policy],
         start=current_time + datetime.timedelta(minutes=-1),
         expiry=current_time + expiry,
     )
 
     return f"https://{account_name}.blob.core.windows.net/{container_name}?{signature}"
 
 
 class AzureCloudObjStorage(ObjStorage):
     """ObjStorage backend for Azure blob storage accounts.
 
     Args:
       container_url: the URL of the container in which the objects are stored.
       account_name: (deprecated) the name of the storage account under which objects are
         stored
       api_secret_key: (deprecated) the shared account key
       container_name: (deprecated) the name of the container under which objects are
         stored
       compression: the compression algorithm used to compress objects in storage
 
     Notes:
       The container url should contain the credentials via a "Shared Access
       Signature". The :func:`get_container_url` helper can be used to generate
       such a URL from the account's access keys. The ``account_name``,
       ``api_secret_key`` and ``container_name`` arguments are deprecated.
     """
 
+    PRIMARY_HASH: Literal["sha1"] = "sha1"
+
     def __init__(
         self,
         container_url: Optional[str] = None,
         account_name: Optional[str] = None,
         api_secret_key: Optional[str] = None,
         container_name: Optional[str] = None,
         compression="gzip",
         **kwargs,
     ):
         if container_url is None:
             if account_name is None or api_secret_key is None or container_name is None:
                 raise ValueError(
                     "AzureCloudObjStorage must have a container_url or all three "
                     "account_name, api_secret_key and container_name"
                 )
             else:
                 warnings.warn(
                     "The Azure objstorage account secret key parameters are "
                     "deprecated, please use container URLs instead.",
                     DeprecationWarning,
                 )
                 container_url = get_container_url(
                     account_name=account_name,
                     account_key=api_secret_key,
                     container_name=container_name,
                     access_policy="full",
                 )
 
         super().__init__(**kwargs)
         self.container_url = container_url
         self.compression = compression
 
     def get_container_client(self, hex_obj_id):
         """Get the container client for the container that contains the object with
         internal id hex_obj_id
 
         This is used to allow the PrefixedAzureCloudObjStorage to dispatch the
         client according to the prefix of the object id.
 
         """
         return ContainerClient.from_container_url(self.container_url)
 
     @contextlib.asynccontextmanager
     async def get_async_container_clients(self):
         """Returns a collection of container clients, to be passed to
         ``get_async_blob_client``.
 
         Each container may not be used in more than one asyncio loop."""
         client = AsyncContainerClient.from_container_url(self.container_url)
         async with client:
             yield {"": client}
 
     def get_blob_client(self, hex_obj_id):
         """Get the azure blob client for the given hex obj id"""
         container_client = self.get_container_client(hex_obj_id)
 
         return container_client.get_blob_client(blob=hex_obj_id)
 
     def get_async_blob_client(self, hex_obj_id, container_clients):
         """Get the azure blob client for the given hex obj id and a collection
         yielded by ``get_async_container_clients``."""
 
         return container_clients[""].get_blob_client(blob=hex_obj_id)
 
     def get_all_container_clients(self):
         """Get all active container clients"""
         yield self.get_container_client("")
 
     def _internal_id(self, obj_id):
         """The internal id is the hexadecimal version of the object id."""
         return hashutil.hash_to_hex(obj_id)
 
     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         for container_client in self.get_all_container_clients():
             props = container_client.get_container_properties()
 
             # FIXME: check_write is ignored here
             if not props:
                 return False
 
         return True
 
     def __contains__(self, obj_id: ObjId) -> bool:
         """Does the storage contain the obj_id?"""
         hex_obj_id = self._internal_id(obj_id)
         client = self.get_blob_client(hex_obj_id)
         try:
             client.get_blob_properties()
         except ResourceNotFoundError:
             return False
         else:
             return True
 
     def __iter__(self) -> Iterator[CompositeObjId]:
         """Iterate over the objects present in the storage."""
         for client in self.get_all_container_clients():
             for obj in client.list_blobs():
-                yield hashutil.hash_to_bytes(obj.name)
+                yield {self.PRIMARY_HASH: hashutil.hash_to_bytes(obj.name)}
 
     def __len__(self):
         """Compute the number of objects in the current object storage.
 
         Returns:
             number of objects contained in the storage.
 
         """
         return sum(1 for i in self)
 
     def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
         """Add an obj in storage if it's not there already."""
         if check_presence and obj_id in self:
             return
 
         hex_obj_id = self._internal_id(obj_id)
 
         # Send the compressed content
         compressor = compressors[self.compression]()
         data = compressor.compress(content)
         data += compressor.flush()
 
         client = self.get_blob_client(hex_obj_id)
         try:
             client.upload_blob(data=data, length=len(data))
         except ResourceExistsError:
             # There's a race condition between check_presence and upload_blob,
             # that we can't get rid of as the azure api doesn't allow atomic
             # replaces or renaming a blob. As the restore operation explicitly
             # removes the blob, it should be safe to just ignore the error.
             pass
 
     def restore(self, content: bytes, obj_id: ObjId) -> None:
         """Restore a content."""
         if obj_id in self:
             self.delete(obj_id)
 
         return self.add(content, obj_id, check_presence=False)
 
     def get(self, obj_id: ObjId) -> bytes:
         """Retrieve the blob's content if found."""
         return call_async(self._get_async, obj_id)
 
     async def _get_async(self, obj_id, container_clients=None):
         """Coroutine implementing ``get(obj_id)`` using azure-storage-blob's
         asynchronous implementation.
         While ``get(obj_id)`` does not need asynchronicity, this is useful to
         ``get_batch(obj_ids)``, as it can run multiple ``_get_async`` tasks
         concurrently."""
         if container_clients is None:
             # If the container_clients argument is not passed, create a new
             # collection of container_clients and restart the function with it.
             async with self.get_async_container_clients() as container_clients:
                 return await self._get_async(obj_id, container_clients)
 
         hex_obj_id = self._internal_id(obj_id)
         client = self.get_async_blob_client(hex_obj_id, container_clients)
 
         try:
             download = await client.download_blob()
         except ResourceNotFoundError:
             raise ObjNotFoundError(obj_id) from None
         else:
             data = await download.content_as_bytes()
 
         decompressor = decompressors[self.compression]()
         ret = decompressor.decompress(data)
         if decompressor.unused_data:
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret
 
     async def _get_async_or_none(self, obj_id, container_clients):
         """Like ``get_async(obj_id)``, but returns None instead of raising
         ObjNotFoundError. Used by ``get_batch`` so other blobs can be returned
         even if one is missing."""
         try:
             return await self._get_async(obj_id, container_clients)
         except ObjNotFoundError:
             return None
 
     async def _get_batch_async(self, obj_ids):
         async with self.get_async_container_clients() as container_clients:
             return await asyncio.gather(
                 *[
                     self._get_async_or_none(obj_id, container_clients)
                     for obj_id in obj_ids
                 ]
             )
 
     def get_batch(self, obj_ids: List[ObjId]) -> Iterator[Optional[bytes]]:
         """Retrieve objects' raw content in bulk from storage, concurrently."""
         return call_async(self._get_batch_async, obj_ids)
 
     def check(self, obj_id: ObjId) -> None:
         """Check the content integrity."""
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)
 
     def delete(self, obj_id: ObjId):
         """Delete an object."""
         super().delete(obj_id)  # Check delete permission
         hex_obj_id = self._internal_id(obj_id)
         client = self.get_blob_client(hex_obj_id)
         try:
             client.delete_blob()
         except ResourceNotFoundError:
             raise ObjNotFoundError(obj_id) from None
 
         return True
 
 
 class PrefixedAzureCloudObjStorage(AzureCloudObjStorage):
     """ObjStorage with azure capabilities, striped by prefix.
 
     accounts is a dict containing entries of the form:
         <prefix>: <container_url_for_prefix>
     """
 
     def __init__(
         self,
         accounts: Dict[str, Union[str, Dict[str, str]]],
         compression="gzip",
         **kwargs,
     ):
         # shortcut AzureCloudObjStorage __init__
         ObjStorage.__init__(self, **kwargs)
 
         self.compression = compression
 
         # Definition sanity check
         prefix_lengths = set(len(prefix) for prefix in accounts)
         if not len(prefix_lengths) == 1:
             raise ValueError(
                 "Inconsistent prefixes, found lengths %s"
                 % ", ".join(str(lst) for lst in sorted(prefix_lengths))
             )
 
         self.prefix_len = prefix_lengths.pop()
 
         expected_prefixes = set(
             "".join(letters)
             for letters in product(
                 set(string.hexdigits.lower()), repeat=self.prefix_len
             )
         )
         missing_prefixes = expected_prefixes - set(accounts)
         if missing_prefixes:
             raise ValueError(
                 "Missing prefixes %s" % ", ".join(sorted(missing_prefixes))
             )
 
         do_warning = False
 
         self.container_urls = {}
         for prefix, container_url in accounts.items():
             if isinstance(container_url, dict):
                 do_warning = True
                 container_url = get_container_url(
                     account_name=container_url["account_name"],
                     account_key=container_url["api_secret_key"],
                     container_name=container_url["container_name"],
                     access_policy="full",
                 )
             self.container_urls[prefix] = container_url
 
         if do_warning:
             warnings.warn(
                 "The Azure objstorage account secret key parameters are "
                 "deprecated, please use container URLs instead.",
                 DeprecationWarning,
             )
 
     def get_container_client(self, hex_obj_id):
         """Get the container client for the container that contains the object
         with internal id hex_obj_id
         """
         prefix = hex_obj_id[: self.prefix_len]
         return ContainerClient.from_container_url(self.container_urls[prefix])
 
     @contextlib.asynccontextmanager
     async def get_async_container_clients(self):
         # This is equivalent to:
         # client1 = AsyncContainerClient.from_container_url(url1)
         # ...
         # client16 = AsyncContainerClient.from_container_url(url16)
         # async with client1, ..., client16:
         #     yield {prefix1: client1, ..., prefix16: client16}
         clients = {
             prefix: AsyncContainerClient.from_container_url(url)
             for (prefix, url) in self.container_urls.items()
         }
         async with contextlib.AsyncExitStack() as stack:
             for client in clients.values():
                 await stack.enter_async_context(client)
             yield clients
 
     def get_async_blob_client(self, hex_obj_id, container_clients):
         """Get the azure blob client for the given hex obj id and a collection
         yielded by ``get_async_container_clients``."""
 
         prefix = hex_obj_id[: self.prefix_len]
         return container_clients[prefix].get_blob_client(blob=hex_obj_id)
 
     def get_all_container_clients(self):
         """Get all active container clients"""
         # iterate on items() to sort blob services;
         # needed to be able to paginate in the list_content() method
         yield from (
             self.get_container_client(prefix) for prefix in sorted(self.container_urls)
         )
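As a rough usage sketch of the pieces above (the storage-account credentials and container name are placeholders): the SAS URL comes from ``get_container_url``, and ``__iter__`` now yields composite object ids keyed by ``PRIMARY_HASH``.

import datetime

from swh.objstorage.backends.azure import AzureCloudObjStorage, get_container_url

# Placeholder storage-account credentials and container name.
container_url = get_container_url(
    account_name="myaccount",
    account_key="bXktYWNjb3VudC1rZXk=",
    container_name="contents",
    access_policy="append_only",
    expiry=datetime.timedelta(days=30),
)

storage = AzureCloudObjStorage(container_url=container_url, compression="gzip")

for composite_id in storage:
    # e.g. {"sha1": b"..."}, keyed by AzureCloudObjStorage.PRIMARY_HASH
    print(composite_id)
    break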
diff --git a/swh/objstorage/backends/http.py b/swh/objstorage/backends/http.py
index 1900e11..8971c11 100644
--- a/swh/objstorage/backends/http.py
+++ b/swh/objstorage/backends/http.py
@@ -1,98 +1,99 @@
 # Copyright (C) 2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import logging
 from typing import Iterator, Optional
 from urllib.parse import urljoin
 
 import requests
 
 from swh.model import hashutil
 from swh.objstorage import exc
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     DEFAULT_LIMIT,
     ObjStorage,
     compute_hash,
     decompressors,
+    objid_to_default_hex,
 )
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.ERROR)
 
 
 class HTTPReadOnlyObjStorage(ObjStorage):
     """Simple ObjStorage retrieving objects from an HTTP server.
 
     For example, it can be used to retrieve objects from S3:
 
     objstorage:
       cls: http
       url: https://softwareheritage.s3.amazonaws.com/content/
     """
 
     def __init__(self, url=None, compression=None, **kwargs):
         super().__init__(**kwargs)
         self.session = requests.sessions.Session()
         self.root_path = url
         if not self.root_path.endswith("/"):
             self.root_path += "/"
         self.compression = compression
 
     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         return True
 
     def __contains__(self, obj_id: ObjId) -> bool:
         resp = self.session.head(self._path(obj_id))
         return resp.status_code == 200
 
     def __iter__(self) -> Iterator[CompositeObjId]:
         raise exc.NonIterableObjStorage("__iter__")
 
     def __len__(self):
         raise exc.NonIterableObjStorage("__len__")
 
     def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
         raise exc.ReadOnlyObjStorage("add")
 
     def delete(self, obj_id: ObjId):
         raise exc.ReadOnlyObjStorage("delete")
 
     def restore(self, content: bytes, obj_id: ObjId) -> None:
         raise exc.ReadOnlyObjStorage("restore")
 
     def list_content(
         self,
         last_obj_id: Optional[ObjId] = None,
         limit: int = DEFAULT_LIMIT,
     ) -> Iterator[CompositeObjId]:
         raise exc.NonIterableObjStorage("list_content")
 
     def get(self, obj_id: ObjId) -> bytes:
         try:
             resp = self.session.get(self._path(obj_id))
             resp.raise_for_status()
         except Exception:
             raise exc.ObjNotFoundError(obj_id)
 
         ret: bytes = resp.content
         if self.compression:
             d = decompressors[self.compression]()
             ret = d.decompress(ret)
             if d.unused_data:
-                hex_obj_id = hashutil.hash_to_hex(obj_id)
+                hex_obj_id = objid_to_default_hex(obj_id)
                 raise exc.Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret
 
     def check(self, obj_id: ObjId) -> None:
         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise exc.Error(obj_id)
 
     def _path(self, obj_id):
         return urljoin(self.root_path, hashutil.hash_to_hex(obj_id))
diff --git a/swh/objstorage/backends/in_memory.py b/swh/objstorage/backends/in_memory.py
index 1d6552a..6764bbd 100644
--- a/swh/objstorage/backends/in_memory.py
+++ b/swh/objstorage/backends/in_memory.py
@@ -1,57 +1,62 @@
 # Copyright (C) 2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from typing import Iterator
 
+from typing_extensions import Literal
+
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import ObjStorage, compute_hash, objid_to_default_hex
 
 
 class InMemoryObjStorage(ObjStorage):
     """In-Memory objstorage.
 
     Intended for test purposes.
 
     """
 
+    PRIMARY_HASH: Literal["sha1"] = "sha1"
+
     def __init__(self, **args):
         super().__init__()
         self.state = {}
 
     def check_config(self, *, check_write):
         return True
 
     def __contains__(self, obj_id: ObjId) -> bool:
         return obj_id in self.state
 
     def __iter__(self) -> Iterator[CompositeObjId]:
-        return iter(sorted(self.state))
+        for id_ in sorted(self.state):
+            yield {self.PRIMARY_HASH: id_}
 
     def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
         if check_presence and obj_id in self:
             return
 
         self.state[obj_id] = content
 
     def get(self, obj_id: ObjId) -> bytes:
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
 
         return self.state[obj_id]
 
     def check(self, obj_id: ObjId) -> None:
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
         if compute_hash(self.state[obj_id]) != obj_id:
             raise Error("Corrupt object %s" % objid_to_default_hex(obj_id))
 
     def delete(self, obj_id: ObjId):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
 
         self.state.pop(obj_id)
         return True
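A short sketch of the in-memory backend's round trip, showing that ``add``/``get`` still take the bare digest while ``__iter__`` now yields composite ids:

from swh.objstorage.backends.in_memory import InMemoryObjStorage
from swh.objstorage.objstorage import compute_hash

storage = InMemoryObjStorage()
content = b"some content"
obj_id = compute_hash(content)  # sha1 digest of the content

storage.add(content, obj_id=obj_id)
assert storage.get(obj_id) == content

# __iter__ yields composite ids keyed by PRIMARY_HASH ("sha1")
assert list(storage) == [{"sha1": obj_id}]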
diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py
index 3441778..c1626b9 100644
--- a/swh/objstorage/backends/libcloud.py
+++ b/swh/objstorage/backends/libcloud.py
@@ -1,250 +1,254 @@
-# Copyright (C) 2016-2017  The Software Heritage developers
+# Copyright (C) 2016-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import abc
 from collections import OrderedDict
 from typing import Iterator, Optional
 from urllib.parse import urlencode
 
 from libcloud.storage import providers
 import libcloud.storage.drivers.s3
 from libcloud.storage.types import ObjectDoesNotExistError, Provider
+from typing_extensions import Literal
 
 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
+    objid_to_default_hex,
 )
 
 
 def patch_libcloud_s3_urlencode():
     """Patches libcloud's S3 backend to properly sign queries.
 
     Recent versions of libcloud are not affected (they use signature V4),
     but 1.5.0 (the one in Debian 9) is."""
 
     def s3_urlencode(params):
         """Like urllib.parse.urlencode, but sorts the parameters first.
         This is required to properly compute the request signature, see
         https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement
         """  # noqa
         return urlencode(OrderedDict(sorted(params.items())))
 
     libcloud.storage.drivers.s3.urlencode = s3_urlencode
 
 
 patch_libcloud_s3_urlencode()
 
 
 class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta):
     """Abstract ObjStorage that connects to a cloud using Libcloud
 
     Implementations of this class must redefine the _get_provider
     method to make it return a driver provider (i.e. an object that
     supports the `get_driver` method) which returns a LibCloud driver (see
     https://libcloud.readthedocs.io/en/latest/storage/api.html).
 
     Args:
       container_name: Name of the base container
       path_prefix: prefix to prepend to object paths in the container,
                    separated with a slash
       compression: compression algorithm to use for objects
       kwargs: extra arguments are passed through to the LibCloud driver
     """
 
+    PRIMARY_HASH: Literal["sha1"] = "sha1"
+
     def __init__(
         self,
         container_name: str,
         compression: str = "gzip",
         path_prefix: Optional[str] = None,
         **kwargs,
     ):
         super().__init__(**kwargs)
         self.driver = self._get_driver(**kwargs)
         self.container_name = container_name
         self.container = self.driver.get_container(container_name=container_name)
         self.compression = compression
         self.path_prefix = None
         if path_prefix:
             self.path_prefix = path_prefix.rstrip("/") + "/"
 
     def _get_driver(self, **kwargs):
         """Initialize a driver to communicate with the cloud
 
         Kwargs: arguments passed to the StorageDriver class, typically
           key: key to connect to the API.
           secret: secret key for authentication.
           secure: (bool) support HTTPS
           host: (str)
           port: (int)
           api_version: (str)
           region: (str)
 
         Returns:
             a Libcloud driver to a cloud storage.
 
         """
         # Get the driver class from its description.
         cls = providers.get_driver(self._get_provider())
         # Initialize the driver.
         return cls(**kwargs)
 
     @abc.abstractmethod
     def _get_provider(self):
         """Get a libcloud driver provider
 
         This method must be overridden by subclasses to specify which
         of the native libcloud drivers the current storage should
         connect to. Alternatively, a provider for a custom driver may
         be returned, in which case the provider will have to support
         the `get_driver` method.
 
         """
         raise NotImplementedError(
             "%s must implement the `_get_provider` method" % type(self)
         )
 
     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         # FIXME: hopefully this blew up during instantiation
         return True
 
     def __contains__(self, obj_id: ObjId) -> bool:
         try:
             self._get_object(obj_id)
         except ObjNotFoundError:
             return False
         else:
             return True
 
     def __iter__(self) -> Iterator[CompositeObjId]:
         """Iterate over the objects present in the storage
 
         Warning: Iteration over the contents of a cloud-based object storage
         may be very inefficient: due to the very large number of objects it
         holds and the fact that it is remote, getting all the contents of the
         current object storage may result in a lot of network requests.
 
         You almost certainly don't want to use this method in production.
         """
         for obj in self.driver.iterate_container_objects(self.container):
             name = obj.name
 
             if self.path_prefix and not name.startswith(self.path_prefix):
                 continue
 
             if self.path_prefix:
                 name = name[len(self.path_prefix) :]
 
-            yield hashutil.hash_to_bytes(name)
+            yield {self.PRIMARY_HASH: hashutil.hash_to_bytes(name)}
 
     def __len__(self):
         """Compute the number of objects in the current object storage.
 
         Warning: this currently uses `__iter__`, so its warning about bad
         performance applies.
 
         Returns:
             number of objects contained in the storage.
 
         """
         return sum(1 for i in self)
 
     def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
         if check_presence and obj_id in self:
             return
 
         self._put_object(content, obj_id)
 
     def restore(self, content: bytes, obj_id: ObjId) -> None:
         return self.add(content, obj_id, check_presence=False)
 
     def get(self, obj_id: ObjId) -> bytes:
         obj = b"".join(self._get_object(obj_id).as_stream())
         d = decompressors[self.compression]()
         ret = d.decompress(obj)
         if d.unused_data:
-            hex_obj_id = hashutil.hash_to_hex(obj_id)
+            hex_obj_id = objid_to_default_hex(obj_id)
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret
 
     def check(self, obj_id: ObjId) -> None:
         # Check that the file exists, as _get_object raises ObjNotFoundError
         self._get_object(obj_id)
         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)
 
     def delete(self, obj_id: ObjId):
         super().delete(obj_id)  # Check delete permission
         obj = self._get_object(obj_id)
         return self.driver.delete_object(obj)
 
     def _object_path(self, obj_id):
         """Get the full path to an object"""
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         if self.path_prefix:
             return self.path_prefix + hex_obj_id
         else:
             return hex_obj_id
 
     def _get_object(self, obj_id):
         """Get a Libcloud wrapper for an object pointer.
 
         This wrapper does not retrieve the content of the object
         directly.
 
         """
         object_path = self._object_path(obj_id)
 
         try:
             return self.driver.get_object(self.container_name, object_path)
         except ObjectDoesNotExistError:
             raise ObjNotFoundError(obj_id)
 
     def _compressor(self, data):
         comp = compressors[self.compression]()
         for chunk in data:
             cchunk = comp.compress(chunk)
             if cchunk:
                 yield cchunk
         trail = comp.flush()
         if trail:
             yield trail
 
     def _put_object(self, content, obj_id):
         """Create an object in the cloud storage.
 
         The created object will contain the content and be referenced by
         the given id.
 
         """
         object_path = self._object_path(obj_id)
 
         if not isinstance(content, Iterator):
             content = (content,)
         self.driver.upload_object_via_stream(
             self._compressor(content), self.container, object_path
         )
 
 
 class AwsCloudObjStorage(CloudObjStorage):
     """Amazon's S3 Cloud-based object storage"""
 
     def _get_provider(self):
         return Provider.S3
 
 
 class OpenStackCloudObjStorage(CloudObjStorage):
     """OpenStack Swift Cloud based object storage"""
 
     def _get_provider(self):
         return Provider.OPENSTACK_SWIFT
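For reference, a sketch of how an S3-backed instance is built; the ``key``/``secret`` kwargs are forwarded to the libcloud driver as described in the ``_get_driver`` docstring (bucket name and credentials are placeholders):

from swh.objstorage.backends.libcloud import AwsCloudObjStorage

# Placeholder bucket name and credentials, passed through to libcloud's S3 driver.
storage = AwsCloudObjStorage(
    container_name="my-content-bucket",
    compression="gzip",
    path_prefix="content",
    key="aws-access-key-id",
    secret="aws-secret-access-key",
)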
diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py
index f8f19d9..67e3701 100644
--- a/swh/objstorage/backends/pathslicing.py
+++ b/swh/objstorage/backends/pathslicing.py
@@ -1,373 +1,374 @@
 # Copyright (C) 2015-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from contextlib import contextmanager
 from itertools import islice
 import os
 import tempfile
 from typing import Iterator, List, Optional
 
+from typing_extensions import Literal
+
 from swh.model import hashutil
 from swh.objstorage.constants import DEFAULT_LIMIT, ID_HASH_ALGO, ID_HEXDIGEST_LENGTH
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     decompressors,
     objid_to_default_hex,
 )
 
 BUFSIZ = 1048576
 
 DIR_MODE = 0o755
 FILE_MODE = 0o644
 
 
 class PathSlicer:
     """Helper class to compute a path based on a hash.
 
     Used to compute a directory path based on the object hash according to a
     given slicing. Each slice corresponds to a directory level that is named
     after the matching slice of the object's hash.
 
     For instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689
     will have the following computed path:
 
     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/    : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689
 
      Args:
          root (str): path to the root directory of the storage on the disk.
          slicing (str): the slicing configuration.
     """
 
     def __init__(self, root: str, slicing: str):
         self.root = root
         # Make a list of tuples where each tuple contains the beginning
         # and the end of each slicing.
         try:
             self.bounds = [
                 slice(*(int(x) if x else None for x in sbounds.split(":")))
                 for sbounds in slicing.split("/")
                 if sbounds
             ]
         except TypeError:
             raise ValueError(
                 "Invalid slicing declaration; "
                 "it should be of the form '<int>:<int>[/<int>:<int>]...'"
             )
 
     def check_config(self):
         """Check the slicing configuration is valid.
 
         Raises:
             ValueError: if the slicing configuration is invalid.
         """
         if len(self):
             max_char = max(
                 max(bound.start or 0, bound.stop or 0) for bound in self.bounds
             )
             if ID_HEXDIGEST_LENGTH < max_char:
                 raise ValueError(
                     "Algorithm %s has too short hash for slicing to char %d"
                     % (ID_HASH_ALGO, max_char)
                 )
 
     def get_directory(self, hex_obj_id: str) -> str:
         """Compute the storage directory of an object.
 
         See also: PathSlicer::get_path
 
         Args:
             hex_obj_id: object id as hexlified string.
 
         Returns:
             Absolute path (including root) to the directory that contains
             the given object id.
         """
         return os.path.join(self.root, *self.get_slices(hex_obj_id))
 
     def get_path(self, hex_obj_id: str) -> str:
         """Compute the full path to an object into the current storage.
 
         See also: PathSlicer::get_directory
 
         Args:
             hex_obj_id(str): object id as hexlified string.
 
         Returns:
             Absolute path (including root) to the object corresponding
             to the given object id.
         """
         return os.path.join(self.get_directory(hex_obj_id), hex_obj_id)
 
     def get_slices(self, hex_obj_id: str) -> List[str]:
         """Compute the path elements for the given hash.
 
         Args:
             hex_obj_id(str): object id as hexlified string.
 
         Returns:
             Relative path to the actual object corresponding to the given id as
             a list.
         """
 
         assert len(hex_obj_id) == ID_HEXDIGEST_LENGTH
         return [hex_obj_id[bound] for bound in self.bounds]
 
     def __len__(self) -> int:
         """Number of slices of the slicer"""
         return len(self.bounds)
 
 
 class PathSlicingObjStorage(ObjStorage):
     """Implementation of the ObjStorage API based on the hash of the content.
 
     On disk, an object storage is a directory tree containing files
     named after their object IDs. An object ID is a checksum of its
     content, depending on the value of the ID_HASH_ALGO constant (see
     swh.model.hashutil for its meaning).
 
     To avoid directories that contain too many files, the object storage has a
     given slicing. Each slice corresponds to a directory level that is named
     after the matching slice of the object's hash.
 
     So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689
     will be stored at the following paths, depending on the slicing:
 
     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/    : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689
 
     The files in the storage are stored in a compressed format (gzip by default).
 
     Args:
         root (str): path to the root directory of the storage on
             the disk.
         slicing (str): string that indicates the slicing to perform
             on the hash of the content to know the path where it should
             be stored (see the documentation of the PathSlicer class).
 
     """
 
+    PRIMARY_HASH: Literal["sha1"] = "sha1"
+
     def __init__(self, root, slicing, compression="gzip", **kwargs):
         super().__init__(**kwargs)
         self.root = root
         self.slicer = PathSlicer(root, slicing)
 
         self.use_fdatasync = hasattr(os, "fdatasync")
         self.compression = compression
 
         self.check_config(check_write=False)
 
     def check_config(self, *, check_write):
         """Check whether this object storage is properly configured"""
 
         self.slicer.check_config()
 
         if not os.path.isdir(self.root):
             raise ValueError(
                 'PathSlicingObjStorage root "%s" is not a directory' % self.root
             )
 
         if check_write:
             if not os.access(self.root, os.W_OK):
                 raise PermissionError(
                     'PathSlicingObjStorage root "%s" is not writable' % self.root
                 )
 
         if self.compression not in compressors:
             raise ValueError(
                 'Unknown compression algorithm "%s" for '
                 "PathSlicingObjStorage" % self.compression
             )
 
         return True
 
     def __contains__(self, obj_id: ObjId) -> bool:
         hex_obj_id = objid_to_default_hex(obj_id)
         return os.path.isfile(self.slicer.get_path(hex_obj_id))
 
     def __iter__(self) -> Iterator[CompositeObjId]:
         """Iterate over the object identifiers currently available in the
         storage.
 
         Warning: with the current implementation of the object
         storage, this method will walk the filesystem to list objects,
         meaning that listing all objects will be very slow for large
         storages. You almost certainly don't want to use this method
         in production.
 
         Return:
             Iterator over object IDs
 
         """
 
-        def obj_iterator():
-            # XXX hackish: it does not verify that the depth of found files
-            # matches the slicing depth of the storage
-            for root, _dirs, files in os.walk(self.root):
-                _dirs.sort()
-                for f in sorted(files):
-                    yield bytes.fromhex(f)
-
-        return obj_iterator()
+        # XXX hackish: it does not verify that the depth of found files
+        # matches the slicing depth of the storage
+        for root, _dirs, files in os.walk(self.root):
+            _dirs.sort()
+            for f in sorted(files):
+                yield {self.PRIMARY_HASH: bytes.fromhex(f)}
 
     def __len__(self) -> int:
         """Compute the number of objects available in the storage.
 
         Warning: this currently uses `__iter__`, so its warning about bad
         performance applies.
 
         Return:
             number of objects contained in the storage
         """
         return sum(1 for i in self)
 
     def add(
         self,
         content: bytes,
         obj_id: ObjId,
         check_presence: bool = True,
     ) -> None:
         if check_presence and obj_id in self:
             # If the object is already present, return immediately.
             return
 
         hex_obj_id = objid_to_default_hex(obj_id)
         compressor = compressors[self.compression]()
         with self._write_obj_file(hex_obj_id) as f:
             f.write(compressor.compress(content))
             f.write(compressor.flush())
 
     def get(self, obj_id: ObjId) -> bytes:
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
 
         # Open the file and return its content as bytes
         hex_obj_id = objid_to_default_hex(obj_id)
         d = decompressors[self.compression]()
         with open(self.slicer.get_path(hex_obj_id), "rb") as f:
             out = d.decompress(f.read())
         if d.unused_data:
             raise Error(
                 "Corrupt object %s: trailing data found" % hex_obj_id,
             )
 
         return out
 
     def check(self, obj_id: ObjId) -> None:
         try:
             data = self.get(obj_id)
         except OSError:
             hex_obj_id = objid_to_default_hex(obj_id)
             raise Error(
                 "Corrupt object %s: not a proper compressed file" % hex_obj_id,
             )
 
         checksums = hashutil.MultiHash.from_data(
             data, hash_names=[ID_HASH_ALGO]
         ).digest()
 
         actual_obj_sha1 = checksums[ID_HASH_ALGO]
         hex_obj_id = objid_to_default_hex(obj_id)
 
         if hex_obj_id != hashutil.hash_to_hex(actual_obj_sha1):
             raise Error(
                 "Corrupt object %s should have id %s"
                 % (objid_to_default_hex(obj_id), hashutil.hash_to_hex(actual_obj_sha1))
             )
 
     def delete(self, obj_id: ObjId):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
 
         hex_obj_id = objid_to_default_hex(obj_id)
         try:
             os.remove(self.slicer.get_path(hex_obj_id))
         except FileNotFoundError:
             raise ObjNotFoundError(obj_id)
         return True
 
     # Streaming methods
 
     @contextmanager
     def chunk_writer(self, obj_id):
         hex_obj_id = objid_to_default_hex(obj_id)
         compressor = compressors[self.compression]()
         with self._write_obj_file(hex_obj_id) as f:
             yield lambda c: f.write(compressor.compress(c))
             f.write(compressor.flush())
 
     def list_content(
         self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT
     ) -> Iterator[CompositeObjId]:
         if last_obj_id:
             it = self.iter_from(last_obj_id)
         else:
             it = iter(self)
         return islice(it, limit)
 
     def iter_from(self, obj_id, n_leaf=False):
         hex_obj_id = objid_to_default_hex(obj_id)
         slices = self.slicer.get_slices(hex_obj_id)
         rlen = len(self.root.split("/"))
 
         i = 0
         for root, dirs, files in os.walk(self.root):
             if not dirs:
                 i += 1
             level = len(root.split("/")) - rlen
             dirs.sort()
             if dirs and root == os.path.join(self.root, *slices[:level]):
                 cslice = slices[level]
                 for d in dirs[:]:
                     if d < cslice:
                         dirs.remove(d)
             for f in sorted(files):
                 if f > hex_obj_id:
-                    yield bytes.fromhex(f)
+                    yield {self.PRIMARY_HASH: bytes.fromhex(f)}
         if n_leaf:
             yield i
 
     @contextmanager
     def _write_obj_file(self, hex_obj_id):
         """Context manager for writing object files to the object storage.
 
         During writing, data are written to a temporary file, which is atomically
         renamed to the right file name after closing.
 
         Usage sample:
             with objstorage._write_obj_file(hex_obj_id) as f:
                 f.write(obj_data)
 
         Yields:
             a file-like object open for writing bytes.
         """
         # Get the final paths and create the directory if absent.
         dir = self.slicer.get_directory(hex_obj_id)
         if not os.path.isdir(dir):
             os.makedirs(dir, DIR_MODE, exist_ok=True)
         path = os.path.join(dir, hex_obj_id)
 
         # Create a temporary file.
         (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir)
 
         # Open the file and yield it for writing.
         tmp_f = os.fdopen(tmp, "wb")
         yield tmp_f
 
         # Make sure the contents of the temporary file are written to disk
         tmp_f.flush()
         if self.use_fdatasync:
             os.fdatasync(tmp)
         else:
             os.fsync(tmp)
 
         # Then close the temporary file and move it to the right path.
         tmp_f.close()
         os.chmod(tmp_path, FILE_MODE)
         os.rename(tmp_path, path)
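A small sketch of how ``PathSlicer`` maps a hex object id onto a path, using the slicing and hash from the docstring above (the root directory is a placeholder):

from swh.objstorage.backends.pathslicing import PathSlicer

slicer = PathSlicer(root="/srv/objects", slicing="0:2/2:4/4:6")
hex_obj_id = "34973274ccef6ab4dfaaf86599792fa9c3fe4689"

assert slicer.get_slices(hex_obj_id) == ["34", "97", "32"]
assert slicer.get_path(hex_obj_id) == (
    "/srv/objects/34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689"
)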
diff --git a/swh/objstorage/backends/seaweedfs/objstorage.py b/swh/objstorage/backends/seaweedfs/objstorage.py
index b87ae9f..62a1d47 100644
--- a/swh/objstorage/backends/seaweedfs/objstorage.py
+++ b/swh/objstorage/backends/seaweedfs/objstorage.py
@@ -1,157 +1,161 @@
 # Copyright (C) 2019-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import io
 from itertools import islice
 import logging
 import os
 from typing import Iterator, Optional
 
+from typing_extensions import Literal
+
 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     DEFAULT_LIMIT,
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
     objid_to_default_hex,
 )
 
 from .http import HttpFiler
 
 LOGGER = logging.getLogger(__name__)
 
 
 class SeaweedFilerObjStorage(ObjStorage):
     """ObjStorage with seaweedfs abilities, using the Filer API.
 
     https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API
     """
 
+    PRIMARY_HASH: Literal["sha1"] = "sha1"
+
     def __init__(self, url, compression=None, **kwargs):
         super().__init__(**kwargs)
         self.wf = HttpFiler(url)
         self.compression = compression
 
     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         # FIXME: hopefully this blew up during instantiation
         return True
 
     def __contains__(self, obj_id: ObjId) -> bool:
         return self.wf.exists(self._path(obj_id))
 
     def __iter__(self) -> Iterator[CompositeObjId]:
         """Iterate over the objects present in the storage
 
         Warning: Iteration over the contents of a cloud-based object storage
         may be very inefficient: due to the very large number of objects it
         holds and the fact that it is remote, getting all the contents of the
         current object storage may result in a lot of network requests.
 
         You almost certainly don't want to use this method in production.
         """
         obj_id = last_obj_id = None
         while True:
             for obj_id in self.list_content(last_obj_id=last_obj_id):
                 yield obj_id
             if last_obj_id == obj_id:
                 break
             last_obj_id = obj_id
 
     def __len__(self):
         """Compute the number of objects in the current object storage.
 
         Warning: this currently uses `__iter__`, so its warning about bad
         performance applies.
 
         Returns:
             number of objects contained in the storage.
 
         """
         return sum(1 for i in self)
 
     def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
         if check_presence and obj_id in self:
             return
 
         def compressor(data):
             comp = compressors[self.compression]()
             yield comp.compress(data)
             yield comp.flush()
 
         assert isinstance(
             content, bytes
         ), "list of content chunks is not supported anymore"
 
         self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id))
 
     def restore(self, content: bytes, obj_id: ObjId) -> None:
         return self.add(content, obj_id, check_presence=False)
 
     def get(self, obj_id: ObjId) -> bytes:
         try:
             obj = self.wf.get(self._path(obj_id))
         except Exception:
             raise ObjNotFoundError(obj_id)
 
         d = decompressors[self.compression]()
         ret = d.decompress(obj)
         if d.unused_data:
-            hex_obj_id = hashutil.hash_to_hex(obj_id)
+            hex_obj_id = objid_to_default_hex(obj_id)
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret
 
     def check(self, obj_id: ObjId) -> None:
         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)
 
     def delete(self, obj_id: ObjId):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
         self.wf.delete(self._path(obj_id))
         return True
 
     def list_content(
         self,
         last_obj_id: Optional[ObjId] = None,
         limit: int = DEFAULT_LIMIT,
     ) -> Iterator[CompositeObjId]:
         if last_obj_id:
             objid = objid_to_default_hex(last_obj_id)
             lastfilename = objid
         else:
             lastfilename = None
         for fname in islice(self.wf.iterfiles(last_file_name=lastfilename), limit):
             bytehex = fname.rsplit("/", 1)[-1]
-            yield hashutil.bytehex_to_hash(bytehex.encode())
+            yield {self.PRIMARY_HASH: hashutil.bytehex_to_hash(bytehex.encode())}
 
     # internal methods
     def _put_object(self, content, obj_id):
         """Create an object in the cloud storage.
 
         The created object will contain the content and be referenced by
         the given id.
 
         """
 
         def compressor(data):
             comp = compressors[self.compression]()
             for chunk in data:
                 yield comp.compress(chunk)
             yield comp.flush()
 
         if isinstance(content, bytes):
             content = [content]
         self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id))
 
-    def _path(self, obj_id):
-        return os.path.join(self.wf.basepath, hashutil.hash_to_hex(obj_id))
+    def _path(self, obj_id: ObjId):
+        return os.path.join(self.wf.basepath, objid_to_default_hex(obj_id))
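The recurring replacement of ``hashutil.hash_to_hex`` with ``objid_to_default_hex`` across these backends keeps the hex output identical for bare sha1 digests while leaving room for composite ids; a minimal sketch of that equivalence:

from swh.model import hashutil
from swh.objstorage.objstorage import objid_to_default_hex

sha1 = bytes.fromhex("34973274ccef6ab4dfaaf86599792fa9c3fe4689")

# For a bare sha1 digest the helper matches the previous hash_to_hex behavior.
assert objid_to_default_hex(sha1) == hashutil.hash_to_hex(sha1)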
diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py
index 438553c..4463ecc 100644
--- a/swh/objstorage/tests/objstorage_testing.py
+++ b/swh/objstorage/tests/objstorage_testing.py
@@ -1,216 +1,216 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import inspect
 
 from swh.objstorage import exc
 from swh.objstorage.interface import ObjStorageInterface
 from swh.objstorage.objstorage import compute_hash
 
 
 class ObjStorageTestFixture:
     def test_types(self):
         """Checks all methods of ObjStorageInterface are implemented by this
         backend, and that they have the same signature."""
         # Create an instance of the protocol (which cannot be instantiated
         # directly, so this creates a subclass, then instantiates it)
         interface = type("_", (ObjStorageInterface,), {})()
 
         assert "get_batch" in dir(interface)
 
         missing_methods = []
 
         for meth_name in dir(interface):
             if meth_name.startswith("_") and meth_name not in (
                 "__iter__",
                 "__contains__",
             ):
                 continue
             interface_meth = getattr(interface, meth_name)
             concrete_meth = getattr(self.storage, meth_name)
 
             expected_signature = inspect.signature(interface_meth)
             actual_signature = inspect.signature(concrete_meth)
 
             assert expected_signature == actual_signature, meth_name
 
         assert missing_methods == []
 
         # If all the assertions above succeed, then this one should too.
         # But there's no harm in double-checking.
         # And we could replace the assertions above by this one, but unlike
         # the assertions above, it doesn't explain what is missing.
         assert isinstance(self.storage, ObjStorageInterface)
 
     def hash_content(self, content):
         obj_id = compute_hash(content)
         return content, obj_id
 
     def assertContentMatch(self, obj_id, expected_content):  # noqa
         content = self.storage.get(obj_id)
         self.assertEqual(content, expected_content)
 
     def test_check_config(self):
         self.assertTrue(self.storage.check_config(check_write=False))
         self.assertTrue(self.storage.check_config(check_write=True))
 
     def test_contains(self):
         content_p, obj_id_p = self.hash_content(b"contains_present")
         content_m, obj_id_m = self.hash_content(b"contains_missing")
         self.storage.add(content_p, obj_id=obj_id_p)
         self.assertIn(obj_id_p, self.storage)
         self.assertNotIn(obj_id_m, self.storage)
 
     def test_add_get_w_id(self):
         content, obj_id = self.hash_content(b"add_get_w_id")
         self.storage.add(content, obj_id=obj_id)
         self.assertContentMatch(obj_id, content)
 
     def test_add_twice(self):
         content, obj_id = self.hash_content(b"add_twice")
         self.storage.add(content, obj_id=obj_id)
         self.assertContentMatch(obj_id, content)
         self.storage.add(content, obj_id=obj_id, check_presence=False)
         self.assertContentMatch(obj_id, content)
 
     def test_add_big(self):
         content, obj_id = self.hash_content(b"add_big" * 1024 * 1024)
         self.storage.add(content, obj_id=obj_id)
         self.assertContentMatch(obj_id, content)
 
     def test_add_get_batch(self):
         content1, obj_id1 = self.hash_content(b"add_get_batch_1")
         content2, obj_id2 = self.hash_content(b"add_get_batch_2")
         self.storage.add(content1, obj_id1)
         self.storage.add(content2, obj_id2)
         cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2])
         self.assertEqual(cr1, content1)
         self.assertEqual(cr2, content2)
 
     def test_get_batch_unexisting_content(self):
         content, obj_id = self.hash_content(b"get_batch_unexisting_content")
         result = list(self.storage.get_batch([obj_id]))
         self.assertEqual(len(result), 1)
         self.assertIsNone(result[0])
 
     def test_restore_content(self):
         self.storage.allow_delete = True
 
         valid_content, valid_obj_id = self.hash_content(b"restore_content")
         invalid_content = b"unexpected content"
         self.storage.add(invalid_content, valid_obj_id)
         with self.assertRaises(exc.Error):
             self.storage.check(valid_obj_id)
         self.storage.restore(valid_content, valid_obj_id)
         self.assertContentMatch(valid_obj_id, valid_content)
 
     def test_get_missing(self):
         content, obj_id = self.hash_content(b"get_missing")
         with self.assertRaises(exc.ObjNotFoundError) as e:
             self.storage.get(obj_id)
 
         self.assertIn(obj_id, e.exception.args)
 
     def test_check_missing(self):
         content, obj_id = self.hash_content(b"check_missing")
         with self.assertRaises(exc.Error):
             self.storage.check(obj_id)
 
     def test_check_present(self):
         content, obj_id = self.hash_content(b"check_present")
         self.storage.add(content, obj_id)
         try:
             self.storage.check(obj_id)
         except exc.Error:
             self.fail("Integrity check failed")
 
     def test_delete_missing(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b"missing_content_to_delete")
         with self.assertRaises(exc.Error):
             self.storage.delete(obj_id)
 
     def test_delete_present(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b"content_to_delete")
         self.storage.add(content, obj_id=obj_id)
         self.assertTrue(self.storage.delete(obj_id))
         with self.assertRaises(exc.Error):
             self.storage.get(obj_id)
 
     def test_delete_not_allowed(self):
         self.storage.allow_delete = False
         content, obj_id = self.hash_content(b"content_to_delete")
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.storage.delete(obj_id)
 
     def test_delete_not_allowed_by_default(self):
         content, obj_id = self.hash_content(b"content_to_delete")
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.assertTrue(self.storage.delete(obj_id))
 
     def test_add_batch(self):
         contents = {}
         expected_content_add = 0
         expected_content_add_bytes = 0
         for i in range(50):
             content = b"Test content %02d" % i
             content, obj_id = self.hash_content(content)
             contents[obj_id] = content
             expected_content_add_bytes += len(content)
             expected_content_add += 1
 
         ret = self.storage.add_batch(contents)
 
         self.assertEqual(
             ret,
             {
                 "object:add": expected_content_add,
                 "object:add:bytes": expected_content_add_bytes,
             },
         )
         for obj_id in contents:
             self.assertIn(obj_id, self.storage)
 
     def test_content_iterator(self):
         sto_obj_ids = iter(self.storage)
         sto_obj_ids = list(sto_obj_ids)
         self.assertFalse(sto_obj_ids)
 
-        obj_ids = set()
+        obj_ids = []
         for i in range(100):
             content, obj_id = self.hash_content(b"content %d" % i)
             self.storage.add(content, obj_id=obj_id)
-            obj_ids.add(obj_id)
+            obj_ids.append({"sha1": obj_id})
 
-        sto_obj_ids = set(self.storage)
-        self.assertEqual(sto_obj_ids, obj_ids)
+        sto_obj_ids = list(self.storage)
+        self.assertCountEqual(sto_obj_ids, obj_ids)
 
     def test_list_content(self):
         all_ids = []
         for i in range(1200):
             content = b"example %d" % i
             obj_id = compute_hash(content)
             self.storage.add(content, obj_id)
-            all_ids.append(obj_id)
-        all_ids.sort()
+            all_ids.append({"sha1": obj_id})
+        all_ids.sort(key=lambda d: d["sha1"])
 
         ids = list(self.storage.list_content())
         self.assertEqual(len(ids), 1200)
         self.assertEqual(ids[0], all_ids[0])
         self.assertEqual(ids[100], all_ids[100])
         self.assertEqual(ids[999], all_ids[999])
 
         ids = list(self.storage.list_content(limit=10))
         self.assertEqual(len(ids), 10)
         self.assertEqual(ids[0], all_ids[0])
         self.assertEqual(ids[9], all_ids[9])
 
         ids = list(self.storage.list_content(last_obj_id=all_ids[999], limit=100))
         self.assertEqual(len(ids), 100)
         self.assertEqual(ids[0], all_ids[1000])
         self.assertEqual(ids[9], all_ids[1009])
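
Note: the fixture changes above rely on iteration and list_content() yielding
CompositeObjId dicts (e.g. {"sha1": <digest>}) instead of bare digests. A minimal
sketch of how that looks from client code, assuming the in-memory backend
registered as "memory" in the factory:

    from swh.objstorage.factory import get_objstorage
    from swh.objstorage.objstorage import compute_hash

    storage = get_objstorage("memory")

    content = b"hello, objstorage"
    obj_id = compute_hash(content)  # bytes: sha1 digest of the content
    storage.add(content, obj_id=obj_id)

    # Iteration and list_content() yield dicts keyed by hash name,
    # e.g. {"sha1": b"..."}, not bare digests.
    for composite_id in storage.list_content(limit=10):
        assert composite_id == {"sha1": obj_id}
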
diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py
index 23a9735..d1f5568 100644
--- a/swh/objstorage/tests/test_objstorage_pathslicing.py
+++ b/swh/objstorage/tests/test_objstorage_pathslicing.py
@@ -1,154 +1,156 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import shutil
 import tempfile
 import unittest
 from unittest.mock import DEFAULT, patch
 
 from swh.model import hashutil
 from swh.objstorage import exc
 from swh.objstorage.constants import ID_DIGEST_LENGTH
 from swh.objstorage.factory import get_objstorage
 
 from .objstorage_testing import ObjStorageTestFixture
 
 
 class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase):
     compression = "none"
 
     def setUp(self):
         super().setUp()
         self.slicing = "0:2/2:4/4:6"
         self.tmpdir = tempfile.mkdtemp()
         self.storage = get_objstorage(
             "pathslicing",
             root=self.tmpdir,
             slicing=self.slicing,
             compression=self.compression,
         )
 
     def tearDown(self):
         super().tearDown()
         shutil.rmtree(self.tmpdir)
 
     def content_path(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return self.storage.slicer.get_path(hex_obj_id)
 
     def test_iter(self):
         content, obj_id = self.hash_content(b"iter")
         self.assertEqual(list(iter(self.storage)), [])
         self.storage.add(content, obj_id=obj_id)
-        self.assertEqual(list(iter(self.storage)), [obj_id])
+        self.assertEqual(
+            list(iter(self.storage)), [{self.storage.PRIMARY_HASH: obj_id}]
+        )
 
     def test_len(self):
         content, obj_id = self.hash_content(b"len")
         self.assertEqual(len(self.storage), 0)
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(len(self.storage), 1)
 
     def test_check_ok(self):
         content, obj_id = self.hash_content(b"check_ok")
         self.storage.add(content, obj_id=obj_id)
         assert self.storage.check(obj_id) is None
         assert self.storage.check(obj_id.hex()) is None
 
     def test_check_id_mismatch(self):
         content, obj_id = self.hash_content(b"check_id_mismatch")
         self.storage.add(b"unexpected content", obj_id=obj_id)
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
         self.assertEqual(
             (
                 "Corrupt object %s should have id "
                 "12ebb2d6c81395bcc5cab965bdff640110cb67ff" % obj_id.hex(),
             ),
             error.exception.args,
         )
 
     def test_iterate_from(self):
         all_ids = []
         for i in range(100):
             content, obj_id = self.hash_content(b"content %d" % i)
             self.storage.add(content, obj_id=obj_id)
-            all_ids.append(obj_id)
-        all_ids.sort()
+            all_ids.append({self.storage.PRIMARY_HASH: obj_id})
+        all_ids.sort(key=lambda d: d[self.storage.PRIMARY_HASH])
 
         ids = list(self.storage.iter_from(b"\x00" * ID_DIGEST_LENGTH))
         self.assertEqual(len(ids), len(all_ids))
         self.assertEqual(ids, all_ids)
 
         ids = list(self.storage.iter_from(all_ids[0]))
         self.assertEqual(len(ids), len(all_ids) - 1)
         self.assertEqual(ids, all_ids[1:])
 
         ids = list(self.storage.iter_from(all_ids[-1], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 1)
         self.assertEqual(len(ids), 0)
 
         ids = list(self.storage.iter_from(all_ids[-2], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 2)  # beware, this depends on the hash algo
         self.assertEqual(len(ids), 1)
         self.assertEqual(ids, all_ids[-1:])
 
     def test_fdatasync_default(self):
         content, obj_id = self.hash_content(b"check_fdatasync")
         with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         if self.storage.use_fdatasync:
             assert patched["fdatasync"].call_count == 1
             assert patched["fsync"].call_count == 0
         else:
             assert patched["fdatasync"].call_count == 0
             assert patched["fsync"].call_count == 1
 
     def test_fdatasync_forced_on(self):
         self.storage.use_fdatasync = True
         content, obj_id = self.hash_content(b"check_fdatasync")
         with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched["fdatasync"].call_count == 1
         assert patched["fsync"].call_count == 0
 
     def test_fdatasync_forced_off(self):
         self.storage.use_fdatasync = False
         content, obj_id = self.hash_content(b"check_fdatasync")
         with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched["fdatasync"].call_count == 0
         assert patched["fsync"].call_count == 1
 
     def test_check_not_compressed(self):
         content, obj_id = self.hash_content(b"check_not_compressed")
         self.storage.add(content, obj_id=obj_id)
         with open(self.content_path(obj_id), "ab") as f:  # Add garbage.
             f.write(b"garbage")
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
         if self.compression == "none":
             self.assertIn("Corrupt object", error.exception.args[0])
         else:
             self.assertIn("trailing data found", error.exception.args[0])
 
 
 class TestPathSlicingObjStorageGzip(TestPathSlicingObjStorage):
     compression = "gzip"
 
 
 class TestPathSlicingObjStorageZlib(TestPathSlicingObjStorage):
     compression = "zlib"
 
 
 class TestPathSlicingObjStorageBz2(TestPathSlicingObjStorage):
     compression = "bz2"
 
 
 class TestPathSlicingObjStorageLzma(TestPathSlicingObjStorage):
     compression = "lzma"
diff --git a/swh/objstorage/tests/test_readonly_filter.py b/swh/objstorage/tests/test_readonly_filter.py
index 39ef57c..5ee7c74 100644
--- a/swh/objstorage/tests/test_readonly_filter.py
+++ b/swh/objstorage/tests/test_readonly_filter.py
@@ -1,85 +1,85 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import random
 import shutil
 from string import ascii_lowercase
 import tempfile
 import unittest
 
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.factory import get_objstorage
 from swh.objstorage.multiplexer.filter import read_only
 from swh.objstorage.objstorage import compute_hash
 
 
 def get_random_content():
     return bytes("".join(random.sample(ascii_lowercase, 10)), "utf8")
 
 
 class ReadOnlyFilterTestCase(unittest.TestCase):
     # Read only filter should not allow writing
 
     def setUp(self):
         super().setUp()
         self.tmpdir = tempfile.mkdtemp()
         pstorage = {
             "cls": "pathslicing",
             "root": self.tmpdir,
             "slicing": "0:5",
         }
         base_storage = get_objstorage(**pstorage)
         self.storage = get_objstorage(
             "filtered", storage_conf=pstorage, filters_conf=[read_only()]
         )
         self.valid_content = b"pre-existing content"
         self.invalid_content = b"invalid_content"
         self.true_invalid_content = b"Anything that is not correct"
         self.absent_content = b"non-existent content"
         # Create a valid content.
         self.valid_id = compute_hash(self.valid_content)
         base_storage.add(self.valid_content, obj_id=self.valid_id)
         # Create an invalid id and add a content with it.
         self.invalid_id = compute_hash(self.true_invalid_content)
         base_storage.add(self.invalid_content, obj_id=self.invalid_id)
         # Compute an id for a non-existing content.
         self.absent_id = compute_hash(self.absent_content)
 
     def tearDown(self):
         super().tearDown()
         shutil.rmtree(self.tmpdir)
 
     def test_can_contains(self):
         self.assertTrue(self.valid_id in self.storage)
         self.assertTrue(self.invalid_id in self.storage)
         self.assertFalse(self.absent_id in self.storage)
 
     def test_can_iter(self):
-        self.assertIn(self.valid_id, iter(self.storage))
-        self.assertIn(self.invalid_id, iter(self.storage))
+        self.assertIn({"sha1": self.valid_id}, iter(self.storage))
+        self.assertIn({"sha1": self.invalid_id}, iter(self.storage))
 
     def test_can_len(self):
         self.assertEqual(2, len(self.storage))
 
     def test_can_get(self):
         self.assertEqual(self.valid_content, self.storage.get(self.valid_id))
         self.assertEqual(self.invalid_content, self.storage.get(self.invalid_id))
 
     def test_can_check(self):
         with self.assertRaises(ObjNotFoundError):
             self.storage.check(self.absent_id)
         with self.assertRaises(Error):
             self.storage.check(self.invalid_id)
         self.storage.check(self.valid_id)
 
     def test_cannot_add(self):
         new_id = self.storage.add(b"New content")
         result = self.storage.add(self.valid_content, self.valid_id)
         self.assertIsNone(new_id, self.storage)
         self.assertIsNone(result)
 
     def test_cannot_restore(self):
         result = self.storage.restore(self.valid_content, self.valid_id)
         self.assertIsNone(result)
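
Note: taken together, the read-only filter tests above show that reads (contains,
iteration, len, get, check) pass through to the wrapped storage, while writes
(add, restore) are dropped and return None. A minimal sketch mirroring the setUp
above (the temporary directory is illustrative):

    import tempfile

    from swh.objstorage.factory import get_objstorage
    from swh.objstorage.multiplexer.filter import read_only
    from swh.objstorage.objstorage import compute_hash

    conf = {"cls": "pathslicing", "root": tempfile.mkdtemp(), "slicing": "0:5"}

    base = get_objstorage(**conf)  # writable backend
    readonly = get_objstorage(
        "filtered", storage_conf=conf, filters_conf=[read_only()]
    )

    content = b"read-only example"
    obj_id = compute_hash(content)
    base.add(content, obj_id=obj_id)  # write through the base storage

    assert readonly.get(obj_id) == content  # reads go through the filter
    assert readonly.add(b"new content") is None  # writes are silently dropped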