diff --git a/swh/fuse/cli.py b/swh/fuse/cli.py
index 005c028..b8a4957 100644
--- a/swh/fuse/cli.py
+++ b/swh/fuse/cli.py
@@ -1,200 +1,201 @@
 # Copyright (C) 2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 # WARNING: do not import unnecessary things here to keep cli startup time under
 # control
 import os
 from pathlib import Path
 from typing import Any, Dict
 
 import click
 
 from swh.core.cli import CONTEXT_SETTINGS
 from swh.core.cli import swh as swh_cli_group
 from swh.model.cli import SWHIDParamType
 
 # All generic config code should reside in swh.core.config
 DEFAULT_CONFIG_PATH = os.environ.get(
     "SWH_CONFIG_FILE", os.path.join(click.get_app_dir("swh"), "global.yml")
 )
 
 CACHE_HOME_DIR: Path = (
     Path(os.environ["XDG_CACHE_HOME"])
     if "XDG_CACHE_HOME" in os.environ
     else Path.home() / ".cache"
 )
 
 DEFAULT_CONFIG: Dict[str, Any] = {
     "cache": {
         "metadata": {"path": CACHE_HOME_DIR / "swh/fuse/metadata.sqlite"},
         "blob": {"path": CACHE_HOME_DIR / "swh/fuse/blob.sqlite"},
         "history": {"path": CACHE_HOME_DIR / "swh/fuse/history.sqlite"},
         "direntry": {"maxram": "10%"},
     },
     "web-api": {
         "url": "https://archive.softwareheritage.org/api/1",
         "auth-token": None,
     },
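+    # Sharding of virtual directories that can hold many entries (e.g. the
+    # by-hash history view): `depth` levels of `length`-character hash prefixes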
+    "sharding": {"depth": 1, "length": 2,},
 }
 
 
 @swh_cli_group.group(name="fs", context_settings=CONTEXT_SETTINGS)
 @click.option(
     "-C",
     "--config-file",
     default=None,
     type=click.Path(exists=True, dir_okay=False, path_type=str),
     help=f"Configuration file (default: {DEFAULT_CONFIG_PATH})",
 )
 @click.pass_context
 def fuse(ctx, config_file):
     """Software Heritage virtual file system"""
 
     import logging
     import pprint
 
     from swh.core import config
 
     if not config_file:
         config_file = DEFAULT_CONFIG_PATH
 
     try:
         logging.info(f"Loading configuration from: {config_file}")
         conf = config.read_raw_config(config.config_basepath(config_file))
         if not conf:
             raise ValueError(f"Cannot parse configuration file: {config_file}")
 
         if config_file == DEFAULT_CONFIG_PATH:
             try:
                 conf = conf["swh"]["fuse"]
             except KeyError:
                 pass
 
         # recursive merge not done by config.read
         conf = config.merge_configs(DEFAULT_CONFIG, conf)
     except Exception as err:
         logging.warning(f"Using default configuration (cannot load custom one: {err})")
         conf = DEFAULT_CONFIG
 
     logging.info(f"Read configuration: \n{pprint.pformat(conf)}")
     ctx.ensure_object(dict)
     ctx.obj["config"] = conf
 
 
 @fuse.command(name="mount")
 @click.argument(
     "path",
     required=True,
     metavar="PATH",
     type=click.Path(exists=True, dir_okay=True, file_okay=False),
 )
 @click.argument("swhids", nargs=-1, metavar="[SWHID]...", type=SWHIDParamType())
 @click.option(
     "-f/-d",
     "--foreground/--daemon",
     default=False,
     help="whether to run FUSE attached to the console (foreground) "
     "or daemonized in the background (default: daemon)",
 )
 @click.pass_context
 def mount(ctx, swhids, path, foreground):
     """Mount the Software Heritage virtual file system at PATH.
 
     If specified, objects referenced by the given SWHIDs will be prefetched and used to
     populate the virtual file system (VFS). Otherwise the VFS will be populated
     on-demand, when accessing its content.
 
     \b
     Example:
 
     \b
       $ mkdir swhfs
       $ swh fs mount swhfs/
       $ grep printf swhfs/archive/swh:1:cnt:c839dea9e8e6f0528b468214348fee8669b305b2
           printf("Hello, World!");
       $
 
     """
 
     import asyncio
     from contextlib import ExitStack
     import logging
+    import logging.config
 
     from daemon import DaemonContext
 
     from swh.fuse import fuse
 
     # TODO: set default logging settings when --log-config is not passed
     # DEFAULT_LOG_PATH = Path(".local/swh/fuse/mount.log")
     with ExitStack() as stack:
         if not foreground:
             # TODO: temporary fix until swh.core has the proper logging utilities
             # Disable logging config before daemonizing, and reset it once
             # daemonized to be sure to not close file handlers
             logging.shutdown()
             # Stay in the current working directory when spawning daemon
             cwd = os.getcwd()
             stack.enter_context(DaemonContext(working_directory=cwd))
             logging.config.dictConfig(
                 {
                     "version": 1,
                     "handlers": {
                         "syslog": {
                             "class": "logging.handlers.SysLogHandler",
                             "address": "/dev/log",
                         },
                     },
                     "root": {"level": ctx.obj["log_level"], "handlers": ["syslog"],},
                 }
             )
 
         conf = ctx.obj["config"]
         asyncio.run(fuse.main(swhids, path, conf))
 
 
 @fuse.command()
 @click.argument(
     "path",
     required=True,
     metavar="PATH",
     type=click.Path(exists=True, dir_okay=True, file_okay=False),
 )
 @click.pass_context
 def umount(ctx, path):
     """Unmount a mounted virtual file system.
 
     Note: this is equivalent to ``fusermount -u PATH``, which can be used to unmount any
     FUSE-based virtual file system. See ``man fusermount3``.
 
     """
     import logging
     import subprocess
 
     try:
         subprocess.run(["fusermount", "-u", path], check=True)
     except subprocess.CalledProcessError as err:
         logging.error(
             f"cannot unmount virtual file system: "
             f"\"{' '.join(err.cmd)}\" returned exit status {err.returncode}"
         )
         ctx.exit(1)
 
 
 @fuse.command()
 @click.pass_context
 def clean(ctx):
     """Clean on-disk cache(s).
 
     """
 
     def rm_cache(conf, cache_name):
         try:
             conf["cache"][cache_name]["path"].unlink(missing_ok=True)
         except KeyError:
             pass
 
     conf = ctx.obj["config"]
     for cache_name in ["blob", "metadata", "history"]:
         rm_cache(conf, cache_name)
diff --git a/swh/fuse/fs/artifact.py b/swh/fuse/fs/artifact.py
index e3105f8..52a243d 100644
--- a/swh/fuse/fs/artifact.py
+++ b/swh/fuse/fs/artifact.py
@@ -1,329 +1,329 @@
 # Copyright (C) 2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Any, AsyncIterator, List
 import urllib.parse
 
 from swh.fuse.fs.entry import (
     EntryMode,
     FuseDirEntry,
+    FuseDirEntryShardByHash,
     FuseEntry,
     FuseFileEntry,
     FuseSymlinkEntry,
 )
 from swh.model.from_disk import DentryPerms
 from swh.model.identifiers import CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT, SWHID
 
 
 @dataclass
 class Content(FuseFileEntry):
     """ Software Heritage content artifact.
 
     Attributes:
         swhid: Software Heritage persistent identifier
         prefetch: optional prefetched metadata used to set entry attributes
 
     Content leaves (AKA blobs) are represented on disk as regular files,
     containing the corresponding bytes, as archived.
 
     Note that permissions are associated to blobs only in the context of
     directories. Hence, when accessing blobs from the top-level `archive/`
     directory, the permissions of the `archive/SWHID` file will be arbitrary and
     not meaningful (e.g., `0o644`). """
 
     swhid: SWHID
     prefetch: Any = None
 
     async def get_content(self) -> bytes:
         data = await self.fuse.get_blob(self.swhid)
         if not self.prefetch:
             self.prefetch = {"length": len(data)}
         return data
 
     async def size(self) -> int:
         if self.prefetch:
             return self.prefetch["length"]
         else:
             return len(await self.get_content())
 
 
 @dataclass
 class Directory(FuseDirEntry):
     """ Software Heritage directory artifact.
 
     Attributes:
         swhid: Software Heritage persistent identifier
 
     Directory nodes are represented as directories on the file-system,
     containing one entry for each entry of the archived directory. Entry names
     and other metadata, including permissions, will correspond to the archived
     entry metadata.
 
     Note that the FUSE mount is read-only, no matter what the permissions say.
     So it is possible that, in the context of a directory, a file is presented
     as writable, whereas actually writing to it will fail with `EPERM`. """
 
     swhid: SWHID
 
     async def compute_entries(self) -> AsyncIterator[FuseEntry]:
         metadata = await self.fuse.get_metadata(self.swhid)
         for entry in metadata:
             name = entry["name"]
             swhid = entry["target"]
             mode = (
                 # Archived permissions for directories are always set to
                 # 0o040000 so use a read-only permission instead
                 int(EntryMode.RDONLY_DIR)
                 if swhid.object_type == DIRECTORY
                 else entry["perms"]
             )
 
             # 1. Regular file
             if swhid.object_type == CONTENT:
                 yield self.create_child(
                     Content,
                     name=name,
                     mode=mode,
                     swhid=swhid,
                     # The directory API has extra info we can use to set
                     # attributes without additional Software Heritage API call
                     prefetch=entry,
                 )
             # 2. Regular directory
             elif swhid.object_type == DIRECTORY:
                 yield self.create_child(
                     Directory, name=name, mode=mode, swhid=swhid,
                 )
             # 3. Symlink
             elif mode == DentryPerms.symlink:
                 yield self.create_child(
                     FuseSymlinkEntry,
                     name=name,
                     # Symlink target is stored in the blob content
                     target=await self.fuse.get_blob(swhid),
                 )
             # 4. Submodule
             elif swhid.object_type == REVISION:
                 # Make sure the revision metadata is fetched and create a
                 # symlink to distinguish it from regular directories
                 await self.fuse.get_metadata(swhid)
                 yield self.create_child(
                     FuseSymlinkEntry,
                     name=name,
                     target=Path(self.get_relative_root_path(), f"archive/{swhid}"),
                 )
             else:
                 raise ValueError(f"Unknown directory entry type: {swhid.object_type}")
 
 
 @dataclass
 class Revision(FuseDirEntry):
     """ Software Heritage revision artifact.
 
     Attributes:
         swhid: Software Heritage persistent identifier
 
     Revision (AKA commit) nodes are represented on the file-system as
     directories with the following entries:
 
     - `root`: source tree at the time of the commit, as a symlink pointing into
       `archive/`, to a SWHID of type `dir`
     - `parents/` (note the plural): a virtual directory containing entries named
       `1`, `2`, `3`, etc., one for each parent commit. Each of these entries is a
       symlink pointing into `archive/`, to the SWHID file for the given parent
       commit
     - `parent` (note the singular): present if and only if the current commit
       has at least one parent commit (which is the most common case). When
       present it is a symlink pointing into `parents/1/`
-    - `history`: a virtual directory listing all its revision ancestors, sorted
-      in reverse topological order. Each entry is a symlink pointing into
-      `archive/SWHID`.
+    - `history`: a virtual directory listing all its revision ancestors. Entries
+      are grouped under a `by-hash/` sub-directory, sharded by hash prefix; each
+      leaf entry is a symlink pointing into `archive/SWHID`.
     - `meta.json`: metadata for the current node, as a symlink pointing to the
       relevant `meta/<SWHID>.json` file """
 
     swhid: SWHID
 
     async def compute_entries(self) -> AsyncIterator[FuseEntry]:
         metadata = await self.fuse.get_metadata(self.swhid)
         directory = metadata["directory"]
         parents = metadata["parents"]
 
         root_path = self.get_relative_root_path()
 
         yield self.create_child(
             FuseSymlinkEntry,
             name="root",
             target=Path(root_path, f"archive/{directory}"),
         )
         yield self.create_child(
             FuseSymlinkEntry,
             name="meta.json",
             target=Path(root_path, f"meta/{self.swhid}.json"),
         )
         yield self.create_child(
             RevisionParents,
             name="parents",
             mode=int(EntryMode.RDONLY_DIR),
             parents=[x["id"] for x in parents],
         )
 
         if len(parents) >= 1:
             yield self.create_child(
                 FuseSymlinkEntry, name="parent", target="parents/1/",
             )
 
         yield self.create_child(
             RevisionHistory,
             name="history",
             mode=int(EntryMode.RDONLY_DIR),
             swhid=self.swhid,
         )
 
 
 @dataclass
 class RevisionParents(FuseDirEntry):
     """ Revision virtual `parents/` directory """
 
     parents: List[SWHID]
 
     async def compute_entries(self) -> AsyncIterator[FuseEntry]:
         root_path = self.get_relative_root_path()
         for i, parent in enumerate(self.parents):
             yield self.create_child(
                 FuseSymlinkEntry,
                 name=str(i + 1),
                 target=Path(root_path, f"archive/{parent}"),
             )
 
 
 @dataclass
 class RevisionHistory(FuseDirEntry):
     """ Revision virtual `history/` directory """
 
     swhid: SWHID
 
     async def compute_entries(self) -> AsyncIterator[FuseEntry]:
         history = await self.fuse.get_history(self.swhid)
-        root_path = self.get_relative_root_path()
-        for swhid in history:
-            yield self.create_child(
-                FuseSymlinkEntry,
-                name=str(swhid),
-                target=Path(root_path, f"archive/{swhid}"),
-            )
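+        # Expose history entries through a sharded "by-hash" directory (see
+        # FuseDirEntryShardByHash) rather than one huge flat listing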
+        yield self.create_child(
+            FuseDirEntryShardByHash,
+            name="by-hash",
+            mode=int(EntryMode.RDONLY_DIR),
+            swhids=history,
+        )
 
 
 @dataclass
 class Release(FuseDirEntry):
     """ Software Heritage release artifact.
 
     Attributes:
         swhid: Software Heritage persistent identifier
 
     Release nodes are represented on the file-system as directories with the
     following entries:
 
     - `target`: target node, as a symlink to `archive/<SWHID>`
     - `target_type`: regular file containing the type of the target SWHID
     - `root`: present if and only if the release points to something that
       (transitively) resolves to a directory. When present it is a symlink
       pointing into `archive/` to the SWHID of the given directory
     - `meta.json`: metadata for the current node, as a symlink pointing to the
       relevant `meta/<SWHID>.json` file """
 
     swhid: SWHID
 
     async def find_root_directory(self, swhid: SWHID) -> SWHID:
         if swhid.object_type == RELEASE:
             metadata = await self.fuse.get_metadata(swhid)
             return await self.find_root_directory(metadata["target"])
         elif swhid.object_type == REVISION:
             metadata = await self.fuse.get_metadata(swhid)
             return metadata["directory"]
         elif swhid.object_type == DIRECTORY:
             return swhid
         else:
             return None
 
     async def compute_entries(self) -> AsyncIterator[FuseEntry]:
         metadata = await self.fuse.get_metadata(self.swhid)
         root_path = self.get_relative_root_path()
 
         yield self.create_child(
             FuseSymlinkEntry,
             name="meta.json",
             target=Path(root_path, f"meta/{self.swhid}.json"),
         )
 
         target = metadata["target"]
         yield self.create_child(
             FuseSymlinkEntry, name="target", target=Path(root_path, f"archive/{target}")
         )
         yield self.create_child(
             ReleaseType,
             name="target_type",
             mode=int(EntryMode.RDONLY_FILE),
             target_type=target.object_type,
         )
 
         target_dir = await self.find_root_directory(target)
         if target_dir is not None:
             yield self.create_child(
                 FuseSymlinkEntry,
                 name="root",
                 target=Path(root_path, f"archive/{target_dir}"),
             )
 
 
 @dataclass
 class ReleaseType(FuseFileEntry):
     """ Release type virtual file """
 
     target_type: str
 
     async def get_content(self) -> bytes:
         return str.encode(self.target_type + "\n")
 
     async def size(self) -> int:
         return len(await self.get_content())
 
 
 @dataclass
 class Snapshot(FuseDirEntry):
     """ Software Heritage snapshot artifact.
 
     Attributes:
         swhid: Software Heritage persistent identifier
 
     Snapshot nodes are represented on the file-system as directories with one
     entry for each branch in the snapshot. Each entry is a symlink pointing into
     `archive/` to the branch target SWHID. Branch names are URL encoded (hence
     '/' is replaced with '%2F'). """
 
     swhid: SWHID
 
     async def compute_entries(self) -> AsyncIterator[FuseEntry]:
         metadata = await self.fuse.get_metadata(self.swhid)
         root_path = self.get_relative_root_path()
 
         for branch_name, branch_meta in metadata.items():
             # Mangle branch name to create a valid UNIX filename
             name = urllib.parse.quote_plus(branch_name)
             yield self.create_child(
                 FuseSymlinkEntry,
                 name=name,
                 target=Path(root_path, f"archive/{branch_meta['target']}"),
             )
 
 
 OBJTYPE_GETTERS = {
     CONTENT: Content,
     DIRECTORY: Directory,
     REVISION: Revision,
     RELEASE: Release,
     SNAPSHOT: Snapshot,
 }
diff --git a/swh/fuse/fs/entry.py b/swh/fuse/fs/entry.py
index 2ad91f7..8833d9f 100644
--- a/swh/fuse/fs/entry.py
+++ b/swh/fuse/fs/entry.py
@@ -1,124 +1,180 @@
 # Copyright (C) 2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from __future__ import annotations
 
 from dataclasses import dataclass, field
 from enum import IntEnum
 from pathlib import Path
 from stat import S_IFDIR, S_IFLNK, S_IFREG
-from typing import Any, AsyncIterator, Sequence, Union
+from typing import Any, AsyncIterator, List, Sequence, Union
+
+from swh.model.identifiers import SWHID
 
 # Avoid circular import
 Fuse = "Fuse"
 
 
 class EntryMode(IntEnum):
     """ Default entry mode and permissions for the FUSE.
 
     The FUSE mount is always read-only, even if permissions contradict this
     statement (in a context of a directory, entries are listed with permissions
     taken from the archive).
     """
 
     RDONLY_FILE = S_IFREG | 0o444
     RDONLY_DIR = S_IFDIR | 0o555
     SYMLINK = S_IFLNK | 0o444
 
 
 @dataclass
 class FuseEntry:
     """ Main wrapper class to manipulate virtual FUSE entries
 
     Attributes:
         name: entry filename
         mode: entry permission mode
         fuse: internal reference to the main FUSE class
         inode: unique integer identifying the entry
     """
 
     name: str
     mode: int
     depth: int
     fuse: Fuse
     inode: int = field(init=False)
 
     def __post_init__(self):
         self.inode = self.fuse._alloc_inode(self)
 
     async def size(self) -> int:
         """ Return the size (in bytes) of an entry """
 
         raise NotImplementedError
 
     def get_relative_root_path(self) -> str:
         return "../" * (self.depth - 1)
 
     def create_child(self, constructor: Any, **kwargs) -> FuseEntry:
         return constructor(depth=self.depth + 1, fuse=self.fuse, **kwargs)
 
 
 class FuseFileEntry(FuseEntry):
     """ FUSE virtual file entry """
 
     async def get_content(self) -> bytes:
         """ Return the content of a file entry """
 
         raise NotImplementedError
 
 
 class FuseDirEntry(FuseEntry):
     """ FUSE virtual directory entry """
 
     async def size(self) -> int:
         return 0
 
     async def compute_entries(self) -> Sequence[FuseEntry]:
         """ Return the child entries of a directory entry """
 
         raise NotImplementedError
 
     async def get_entries(self, offset: int = 0) -> AsyncIterator[FuseEntry]:
         """ Return the child entries of a directory entry using direntry cache """
 
         cache = self.fuse.cache.direntry.get(self)
         if cache:
             entries = cache
         else:
             entries = [x async for x in self.compute_entries()]
             self.fuse.cache.direntry.set(self, entries)
 
         # Avoid copy by manual iteration (instead of slicing) and use of a
         # generator (instead of returning the full list every time)
         for i in range(offset, len(entries)):
             yield entries[i]
 
     async def lookup(self, name: str) -> FuseEntry:
         """ Look up a FUSE entry by name """
 
         async for entry in self.get_entries():
             if entry.name == name:
                 return entry
         return None
 
 
+@dataclass
+class FuseDirEntryShardByHash(FuseDirEntry):
+    """ FUSE virtual directory entry sharded by SWHID hash """
+
+    swhids: List[SWHID]
+    prefix: str = field(default="")
+
+    def get_full_sharded_name(self, swhid: SWHID) -> str:
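+        # Example with the default config (depth=1, length=2):
+        # swh:1:rev:b8cedc00... -> "b8/swh:1:rev:b8cedc00..."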
+        sharding_depth = self.fuse.conf["sharding"]["depth"]
+        sharding_length = self.fuse.conf["sharding"]["length"]
+        if sharding_depth <= 0:
+            return str(swhid)
+        else:
+            basename = swhid.object_id
+            name, i = "", 0
+            for _ in range(sharding_depth):
+                name += basename[i : i + sharding_length]
+                name += "/"
+                i += sharding_length
+            # Always keep the full SWHID as the path basename (otherwise we
+            # lose the SWHID object type information)
+            name += str(swhid)
+            return name
+
+    async def compute_entries(self) -> AsyncIterator[FuseEntry]:
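+        # The current shard level is the number of hash prefixes already
+        # consumed, i.e. one "/" per level in self.prefix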
+        current_sharding_depth = self.prefix.count("/")
+        if current_sharding_depth == self.fuse.conf["sharding"]["depth"]:
+            root_path = self.get_relative_root_path()
+            for swhid in self.swhids:
+                yield self.create_child(
+                    FuseSymlinkEntry,
+                    name=str(swhid),
+                    target=Path(root_path, f"archive/{swhid}"),
+                )
+        else:
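+            # Group SWHIDs by their next hash prefix; each group becomes a
+            # sub-directory that shards further until the configured depth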
+            subdirs = {}
+            sharding_length = self.fuse.conf["sharding"]["length"]
+            prefix_length = len(self.prefix)
+            for swhid in self.swhids:
+                name = self.get_full_sharded_name(swhid)
+                next_prefix = name[prefix_length : prefix_length + sharding_length]
+                subdirs.setdefault(next_prefix, []).append(swhid)
+
+            # Recursive intermediate sharded directories
+            for subdir, subentries in subdirs.items():
+                yield self.create_child(
+                    FuseDirEntryShardByHash,
+                    name=subdir,
+                    mode=int(EntryMode.RDONLY_DIR),
+                    prefix=f"{self.prefix}{subdir}/",
+                    swhids=subentries,
+                )
+
+
 @dataclass
 class FuseSymlinkEntry(FuseEntry):
     """ FUSE virtual symlink entry
 
     Attributes:
         target: path to symlink target
     """
 
     mode: int = field(init=False, default=int(EntryMode.SYMLINK))
     target: Union[str, bytes, Path]
 
     async def size(self) -> int:
         return len(str(self.target))
 
     def get_target(self) -> Union[str, bytes, Path]:
         """ Return the path target of a symlink entry """
 
         return self.target
diff --git a/swh/fuse/fuse.py b/swh/fuse/fuse.py
index 36972b1..375c8f3 100644
--- a/swh/fuse/fuse.py
+++ b/swh/fuse/fuse.py
@@ -1,264 +1,265 @@
 # Copyright (C) 2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import asyncio
 import errno
 import logging
 import os
 from pathlib import Path
 import time
 from typing import Any, Dict, List
 
 import pyfuse3
 import pyfuse3_asyncio
 import requests
 
 from swh.fuse.cache import FuseCache
 from swh.fuse.fs.entry import FuseDirEntry, FuseEntry, FuseFileEntry, FuseSymlinkEntry
 from swh.fuse.fs.mountpoint import Root
 from swh.model.identifiers import CONTENT, REVISION, SWHID
 from swh.web.client.client import WebAPIClient
 
 
 class Fuse(pyfuse3.Operations):
     """ Software Heritage Filesystem in Userspace (FUSE). Locally mount parts of
     the archive and navigate it as a virtual file system. """
 
     def __init__(
         self, root_path: Path, cache: FuseCache, conf: Dict[str, Any],
     ):
         super(Fuse, self).__init__()
 
         self._next_inode: int = pyfuse3.ROOT_INODE
         self._inode2entry: Dict[int, FuseEntry] = {}
 
         self.root = Root(fuse=self)
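+        # Keep the mount configuration around (e.g. for sharding parameters)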
+        self.conf = conf
 
         self.time_ns: int = time.time_ns()  # start time, used as timestamp
         self.gid = os.getgid()
         self.uid = os.getuid()
 
         self.web_api = WebAPIClient(
             conf["web-api"]["url"], conf["web-api"]["auth-token"]
         )
         self.cache = cache
 
     def shutdown(self) -> None:
         pass
 
     def _alloc_inode(self, entry: FuseEntry) -> int:
         """ Return a unique inode integer for a given entry """
 
         inode = self._next_inode
         self._next_inode += 1
         self._inode2entry[inode] = entry
 
         # TODO add inode recycling with invocation to invalidate_inode when
         # the dicts get too big
 
         return inode
 
     def inode2entry(self, inode: int) -> FuseEntry:
         """ Return the entry matching a given inode """
 
         try:
             return self._inode2entry[inode]
         except KeyError:
             raise pyfuse3.FUSEError(errno.ENOENT)
 
     async def get_metadata(self, swhid: SWHID) -> Any:
         """ Retrieve metadata for a given SWHID using Software Heritage API """
 
         cache = await self.cache.metadata.get(swhid)
         if cache:
             return cache
 
         try:
             typify = False  # Get the raw JSON from the API
             # TODO: async web API
             loop = asyncio.get_event_loop()
             metadata = await loop.run_in_executor(None, self.web_api.get, swhid, typify)
             await self.cache.metadata.set(swhid, metadata)
             # Retrieve it from cache so it is correctly typed
             return await self.cache.metadata.get(swhid)
         except requests.HTTPError as err:
             logging.error(f"Cannot fetch metadata for object {swhid}: {err}")
             raise
 
     async def get_blob(self, swhid: SWHID) -> bytes:
         """ Retrieve the blob bytes for a given content SWHID using Software
         Heritage API """
 
         if swhid.object_type != CONTENT:
             raise pyfuse3.FUSEError(errno.EINVAL)
 
         # Make sure the metadata cache is also populated with the given SWHID
         await self.get_metadata(swhid)
 
         cache = await self.cache.blob.get(swhid)
         if cache:
             return cache
 
         try:
             loop = asyncio.get_event_loop()
             resp = await loop.run_in_executor(None, self.web_api.content_raw, swhid)
             blob = b"".join(list(resp))
             await self.cache.blob.set(swhid, blob)
             return blob
         except requests.HTTPError as err:
             logging.error(f"Cannot fetch blob for object {swhid}: {err}")
             raise
 
     async def get_history(self, swhid: SWHID) -> List[SWHID]:
         if swhid.object_type != REVISION:
             raise pyfuse3.FUSEError(errno.EINVAL)
 
         cache = await self.cache.history.get(swhid)
         if cache:
             return cache
 
         try:
             # Use the swh-graph API to retrieve the full history very fast
             call = f"graph/visit/edges/{swhid}?edges=rev:rev"
             loop = asyncio.get_event_loop()
             history = await loop.run_in_executor(None, self.web_api._call, call)
             await self.cache.history.set(history.text)
             # Retrieve it from cache so it is correctly typed
             return await self.cache.history.get(swhid)
         except requests.HTTPError as err:
             logging.error(f"Cannot fetch history for object {swhid}: {err}")
             # Ignore exception since swh-graph does not necessarily contain the
             # most recent artifacts from the archive. Computing the full history
             # from the Web API is too computationally intensive so simply return
             # an empty list.
             return []
 
     async def get_attrs(self, entry: FuseEntry) -> pyfuse3.EntryAttributes:
         """ Return entry attributes """
 
         attrs = pyfuse3.EntryAttributes()
         attrs.st_size = 0
         attrs.st_atime_ns = self.time_ns
         attrs.st_ctime_ns = self.time_ns
         attrs.st_mtime_ns = self.time_ns
         attrs.st_gid = self.gid
         attrs.st_uid = self.uid
         attrs.st_ino = entry.inode
         attrs.st_mode = entry.mode
         attrs.st_size = await entry.size()
         return attrs
 
     async def getattr(
         self, inode: int, _ctx: pyfuse3.RequestContext
     ) -> pyfuse3.EntryAttributes:
         """ Get attributes for a given inode """
 
         entry = self.inode2entry(inode)
         return await self.get_attrs(entry)
 
     async def opendir(self, inode: int, _ctx: pyfuse3.RequestContext) -> int:
         """ Open a directory referred by a given inode """
 
         # Re-use inode as directory handle
         return inode
 
     async def readdir(self, fh: int, offset: int, token: pyfuse3.ReaddirToken) -> None:
         """ Read entries in an open directory """
 
         # opendir() uses inode as directory handle
         inode = fh
         direntry = self.inode2entry(inode)
         assert isinstance(direntry, FuseDirEntry)
 
         next_id = offset + 1
         try:
             async for entry in direntry.get_entries(offset):
                 name = os.fsencode(entry.name)
                 attrs = await self.get_attrs(entry)
                 if not pyfuse3.readdir_reply(token, name, attrs, next_id):
                     break
 
                 next_id += 1
                 self._inode2entry[attrs.st_ino] = entry
         except Exception as err:
             logging.debug(f"Cannot readdir: {err}")
             raise pyfuse3.FUSEError(errno.ENOENT)
 
     async def open(
         self, inode: int, _flags: int, _ctx: pyfuse3.RequestContext
     ) -> pyfuse3.FileInfo:
         """ Open an inode and return a unique file handle """
 
         # Re-use inode as file handle
         return pyfuse3.FileInfo(fh=inode, keep_cache=True)
 
     async def read(self, fh: int, offset: int, length: int) -> bytes:
         """ Read `length` bytes from file handle `fh` at position `offset` """
 
         # open() uses inode as file handle
         inode = fh
 
         entry = self.inode2entry(inode)
         assert isinstance(entry, FuseFileEntry)
         try:
             data = await entry.get_content()
             return data[offset : offset + length]
         except Exception as err:
             logging.debug(f"Cannot read: {err}")
             raise pyfuse3.FUSEError(errno.ENOENT)
 
     async def lookup(
         self, parent_inode: int, name: str, _ctx: pyfuse3.RequestContext
     ) -> pyfuse3.EntryAttributes:
         """ Look up a directory entry by name and get its attributes """
 
         name = os.fsdecode(name)
         parent_entry = self.inode2entry(parent_inode)
         assert isinstance(parent_entry, FuseDirEntry)
         try:
             lookup_entry = await parent_entry.lookup(name)
             if lookup_entry:
                 return await self.get_attrs(lookup_entry)
             else:
                 raise ValueError(f"unknown name: {name}")
         except Exception as err:
             logging.debug(f"Cannot lookup: {err}")
             raise pyfuse3.FUSEError(errno.ENOENT)
 
     async def readlink(self, inode: int, _ctx: pyfuse3.RequestContext) -> bytes:
         entry = self.inode2entry(inode)
         assert isinstance(entry, FuseSymlinkEntry)
         return os.fsencode(entry.get_target())
 
 
 async def main(swhids: List[SWHID], root_path: Path, conf: Dict[str, Any]) -> None:
     """ swh-fuse CLI entry-point """
 
     # Use pyfuse3 asyncio layer to match the rest of Software Heritage codebase
     pyfuse3_asyncio.enable()
 
     async with FuseCache(conf["cache"]) as cache:
         fs = Fuse(root_path, cache, conf)
 
         # Initially populate the cache
         for swhid in swhids:
             try:
                 await fs.get_metadata(swhid)
             except Exception as err:
                 logging.error(f"Cannot prefetch object {swhid}: {err}")
 
         fuse_options = set(pyfuse3.default_options)
         fuse_options.add("fsname=swhfs")
         if logging.root.level <= logging.DEBUG:
             fuse_options.add("debug")
 
         try:
             pyfuse3.init(fs, root_path, fuse_options)
             await pyfuse3.main()
         except Exception as err:
             logging.error(f"Error running FUSE: {err}")
         finally:
             fs.shutdown()
             pyfuse3.close(unmount=True)
diff --git a/swh/fuse/tests/test_revision.py b/swh/fuse/tests/test_revision.py
index 7bb8718..cca7839 100644
--- a/swh/fuse/tests/test_revision.py
+++ b/swh/fuse/tests/test_revision.py
@@ -1,50 +1,56 @@
 import json
 import os
 
 from swh.fuse.tests.api_url import GRAPH_API_REQUEST
 from swh.fuse.tests.common import (
     check_dir_name_entries,
     get_data_from_graph_archive,
     get_data_from_web_archive,
 )
 from swh.fuse.tests.data.config import REV_SMALL_HISTORY, ROOT_DIR, ROOT_REV
+from swh.model.identifiers import parse_swhid
 
 
 def test_access_meta(fuse_mntdir):
     file_path = fuse_mntdir / "archive" / ROOT_REV / "meta.json"
     expected = json.dumps(get_data_from_web_archive(ROOT_REV))
     assert file_path.read_text() == expected
 
 
 def test_list_root(fuse_mntdir):
     dir_path = fuse_mntdir / "archive" / ROOT_REV / "root"
     check_dir_name_entries(dir_path, ROOT_DIR)
 
 
 def test_list_parents(fuse_mntdir):
     rev_meta = get_data_from_web_archive(ROOT_REV)
     dir_path = fuse_mntdir / "archive" / ROOT_REV / "parents"
     for i, parent in enumerate(rev_meta["parents"]):
         parent_path = dir_path / str(i + 1)
         parent_swhid = f"swh:1:rev:{parent['id']}"
         assert parent_path.is_symlink()
         assert os.readlink(parent_path) == f"../../../archive/{parent_swhid}"
 
 
 def test_list_parent(fuse_mntdir):
     file_path = fuse_mntdir / "archive" / ROOT_REV / "parent"
     assert file_path.is_symlink()
     assert os.readlink(file_path) == "parents/1/"
 
 
 def test_list_history(fuse_mntdir):
-    dir_path = fuse_mntdir / "archive" / REV_SMALL_HISTORY / "history"
+    dir_path = fuse_mntdir / "archive" / REV_SMALL_HISTORY / "history/by-hash"
     history_meta = get_data_from_graph_archive(
         REV_SMALL_HISTORY, GRAPH_API_REQUEST.HISTORY
     )
     history = history_meta.strip()
     # Only keep the second node of each edge: the first node is either redundant
     # information or the root node (hence not an ancestor)
-    expected = [edge.split(" ")[1] for edge in history.split("\n")]
-    actual = os.listdir(dir_path)
-    assert set(actual) == set(expected)
+    expected = {edge.split(" ")[1] for edge in history.split("\n")}
+
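+    # The assertions below assume the default sharding config (depth=1,
+    # length=2): each ancestor lives under a two-character hash-prefix directory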
+    for swhid_str in expected:
+        swhid = parse_swhid(swhid_str)
+        shard_dir = swhid.object_id[:2]
+        assert (dir_path / shard_dir).exists()
+        assert str(swhid) in os.listdir(dir_path / shard_dir)