diff --git a/swh/web/browse/snapshot_context.py b/swh/web/browse/snapshot_context.py
index 58116705..c21531df 100644
--- a/swh/web/browse/snapshot_context.py
+++ b/swh/web/browse/snapshot_context.py
@@ -1,1334 +1,1322 @@
 # Copyright (C) 2018-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 # Utility module for browsing the archive in a snapshot context.
 
 from collections import defaultdict
 from typing import Any, Dict, List, Optional, Tuple
 
-from django.core.cache import cache
 from django.shortcuts import render
 from django.utils.html import escape
 
 from swh.model.hashutil import hash_to_bytes
 from swh.model.model import Snapshot
 from swh.model.swhids import CoreSWHID, ObjectType
 from swh.web.browse.utils import (
     format_log_entries,
     gen_release_link,
     gen_revision_link,
     gen_revision_log_link,
     gen_revision_url,
     gen_snapshot_link,
     get_directory_entries,
     get_readme_to_display,
 )
 from swh.web.common import archive
 from swh.web.common.exc import BadInputExc, NotFoundExc, http_status_code_message
 from swh.web.common.identifiers import get_swhids_info
 from swh.web.common.origin_visits import get_origin_visit
 from swh.web.common.typing import (
     DirectoryMetadata,
     OriginInfo,
     SnapshotBranchInfo,
     SnapshotContext,
     SnapshotReleaseInfo,
     SWHObjectInfo,
 )
 from swh.web.common.utils import (
+    django_cache,
     format_utc_iso_date,
     gen_path_info,
     reverse,
     swh_object_icons,
 )
 from swh.web.config import get_config
 
 _empty_snapshot_id = Snapshot(branches={}).id.hex()
 
 
 def _get_branch(branches, branch_name, snapshot_id):
     """
     Utility function to get a specific branch from a snapshot.
     Returns None if the branch cannot be found.

     Args:
         branches: list of branch dicts (as produced by
             :func:`process_snapshot_branches`) searched first
         branch_name: name of the branch to look up
         snapshot_id: hexadecimal representation of the snapshot identifier,
             queried when the branch is missing from ``branches`` (e.g.
             because a large branches list has been truncated)

     Returns:
         The branch dict if found, None otherwise. When the branch is
         fetched from the archive, it is also appended to ``branches``.
     """
     filtered_branches = [b for b in branches if b["name"] == branch_name]
     if filtered_branches:
         return filtered_branches[0]
     else:
         # case where a large branches list has been truncated
         snp = archive.lookup_snapshot(
             snapshot_id,
             branches_from=branch_name,
             branches_count=1,
             target_types=["revision", "alias"],
             # pull request branches must be browsable even if they are hidden
             # by default in branches list
             branch_name_exclude_prefix=None,
         )
         snp_branch, _, _ = process_snapshot_branches(snp)
         if snp_branch and snp_branch[0]["name"] == branch_name:
             branches.append(snp_branch[0])
             return snp_branch[0]
 
 
 def _get_release(releases, release_name, snapshot_id):
     """
     Utility function to get a specific release from a snapshot.
     Returns None if the release cannot be found.

     Args:
         releases: list of release dicts (as produced by
             :func:`process_snapshot_branches`) searched first
         release_name: name of the release to look up
         snapshot_id: hexadecimal representation of the snapshot identifier,
             queried when the release is missing from ``releases`` (e.g.
             because a large branches list has been truncated)

     Returns:
         The release dict if found, None otherwise. When the release is
         fetched from the archive, it is also appended to ``releases``.
     """
     filtered_releases = [r for r in releases if r["name"] == release_name]
     if filtered_releases:
         return filtered_releases[0]
     else:
         # case where a large branches list has been truncated
         try:
             # git origins have specific branches for releases
             snp = archive.lookup_snapshot(
                 snapshot_id,
                 branches_from=f"refs/tags/{release_name}",
                 branches_count=1,
                 target_types=["release"],
             )
         except NotFoundExc:
             # fall back to a plain branch-name lookup for non-git origins
             snp = archive.lookup_snapshot(
                 snapshot_id,
                 branches_from=release_name,
                 branches_count=1,
                 target_types=["release", "alias"],
             )
         _, snp_release, _ = process_snapshot_branches(snp)
         if snp_release and snp_release[0]["name"] == release_name:
             releases.append(snp_release[0])
             return snp_release[0]
 
 
 def _branch_not_found(
     branch_type, branch, snapshot_id, snapshot_sizes, origin_info, timestamp, visit_id
 ):
     """
     Utility function to raise an exception when a specified branch/release
     can not be found.

     Args:
         branch_type: either "branch" or "release" (any other value is
             treated as "release")
         branch: name of the branch/release that was not found
         snapshot_id: hexadecimal representation of the snapshot identifier
         snapshot_sizes: dict mapping target types ("revision", "release")
             to their counts in the snapshot, used to tailor the message
         origin_info: a dict filled with origin information
         timestamp: visit timestamp, used in the message when neither a
             snapshot id nor a visit id is available
         visit_id: identifier of the origin visit

     Raises:
         NotFoundExc: always, with an HTML-escaped message describing the
             most precise context available (snapshot id, visit id or
             visit timestamp)
     """
     if branch_type == "branch":
         branch_type = "Branch"
         branch_type_plural = "branches"
         target_type = "revision"
     else:
         branch_type = "Release"
         branch_type_plural = "releases"
         target_type = "release"

     if snapshot_id and snapshot_sizes[target_type] == 0:
         msg = "Snapshot with id %s has an empty list" " of %s!" % (
             snapshot_id,
             branch_type_plural,
         )
     elif snapshot_id:
         msg = "%s %s for snapshot with id %s" " not found!" % (
             branch_type,
             branch,
             snapshot_id,
         )
     elif visit_id and snapshot_sizes[target_type] == 0:
         msg = (
             "Origin with url %s"
             " for visit with id %s has an empty list"
             " of %s!" % (origin_info["url"], visit_id, branch_type_plural)
         )
     elif visit_id:
         msg = (
             "%s %s associated to visit with"
             " id %s for origin with url %s"
             " not found!" % (branch_type, branch, visit_id, origin_info["url"])
         )
     elif snapshot_sizes[target_type] == 0:
         msg = (
             "Origin with url %s"
             " for visit with timestamp %s has an empty list"
             " of %s!" % (origin_info["url"], timestamp, branch_type_plural)
         )
     else:
         msg = (
             "%s %s associated to visit with"
             " timestamp %s for origin with "
             "url %s not found!" % (branch_type, branch, timestamp, origin_info["url"])
         )
     raise NotFoundExc(escape(msg))
 
 
 def process_snapshot_branches(
     snapshot: Dict[str, Any]
 ) -> Tuple[List[SnapshotBranchInfo], List[SnapshotReleaseInfo], Dict[str, Any]]:
     """
     Process a dictionary describing snapshot branches: extract those
     targeting revisions and releases, put them in two different lists,
     then sort those lists in lexicographical order of the branches' names.

     Args:
         snapshot: A dict describing a snapshot as returned for instance by
             :func:`swh.web.common.archive.lookup_snapshot`

     Returns:
         A tuple whose first member is the sorted list of branches
         targeting revisions, second member the sorted list of branches
         targeting releases and third member a dict mapping resolved branch
         aliases to their real target.
     """
     snapshot_branches = snapshot["branches"]
     branches: Dict[str, SnapshotBranchInfo] = {}
     branch_aliases: Dict[str, str] = {}
     releases: Dict[str, SnapshotReleaseInfo] = {}
     # reverse maps from target ids to the branch names pointing at them,
     # so that targets can be looked up in batches below
     revision_to_branch = defaultdict(set)
     revision_to_release = defaultdict(set)
     release_to_branch = defaultdict(set)
     for branch_name, target in snapshot_branches.items():
         if not target:
             # FIXME: display branches with an unknown target anyway
             continue
         target_id = target["target"]
         target_type = target["target_type"]
         if target_type == "revision":
             # partial info for now; date/directory/message get filled in
             # once the revision is looked up below
             branches[branch_name] = SnapshotBranchInfo(
                 name=branch_name,
                 alias=False,
                 revision=target_id,
                 date=None,
                 directory=None,
                 message=None,
                 url=None,
             )
             revision_to_branch[target_id].add(branch_name)
         elif target_type == "release":
             release_to_branch[target_id].add(branch_name)
         elif target_type == "alias":
             branch_aliases[branch_name] = target_id
         # FIXME: handle pointers to other object types

     def _add_release_info(branch, release, alias=False):
         # record a release entry keyed by the branch pointing at it
         releases[branch] = SnapshotReleaseInfo(
             name=release["name"],
             alias=alias,
             branch_name=branch,
             date=format_utc_iso_date(release["date"]),
             directory=None,
             id=release["id"],
             message=release["message"],
             target_type=release["target_type"],
             target=release["target"],
             url=None,
         )

     def _add_branch_info(branch, revision, alias=False):
         # record a branch entry enriched with its target revision's data
         branches[branch] = SnapshotBranchInfo(
             name=branch,
             alias=alias,
             revision=revision["id"],
             directory=revision["directory"],
             date=format_utc_iso_date(revision["date"]),
             message=revision["message"],
             url=None,
         )

     # batch-resolve all release targets in a single archive call
     releases_info = archive.lookup_release_multiple(release_to_branch.keys())
     for release in releases_info:
         if release is None:
             continue
         branches_to_update = release_to_branch[release["id"]]
         for branch in branches_to_update:
             _add_release_info(branch, release)
         if release["target_type"] == "revision":
             revision_to_release[release["target"]].update(branches_to_update)

     # batch-resolve revisions targeted by branches or by releases
     revisions = archive.lookup_revision_multiple(
         set(revision_to_branch.keys()) | set(revision_to_release.keys())
     )

     for revision in revisions:
         if not revision:
             continue
         for branch in revision_to_branch[revision["id"]]:
             _add_branch_info(branch, revision)
         for release_id in revision_to_release[revision["id"]]:
             releases[release_id]["directory"] = revision["directory"]

     resolved_aliases = {}

     # resolve each branch alias to its final target and register it as a
     # regular branch/release entry flagged with alias=True
     for branch_alias, branch_target in branch_aliases.items():
         resolved_alias = archive.lookup_snapshot_alias(snapshot["id"], branch_alias)
         resolved_aliases[branch_alias] = resolved_alias
         if resolved_alias is None:
             continue

         target_type = resolved_alias["target_type"]
         target = resolved_alias["target"]

         if target_type == "revision":
             revision = archive.lookup_revision(target)
             _add_branch_info(branch_alias, revision, alias=True)
         elif target_type == "release":
             release = archive.lookup_release(target)
             _add_release_info(branch_alias, release, alias=True)

         if branch_alias in branches:
             branches[branch_alias]["name"] = branch_alias

     ret_branches = list(sorted(branches.values(), key=lambda b: b["name"]))
     ret_releases = list(sorted(releases.values(), key=lambda b: b["name"]))

     return ret_branches, ret_releases, resolved_aliases
 
 
+@django_cache()
 def get_snapshot_content(
     snapshot_id: str,
 ) -> Tuple[List[SnapshotBranchInfo], List[SnapshotReleaseInfo], Dict[str, Any]]:
     """Returns the lists of branches and releases
     associated to a swh snapshot.
     That list is put in  cache in order to speedup the navigation
     in the swh-web/browse ui.

     Caching is handled by the ``django_cache`` decorator above.

     .. warning:: At most 1000 branches contained in the snapshot
         will be returned for performance reasons.

     Args:
         snapshot_id: hexadecimal representation of the snapshot identifier

     Returns:
         A tuple with three members. The first one is a list of dict describing
         the snapshot branches. The second one is a list of dict describing the
         snapshot releases. The third one is a dict mapping resolved branch
         aliases to their real target.

     Raises:
         NotFoundExc if the snapshot does not exist
     """
-    cache_entry_id = "swh_snapshot_%s" % snapshot_id
-    cache_entry = cache.get(cache_entry_id)
-
-    if cache_entry:
-        return (
-            cache_entry["branches"],
-            cache_entry["releases"],
-            cache_entry.get("aliases", {}),
-        )

     # empty results when no snapshot id is provided
     branches: List[SnapshotBranchInfo] = []
     releases: List[SnapshotReleaseInfo] = []
     aliases: Dict[str, Any] = {}

     snapshot_content_max_size = get_config()["snapshot_content_max_size"]

     if snapshot_id:
         # branches list is truncated to snapshot_content_max_size entries
         snapshot = archive.lookup_snapshot(
             snapshot_id, branches_count=snapshot_content_max_size
         )
         branches, releases, aliases = process_snapshot_branches(snapshot)

-    cache.set(
-        cache_entry_id, {"branches": branches, "releases": releases, "aliases": aliases}
-    )
-
     return branches, releases, aliases
 
 
 def get_origin_visit_snapshot(
     origin_info: OriginInfo,
     visit_ts: Optional[str] = None,
     visit_id: Optional[int] = None,
     snapshot_id: Optional[str] = None,
 ) -> Tuple[List[SnapshotBranchInfo], List[SnapshotReleaseInfo], Dict[str, Any]]:
     """Returns the lists of branches and releases associated to an origin for
     a given visit.

     The visit is expressed by either:

         * a snapshot identifier
         * a timestamp, if no visit with that exact timestamp is found,
           the closest one from the provided timestamp will be used.

     If no visit parameter is provided, it returns the list of branches
     found for the latest visit.

     That list is put in  cache in order to speedup the navigation
     in the swh-web/browse ui.

     .. warning:: At most 1000 branches contained in the snapshot
         will be returned for performance reasons.

     Args:
         origin_info: a dict filled with origin information
         visit_ts: an ISO 8601 datetime string to parse
         visit_id: visit id for disambiguation in case several visits have
             the same timestamp
         snapshot_id: if provided, visit associated to the snapshot will be processed

     Returns:
         A tuple with three members. The first one is a list of dict describing
         the origin branches for the given visit.
         The second one is a list of dict describing the origin releases
         for the given visit. The third one is a dict mapping resolved branch
         aliases to their real target.

     Raises:
         NotFoundExc if the origin or its visit are not found
     """

     # resolve the visit first, then delegate to the (cached) snapshot
     # content retrieval
     visit_info = get_origin_visit(origin_info, visit_ts, visit_id, snapshot_id)

     return get_snapshot_content(visit_info["snapshot"])
 
 
 def get_snapshot_context(
     snapshot_id: Optional[str] = None,
     origin_url: Optional[str] = None,
     timestamp: Optional[str] = None,
     visit_id: Optional[int] = None,
     branch_name: Optional[str] = None,
     release_name: Optional[str] = None,
     revision_id: Optional[str] = None,
     path: Optional[str] = None,
     browse_context: str = "directory",
 ) -> SnapshotContext:
     """
     Utility function to compute relevant information when navigating
     the archive in a snapshot context. The snapshot is either
     referenced by its id or it will be retrieved from an origin visit.

     Args:
         snapshot_id: hexadecimal representation of a snapshot identifier
         origin_url: an origin_url
         timestamp: a datetime string for retrieving the closest
             visit of the origin
         visit_id: optional visit id for disambiguation in case
             of several visits with the same timestamp
         branch_name: optional branch name set when browsing the snapshot in
             that scope (will default to "HEAD" if not provided)
         release_name: optional release name set when browsing the snapshot in
             that scope
         revision_id: optional revision identifier set when browsing the snapshot in
             that scope
         path: optional path of the object currently browsed in the snapshot
         browse_context: indicates which type of object is currently browsed
     Returns:
         A dict filled with snapshot context information.

     Raises:
         swh.web.common.exc.NotFoundExc: if no snapshot is found for the visit
             of an origin.
     """
     assert origin_url is not None or snapshot_id is not None
     origin_info = None
     visit_info = None
     url_args = {}
     query_params: Dict[str, Any] = {}
     origin_visits_url = None

     if origin_url:
         # origin-centric browsing: resolve the visit, its snapshot and the
         # origin-scoped browse URLs

         if visit_id is not None:
             query_params["visit_id"] = visit_id
         elif snapshot_id is not None:
             query_params["snapshot"] = snapshot_id

         origin_info = archive.lookup_origin({"url": origin_url})

         visit_info = get_origin_visit(origin_info, timestamp, visit_id, snapshot_id)
         formatted_date = format_utc_iso_date(visit_info["date"])
         visit_info["formatted_date"] = formatted_date
         snapshot_id = visit_info["snapshot"]

         if not snapshot_id:
             raise NotFoundExc(
                 "No snapshot associated to the visit of origin "
                 "%s on %s" % (escape(origin_url), formatted_date)
             )

         # provided timestamp is not necessarily equals to the one
         # of the retrieved visit, so get the exact one in order
         # to use it in the urls generated below
         if timestamp:
             timestamp = visit_info["date"]

         branches, releases, aliases = get_origin_visit_snapshot(
             origin_info, timestamp, visit_id, snapshot_id
         )

         query_params["origin_url"] = origin_info["url"]

         origin_visits_url = reverse(
             "browse-origin-visits", query_params={"origin_url": origin_info["url"]}
         )

         if timestamp is not None:
             query_params["timestamp"] = format_utc_iso_date(
                 timestamp, "%Y-%m-%dT%H:%M:%SZ"
             )

         visit_url = reverse("browse-origin-directory", query_params=query_params)
         visit_info["url"] = directory_url = visit_url

         branches_url = reverse("browse-origin-branches", query_params=query_params)

         releases_url = reverse("browse-origin-releases", query_params=query_params)
     else:
         # snapshot-centric browsing: snapshot id is required and used
         # directly in the URLs
         assert snapshot_id is not None
         branches, releases, aliases = get_snapshot_content(snapshot_id)
         url_args = {"snapshot_id": snapshot_id}
         directory_url = reverse("browse-snapshot-directory", url_args=url_args)
         branches_url = reverse("browse-snapshot-branches", url_args=url_args)

         releases_url = reverse("browse-snapshot-releases", url_args=url_args)

     # releases come sorted by name in ascending order; reverse to get
     # descending name order
     releases = list(reversed(releases))

+    @django_cache()
+    def _get_snapshot_sizes(snapshot_id):
+        return archive.lookup_snapshot_sizes(snapshot_id)
+
+    snapshot_sizes = _get_snapshot_sizes(snapshot_id)

     is_empty = (snapshot_sizes["release"] + snapshot_sizes["revision"]) == 0

     swh_snp_id = str(
         CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=hash_to_bytes(snapshot_id))
     )

     if visit_info:
         timestamp = format_utc_iso_date(visit_info["date"])

     if origin_info:
         browse_view_name = f"browse-origin-{browse_context}"
     else:
         browse_view_name = f"browse-snapshot-{browse_context}"

     release_id = None
     root_directory = None

     snapshot_total_size = snapshot_sizes["release"] + snapshot_sizes["revision"]

     if path is not None:
         query_params["path"] = path

     # resolve the object to browse (explicit revision, explicit release, or
     # a branch — defaulting to HEAD, first branch, or last release) and its
     # root directory
     if snapshot_total_size and revision_id is not None:
         # browse specific revision for a snapshot requested
         revision = archive.lookup_revision(revision_id)
         root_directory = revision["directory"]
         branches.append(
             SnapshotBranchInfo(
                 name=revision_id,
                 alias=False,
                 revision=revision_id,
                 directory=root_directory,
                 date=revision["date"],
                 message=revision["message"],
                 url=None,
             )
         )
         query_params["revision"] = revision_id
     elif snapshot_total_size and release_name:
         # browse specific release for a snapshot requested
         release = _get_release(releases, release_name, snapshot_id)
         if release is None:
             _branch_not_found(
                 "release",
                 release_name,
                 snapshot_id,
                 snapshot_sizes,
                 origin_info,
                 timestamp,
                 visit_id,
             )
         else:
             if release["target_type"] == "revision":
                 revision = archive.lookup_revision(release["target"])
                 root_directory = revision["directory"]
                 revision_id = release["target"]
             elif release["target_type"] == "directory":
                 root_directory = release["target"]
             release_id = release["id"]
             query_params["release"] = release_name
     elif snapshot_total_size:
         head = aliases.get("HEAD")
         if branch_name:
             # browse specific branch for a snapshot requested
             query_params["branch"] = branch_name
             branch = _get_branch(branches, branch_name, snapshot_id)
             if branch is None:
                 _branch_not_found(
                     "branch",
                     branch_name,
                     snapshot_id,
                     snapshot_sizes,
                     origin_info,
                     timestamp,
                     visit_id,
                 )
             else:
                 branch_name = branch["name"]
                 revision_id = branch["revision"]
                 root_directory = branch["directory"]
         elif head is not None:
             # otherwise, browse branch targeted by the HEAD alias if it exists
             if head["target_type"] == "revision":
                 # HEAD alias targets a revision
                 head_rev = archive.lookup_revision(head["target"])
                 branch_name = "HEAD"
                 revision_id = head_rev["id"]
                 root_directory = head_rev["directory"]
             else:
                 # HEAD alias targets a release
                 release_name = archive.lookup_release(head["target"])["name"]
                 head_rel = _get_release(releases, release_name, snapshot_id)
                 if head_rel["target_type"] == "revision":
                     revision = archive.lookup_revision(head_rel["target"])
                     root_directory = revision["directory"]
                     revision_id = head_rel["target"]
                 elif head_rel["target_type"] == "directory":
                     root_directory = head_rel["target"]
                 release_id = head_rel["id"]
         elif branches:
             # fallback to browse first branch otherwise
             branch = branches[0]
             branch_name = branch["name"]
             revision_id = branch["revision"]
             root_directory = branch["directory"]
         elif releases:
             # fallback to browse last release otherwise
             release = releases[-1]
             if release["target_type"] == "revision":
                 revision = archive.lookup_revision(release["target"])
                 root_directory = revision["directory"]
                 revision_id = release["target"]
             elif release["target_type"] == "directory":
                 root_directory = release["target"]
             release_id = release["id"]
             release_name = release["name"]

     # compute a browsing URL for every branch
     for b in branches:
         branch_query_params = dict(query_params)
         branch_query_params.pop("release", None)
         if b["name"] != b["revision"]:
             branch_query_params.pop("revision", None)
             branch_query_params["branch"] = b["name"]
         b["url"] = reverse(
             browse_view_name, url_args=url_args, query_params=branch_query_params
         )

     # compute a browsing URL for every release
     for r in releases:
         release_query_params = dict(query_params)
         release_query_params.pop("branch", None)
         release_query_params.pop("revision", None)
         release_query_params["release"] = r["name"]
         r["url"] = reverse(
             browse_view_name, url_args=url_args, query_params=release_query_params,
         )

     # fetch and format information on the browsed revision, if any
     revision_info = None
     if revision_id:
         try:
             revision_info = archive.lookup_revision(revision_id)
         except NotFoundExc:
             pass
         else:
             revision_info["date"] = format_utc_iso_date(revision_info["date"])
             revision_info["committer_date"] = format_utc_iso_date(
                 revision_info["committer_date"]
             )
             if revision_info["message"]:
                 message_lines = revision_info["message"].split("\n")
                 revision_info["message_header"] = message_lines[0]
             else:
                 revision_info["message_header"] = ""

     snapshot_context = SnapshotContext(
         directory_url=directory_url,
         branch=branch_name,
         branch_alias=branch_name in aliases,
         branches=branches,
         branches_url=branches_url,
         is_empty=is_empty,
         origin_info=origin_info,
         origin_visits_url=origin_visits_url,
         release=release_name,
         release_alias=release_name in aliases,
         release_id=release_id,
         query_params=query_params,
         releases=releases,
         releases_url=releases_url,
         revision_id=revision_id,
         revision_info=revision_info,
         root_directory=root_directory,
         snapshot_id=snapshot_id,
         snapshot_sizes=snapshot_sizes,
         snapshot_swhid=swh_snp_id,
         url_args=url_args,
         visit_info=visit_info,
     )

     if revision_info:
         revision_info["revision_url"] = gen_revision_url(revision_id, snapshot_context)

     return snapshot_context
 
 
 def _build_breadcrumbs(snapshot_context: SnapshotContext, path: str):
     """Build the breadcrumb links leading from the root directory of the
     browsed snapshot down to ``path``.

     Args:
         snapshot_context: a dict filled with snapshot context information,
             as returned by :func:`get_snapshot_context`
         path: path of the object currently browsed

     Returns:
         A list of dicts with ``name`` and ``url`` keys: first the
         (abbreviated) root directory, then one entry per path component.
         Empty when the snapshot has no root directory.
     """
     origin_info = snapshot_context["origin_info"]
     url_args = snapshot_context["url_args"]
     query_params = dict(snapshot_context["query_params"])
     root_directory = snapshot_context["root_directory"]

     path_info = gen_path_info(path)

     if origin_info:
         browse_view_name = "browse-origin-directory"
     else:
         browse_view_name = "browse-snapshot-directory"

     breadcrumbs = []
     if root_directory:
         # root crumb: shortened directory id, URL without a path parameter
         query_params.pop("path", None)
         breadcrumbs.append(
             {
                 "name": root_directory[:7],
                 "url": reverse(
                     browse_view_name, url_args=url_args, query_params=query_params
                 ),
             }
         )
     # one crumb per sub-path, each linking to the partial path
     for pi in path_info:
         query_params["path"] = pi["path"]
         breadcrumbs.append(
             {
                 "name": pi["name"],
                 "url": reverse(
                     browse_view_name, url_args=url_args, query_params=query_params
                 ),
             }
         )
     return breadcrumbs
 
 
 def _check_origin_url(snapshot_id, origin_url):
     """Raise BadInputExc when neither a snapshot id nor an origin URL is
     provided, as at least one of them is required to resolve a snapshot."""
     if snapshot_id is None and origin_url is None:
         raise BadInputExc("An origin URL must be provided as query parameter.")
 
 
 def browse_snapshot_directory(
     request, snapshot_id=None, origin_url=None, timestamp=None, path=None
 ):
     """
     Django view implementation for browsing a directory in a snapshot context.
     """
     _check_origin_url(snapshot_id, origin_url)
 
     visit_id = int(request.GET.get("visit_id", 0))
     snapshot_context = get_snapshot_context(
         snapshot_id=snapshot_id,
         origin_url=origin_url,
         timestamp=timestamp,
         visit_id=visit_id or None,
         path=path,
         browse_context="directory",
         branch_name=request.GET.get("branch"),
         release_name=request.GET.get("release"),
         revision_id=request.GET.get("revision"),
     )
 
     root_directory = snapshot_context["root_directory"]
     sha1_git = root_directory
     error_info = {
         "status_code": 200,
         "description": None,
     }
     if root_directory and path:
         try:
             dir_info = archive.lookup_directory_with_path(root_directory, path)
             sha1_git = dir_info["target"]
         except NotFoundExc as e:
             sha1_git = None
             error_info["status_code"] = 404
             error_info["description"] = f"NotFoundExc: {str(e)}"
 
     dirs = []
     files = []
     if sha1_git:
         dirs, files = get_directory_entries(sha1_git)
 
     origin_info = snapshot_context["origin_info"]
     visit_info = snapshot_context["visit_info"]
     url_args = snapshot_context["url_args"]
     query_params = dict(snapshot_context["query_params"])
     revision_id = snapshot_context["revision_id"]
     snapshot_id = snapshot_context["snapshot_id"]
 
     if origin_info:
         browse_view_name = "browse-origin-directory"
     else:
         browse_view_name = "browse-snapshot-directory"
 
     breadcrumbs = _build_breadcrumbs(snapshot_context, path)
 
     path = "" if path is None else (path + "/")
 
     for d in dirs:
         if d["type"] == "rev":
             d["url"] = reverse("browse-revision", url_args={"sha1_git": d["target"]})
         else:
             query_params["path"] = path + d["name"]
             d["url"] = reverse(
                 browse_view_name, url_args=url_args, query_params=query_params
             )
 
     sum_file_sizes = 0
 
     readmes = {}
 
     if origin_info:
         browse_view_name = "browse-origin-content"
     else:
         browse_view_name = "browse-snapshot-content"
 
     for f in files:
         query_params["path"] = path + f["name"]
         f["url"] = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
         if f["length"] is not None:
             sum_file_sizes += f["length"]
         if f["name"].lower().startswith("readme"):
             readmes[f["name"]] = f["checksums"]["sha1"]
 
     readme_name, readme_url, readme_html = get_readme_to_display(readmes)
 
     if origin_info:
         browse_view_name = "browse-origin-log"
     else:
         browse_view_name = "browse-snapshot-log"
 
     history_url = None
     if snapshot_id != _empty_snapshot_id:
         query_params.pop("path", None)
         history_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
 
     nb_files = None
     nb_dirs = None
     dir_path = None
     if root_directory:
         nb_files = len(files)
         nb_dirs = len(dirs)
         dir_path = "/" + path
 
     swh_objects = []
     vault_cooking = {}
     revision_found = False
 
     if revision_id is not None:
         try:
             archive.lookup_revision(revision_id)
         except NotFoundExc:
             pass
         else:
             revision_found = True
 
     if sha1_git is not None:
         swh_objects.append(
             SWHObjectInfo(object_type=ObjectType.DIRECTORY, object_id=sha1_git)
         )
         vault_cooking.update(
             {"directory_context": True, "directory_swhid": f"swh:1:dir:{sha1_git}",}
         )
     if revision_id is not None and revision_found:
         swh_objects.append(
             SWHObjectInfo(object_type=ObjectType.REVISION, object_id=revision_id)
         )
         vault_cooking.update(
             {"revision_context": True, "revision_swhid": f"swh:1:rev:{revision_id}",}
         )
     swh_objects.append(
         SWHObjectInfo(object_type=ObjectType.SNAPSHOT, object_id=snapshot_id)
     )
 
     visit_date = None
     visit_type = None
     if visit_info:
         visit_date = format_utc_iso_date(visit_info["date"])
         visit_type = visit_info["type"]
 
     release_id = snapshot_context["release_id"]
     if release_id:
         swh_objects.append(
             SWHObjectInfo(object_type=ObjectType.RELEASE, object_id=release_id)
         )
 
     dir_metadata = DirectoryMetadata(
         object_type=ObjectType.DIRECTORY,
         object_id=sha1_git,
         directory=sha1_git,
         nb_files=nb_files,
         nb_dirs=nb_dirs,
         sum_file_sizes=sum_file_sizes,
         root_directory=root_directory,
         path=dir_path,
         revision=revision_id,
         revision_found=revision_found,
         release=release_id,
         snapshot=snapshot_id,
         origin_url=origin_url,
         visit_date=visit_date,
         visit_type=visit_type,
     )
 
     swhids_info = get_swhids_info(swh_objects, snapshot_context, dir_metadata)
 
     dir_path = "/".join([bc["name"] for bc in breadcrumbs]) + "/"
     context_found = "snapshot: %s" % snapshot_context["snapshot_id"]
     if origin_info:
         context_found = "origin: %s" % origin_info["url"]
     heading = "Directory - %s - %s - %s" % (
         dir_path,
         snapshot_context["branch"],
         context_found,
     )
 
     top_right_link = None
     if not snapshot_context["is_empty"] and revision_found:
         top_right_link = {
             "url": history_url,
             "icon": swh_object_icons["revisions history"],
             "text": "History",
         }
 
     return render(
         request,
         "browse/directory.html",
         {
             "heading": heading,
             "swh_object_name": "Directory",
             "swh_object_metadata": dir_metadata,
             "dirs": dirs,
             "files": files,
             "breadcrumbs": breadcrumbs if root_directory else [],
             "top_right_link": top_right_link,
             "readme_name": readme_name,
             "readme_url": readme_url,
             "readme_html": readme_html,
             "snapshot_context": snapshot_context,
             "vault_cooking": vault_cooking,
             "show_actions": True,
             "swhids_info": swhids_info,
             "error_code": error_info["status_code"],
             "error_message": http_status_code_message.get(error_info["status_code"]),
             "error_description": error_info["description"],
         },
         status=error_info["status_code"],
     )
 
 
 PER_PAGE = 100
 
 
 def browse_snapshot_log(request, snapshot_id=None, origin_url=None, timestamp=None):
     """
     Django view implementation for browsing a revision history in a
     snapshot context.
     """
     _check_origin_url(snapshot_id, origin_url)
 
     visit_id = int(request.GET.get("visit_id", 0))
     snapshot_context = get_snapshot_context(
         snapshot_id=snapshot_id,
         origin_url=origin_url,
         timestamp=timestamp,
         visit_id=visit_id or None,
         browse_context="log",
         branch_name=request.GET.get("branch"),
         release_name=request.GET.get("release"),
         revision_id=request.GET.get("revision"),
     )
 
     revision_id = snapshot_context["revision_id"]
 
     if revision_id is None:
         raise NotFoundExc("No revisions history found in the current snapshot context.")
 
     per_page = int(request.GET.get("per_page", PER_PAGE))
     offset = int(request.GET.get("offset", 0))
     revs_ordering = request.GET.get("revs_ordering", "committer_date")
     session_key = "rev_%s_log_ordering_%s" % (revision_id, revs_ordering)
     rev_log_session = request.session.get(session_key, None)
     rev_log = []
     revs_walker_state = None
     if rev_log_session:
         rev_log = rev_log_session["rev_log"]
         revs_walker_state = rev_log_session["revs_walker_state"]
 
     if len(rev_log) < offset + per_page:
         revs_walker = archive.get_revisions_walker(
             revs_ordering,
             revision_id,
             max_revs=offset + per_page + 1,
             state=revs_walker_state,
         )
         rev_log += [rev["id"] for rev in revs_walker]
         revs_walker_state = revs_walker.export_state()
 
     revs = rev_log[offset : offset + per_page]
     revision_log = archive.lookup_revision_multiple(revs)
 
     request.session[session_key] = {
         "rev_log": rev_log,
         "revs_walker_state": revs_walker_state,
     }
 
     origin_info = snapshot_context["origin_info"]
     visit_info = snapshot_context["visit_info"]
     url_args = snapshot_context["url_args"]
     query_params = snapshot_context["query_params"]
     snapshot_id = snapshot_context["snapshot_id"]
 
     query_params["per_page"] = per_page
     revs_ordering = request.GET.get("revs_ordering", "")
     query_params["revs_ordering"] = revs_ordering or None
 
     if origin_info:
         browse_view_name = "browse-origin-log"
     else:
         browse_view_name = "browse-snapshot-log"
 
     prev_log_url = None
     if len(rev_log) > offset + per_page:
         query_params["offset"] = offset + per_page
         prev_log_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
 
     next_log_url = None
     if offset != 0:
         query_params["offset"] = offset - per_page
         next_log_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
 
     revision_log_data = format_log_entries(revision_log, per_page, snapshot_context)
 
     browse_rev_link = gen_revision_link(revision_id)
 
     browse_log_link = gen_revision_log_link(revision_id)
 
     browse_snp_link = gen_snapshot_link(snapshot_id)
 
     revision_metadata = {
         "context-independent revision": browse_rev_link,
         "context-independent revision history": browse_log_link,
         "context-independent snapshot": browse_snp_link,
         "snapshot": snapshot_id,
     }
 
     if origin_info:
         revision_metadata["origin url"] = origin_info["url"]
         revision_metadata["origin visit date"] = format_utc_iso_date(visit_info["date"])
         revision_metadata["origin visit type"] = visit_info["type"]
 
     swh_objects = [
         SWHObjectInfo(object_type=ObjectType.REVISION, object_id=revision_id),
         SWHObjectInfo(object_type=ObjectType.SNAPSHOT, object_id=snapshot_id),
     ]
 
     release_id = snapshot_context["release_id"]
     if release_id:
         swh_objects.append(
             SWHObjectInfo(object_type=ObjectType.RELEASE, object_id=release_id)
         )
         browse_rel_link = gen_release_link(release_id)
         revision_metadata["release"] = release_id
         revision_metadata["context-independent release"] = browse_rel_link
 
     swhids_info = get_swhids_info(swh_objects, snapshot_context)
 
     context_found = "snapshot: %s" % snapshot_context["snapshot_id"]
     if origin_info:
         context_found = "origin: %s" % origin_info["url"]
     heading = "Revision history - %s - %s" % (snapshot_context["branch"], context_found)
 
     return render(
         request,
         "browse/revision-log.html",
         {
             "heading": heading,
             "swh_object_name": "Revisions history",
             "swh_object_metadata": revision_metadata,
             "revision_log": revision_log_data,
             "revs_ordering": revs_ordering,
             "next_log_url": next_log_url,
             "prev_log_url": prev_log_url,
             "breadcrumbs": None,
             "top_right_link": None,
             "snapshot_context": snapshot_context,
             "vault_cooking": None,
             "show_actions": True,
             "swhids_info": swhids_info,
         },
     )
 
 
 def browse_snapshot_branches(
     request, snapshot_id=None, origin_url=None, timestamp=None, branch_name_include=None
 ):
     """
     Django view implementation for browsing a list of branches in a snapshot
     context.
     """
     _check_origin_url(snapshot_id, origin_url)
 
     visit_id = int(request.GET.get("visit_id", 0))
     snapshot_context = get_snapshot_context(
         snapshot_id=snapshot_id,
         origin_url=origin_url,
         timestamp=timestamp,
         visit_id=visit_id or None,
     )
 
     branches_bc = request.GET.get("branches_breadcrumbs", "")
     branches_bc = branches_bc.split(",") if branches_bc else []
     branches_from = branches_bc[-1] if branches_bc else ""
 
     origin_info = snapshot_context["origin_info"]
     url_args = snapshot_context["url_args"]
     query_params = snapshot_context["query_params"]
 
     if origin_info:
         browse_view_name = "browse-origin-directory"
     else:
         browse_view_name = "browse-snapshot-directory"
 
     snapshot = archive.lookup_snapshot(
         snapshot_context["snapshot_id"],
         branches_from,
         PER_PAGE + 1,
         target_types=["revision", "alias"],
         branch_name_include_substring=branch_name_include,
     )
     displayed_branches = []
     if snapshot:
         displayed_branches, _, _ = process_snapshot_branches(snapshot)
 
     for branch in displayed_branches:
         rev_query_params = {}
         if origin_info:
             rev_query_params["origin_url"] = origin_info["url"]
 
         revision_url = reverse(
             "browse-revision",
             url_args={"sha1_git": branch["revision"]},
             query_params=query_params,
         )
 
         query_params["branch"] = branch["name"]
         directory_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
         del query_params["branch"]
         branch["revision_url"] = revision_url
         branch["directory_url"] = directory_url
 
     if origin_info:
         browse_view_name = "browse-origin-branches"
     else:
         browse_view_name = "browse-snapshot-branches"
 
     prev_branches_url = None
     next_branches_url = None
 
     if branches_bc:
         query_params_prev = dict(query_params)
 
         query_params_prev["branches_breadcrumbs"] = ",".join(branches_bc[:-1])
         prev_branches_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params_prev
         )
     elif branches_from:
         prev_branches_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
 
     if snapshot and snapshot["next_branch"] is not None:
         query_params_next = dict(query_params)
         next_branch = displayed_branches[-1]["name"]
         del displayed_branches[-1]
         branches_bc.append(next_branch)
         query_params_next["branches_breadcrumbs"] = ",".join(branches_bc)
         next_branches_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params_next
         )
 
     heading = "Branches - "
     if origin_info:
         heading += "origin: %s" % origin_info["url"]
     else:
         heading += "snapshot: %s" % snapshot_id
 
     return render(
         request,
         "browse/branches.html",
         {
             "heading": heading,
             "swh_object_name": "Branches",
             "swh_object_metadata": {},
             "top_right_link": None,
             "displayed_branches": displayed_branches,
             "prev_branches_url": prev_branches_url,
             "next_branches_url": next_branches_url,
             "snapshot_context": snapshot_context,
             "search_string": branch_name_include or "",
         },
     )
 
 
 def browse_snapshot_releases(
     request,
     snapshot_id=None,
     origin_url=None,
     timestamp=None,
     release_name_include=None,
 ):
     """
     Django view implementation for browsing a list of releases in a snapshot
     context.
     """
     _check_origin_url(snapshot_id, origin_url)
 
     visit_id = int(request.GET.get("visit_id", 0))
     snapshot_context = get_snapshot_context(
         snapshot_id=snapshot_id,
         origin_url=origin_url,
         timestamp=timestamp,
         visit_id=visit_id or None,
     )
 
     rel_bc = request.GET.get("releases_breadcrumbs", "")
     rel_bc = rel_bc.split(",") if rel_bc else []
     rel_from = rel_bc[-1] if rel_bc else ""
 
     origin_info = snapshot_context["origin_info"]
     url_args = snapshot_context["url_args"]
     query_params = snapshot_context["query_params"]
 
     snapshot = archive.lookup_snapshot(
         snapshot_context["snapshot_id"],
         rel_from,
         PER_PAGE + 1,
         target_types=["release", "alias"],
         branch_name_include_substring=release_name_include,
     )
     displayed_releases = []
     if snapshot:
         _, displayed_releases, _ = process_snapshot_branches(snapshot)
 
     for release in displayed_releases:
         query_params_tgt = {"snapshot": snapshot_id, "release": release["name"]}
         if origin_info:
             query_params_tgt["origin_url"] = origin_info["url"]
 
         release_url = reverse(
             "browse-release",
             url_args={"sha1_git": release["id"]},
             query_params=query_params_tgt,
         )
 
         target_url = ""
         tooltip = (
             f"The release {release['name']} targets "
             f"{release['target_type']} {release['target']}"
         )
         if release["target_type"] == "revision":
             target_url = reverse(
                 "browse-revision",
                 url_args={"sha1_git": release["target"]},
                 query_params=query_params_tgt,
             )
         elif release["target_type"] == "directory":
             target_url = reverse(
                 "browse-directory",
                 url_args={"sha1_git": release["target"]},
                 query_params=query_params_tgt,
             )
         elif release["target_type"] == "content":
             target_url = reverse(
                 "browse-content",
                 url_args={"query_string": release["target"]},
                 query_params=query_params_tgt,
             )
         elif release["target_type"] == "release":
             target_url = reverse(
                 "browse-release",
                 url_args={"sha1_git": release["target"]},
                 query_params=query_params_tgt,
             )
             tooltip = (
                 f"The release {release['name']} "
                 f"is an alias for release {release['target']}"
             )
 
         release["release_url"] = release_url
         release["target_url"] = target_url
         release["tooltip"] = tooltip
 
     if origin_info:
         browse_view_name = "browse-origin-releases"
     else:
         browse_view_name = "browse-snapshot-releases"
 
     prev_releases_url = None
     next_releases_url = None
 
     if rel_bc:
         query_params_prev = dict(query_params)
 
         query_params_prev["releases_breadcrumbs"] = ",".join(rel_bc[:-1])
         prev_releases_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params_prev
         )
     elif rel_from:
         prev_releases_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params
         )
 
     if snapshot and snapshot["next_branch"] is not None:
         query_params_next = dict(query_params)
         next_rel = displayed_releases[-1]["branch_name"]
         del displayed_releases[-1]
         rel_bc.append(next_rel)
         query_params_next["releases_breadcrumbs"] = ",".join(rel_bc)
         next_releases_url = reverse(
             browse_view_name, url_args=url_args, query_params=query_params_next
         )
 
     heading = "Releases - "
     if origin_info:
         heading += "origin: %s" % origin_info["url"]
     else:
         heading += "snapshot: %s" % snapshot_id
 
     return render(
         request,
         "browse/releases.html",
         {
             "heading": heading,
             "top_panel_visible": False,
             "top_panel_collapsible": False,
             "swh_object_name": "Releases",
             "swh_object_metadata": {},
             "top_right_link": None,
             "displayed_releases": displayed_releases,
             "prev_releases_url": prev_releases_url,
             "next_releases_url": next_releases_url,
             "snapshot_context": snapshot_context,
             "vault_cooking": None,
             "show_actions": False,
             "search_string": release_name_include or "",
         },
     )
diff --git a/swh/web/browse/utils.py b/swh/web/browse/utils.py
index c82f1d60..a10cec56 100644
--- a/swh/web/browse/utils.py
+++ b/swh/web/browse/utils.py
@@ -1,734 +1,724 @@
 # Copyright (C) 2017-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import base64
 import stat
 import textwrap
 from typing import Tuple
 
 import chardet
 import magic
 import sentry_sdk
 
-from django.core.cache import cache
 from django.utils.html import escape
 from django.utils.safestring import mark_safe
 
 from swh.web.common import archive, highlightjs
 from swh.web.common.exc import NotFoundExc
 from swh.web.common.utils import (
     browsers_supported_image_mimes,
+    django_cache,
     format_utc_iso_date,
     reverse,
     rst_to_html,
 )
 from swh.web.config import get_config
 
 
+@django_cache()
 def get_directory_entries(sha1_git):
     """Function that retrieves the content of a directory
     from the archive.
 
     The directories entries are first sorted in lexicographical order.
     Sub-directories and regular files are then extracted.
 
     Args:
         sha1_git: sha1_git identifier of the directory
 
     Returns:
         A tuple whose first member corresponds to the sub-directories list
         and second member the regular files list
 
     Raises:
         NotFoundExc if the directory is not found
     """
-    cache_entry_id = "directory_entries_%s" % sha1_git
-    cache_entry = cache.get(cache_entry_id)
-
-    if cache_entry:
-        return cache_entry
-
     entries = list(archive.lookup_directory(sha1_git))
     for e in entries:
         e["perms"] = stat.filemode(e["perms"])
         if e["type"] == "rev":
             # modify dir entry name to explicitly show it points
             # to a revision
             e["name"] = "%s @ %s" % (e["name"], e["target"][:7])
 
     dirs = [e for e in entries if e["type"] in ("dir", "rev")]
     files = [e for e in entries if e["type"] == "file"]
 
     dirs = sorted(dirs, key=lambda d: d["name"])
     files = sorted(files, key=lambda f: f["name"])
 
-    cache.set(cache_entry_id, (dirs, files))
-
     return dirs, files
 
 
 def get_mimetype_and_encoding_for_content(content):
     """Function that returns the mime type and the encoding associated to
     a content buffer using the magic module under the hood.
 
     Args:
         content (bytes): a content buffer
 
     Returns:
         A tuple (mimetype, encoding), for instance ('text/plain', 'us-ascii'),
         associated to the provided content.
 
     """
     m = magic.Magic(mime=True, mime_encoding=True)
     mime_encoding = m.from_buffer(content)
     mime_type, encoding = mime_encoding.split(";")
     encoding = encoding.replace(" charset=", "")
     return mime_type, encoding
 
 
 # maximum authorized content size in bytes for HTML display
 # with code highlighting
 content_display_max_size = get_config()["content_display_max_size"]
 
 
 def re_encode_content(
     mimetype: str, encoding: str, content_data: bytes
 ) -> Tuple[str, str, bytes]:
     """Try to re-encode textual content if it is not encoded to UTF-8
     for proper display in the browse Web UI.
 
     Args:
         mimetype: content mimetype as detected by python-magic
         encoding: content encoding as detected by python-magic
         content_data: raw content bytes
 
     Returns:
         A tuple with 3 members: content mimetype, content encoding (possibly updated
         after processing), content raw bytes (possibly reencoded to UTF-8)
     """
     if mimetype.startswith("text/") and encoding not in ("us-ascii", "utf-8"):
         # first check if chardet detects an encoding with confidence
         result = chardet.detect(content_data)
         if result["confidence"] >= 0.9:
             encoding = result["encoding"]
             content_data = content_data.decode(encoding).encode("utf-8")
         elif encoding == "unknown-8bit":
             # probably a malformed UTF-8 content, re-encode it
             # by replacing invalid chars with a substitution one
             content_data = content_data.decode("utf-8", "replace").encode("utf-8")
         elif encoding not in ["utf-8", "binary"]:
             content_data = content_data.decode(encoding, "replace").encode("utf-8")
     elif mimetype.startswith("application/octet-stream"):
         # file may detect a text content as binary
         # so try to decode it for display
         encodings = ["us-ascii", "utf-8"]
         encodings += ["iso-8859-%s" % i for i in range(1, 17)]
         for enc in encodings:
             try:
                 content_data = content_data.decode(enc).encode("utf-8")
             except Exception as exc:
                 sentry_sdk.capture_exception(exc)
             else:
                 # ensure display in content view
                 encoding = enc
                 mimetype = "text/plain"
                 break
     return mimetype, encoding, content_data
 
 
 def request_content(
     query_string, max_size=content_display_max_size, re_encode=True,
 ):
     """Function that retrieves a content from the archive.
 
     Raw bytes content is first retrieved, then the content mime type.
     If the mime type is not stored in the archive, it will be computed
     using Python magic module.
 
     Args:
         query_string: a string of the form "[ALGO_HASH:]HASH" where
             optional ALGO_HASH can be either ``sha1``, ``sha1_git``,
             ``sha256``, or ``blake2s256`` (default to ``sha1``) and HASH
             the hexadecimal representation of the hash value
         max_size: the maximum size for a content to retrieve (default to 1MB,
             no size limit if None)
 
     Returns:
         A tuple whose first member corresponds to the content raw bytes
         and second member the content mime type
 
     Raises:
         NotFoundExc if the content is not found
     """
     content_data = archive.lookup_content(query_string)
     filetype = None
     language = None
     # requests to the indexer db may fail so properly handle
     # those cases in order to avoid content display errors
     try:
         filetype = archive.lookup_content_filetype(query_string)
         language = archive.lookup_content_language(query_string)
     except Exception as exc:
         sentry_sdk.capture_exception(exc)
     mimetype = "unknown"
     encoding = "unknown"
     if filetype:
         mimetype = filetype["mimetype"]
         encoding = filetype["encoding"]
 
     if not max_size or content_data["length"] < max_size:
         try:
             content_raw = archive.lookup_content_raw(query_string)
         except Exception as exc:
             sentry_sdk.capture_exception(exc)
             raise NotFoundExc(
                 "The bytes of the content are currently not available "
                 "in the archive."
             )
         else:
             content_data["raw_data"] = content_raw["data"]
 
             if not filetype:
                 mimetype, encoding = get_mimetype_and_encoding_for_content(
                     content_data["raw_data"]
                 )
 
             if re_encode:
                 mimetype, encoding, raw_data = re_encode_content(
                     mimetype, encoding, content_data["raw_data"]
                 )
                 content_data["raw_data"] = raw_data
 
     else:
         content_data["raw_data"] = None
 
     content_data["mimetype"] = mimetype
     content_data["encoding"] = encoding
 
     if language:
         content_data["language"] = language["lang"]
     else:
         content_data["language"] = "not detected"
 
     return content_data
 
 
 def prepare_content_for_display(content_data, mime_type, path):
     """Function that prepares a content for HTML display.
 
     The function tries to associate a programming language to a
     content in order to perform syntax highlighting client-side
     using highlightjs. The language is determined using either
     the content filename or its mime type.
     If the mime type corresponds to an image format supported
     by web browsers, the content will be encoded in base64
     for displaying the image.
 
     Args:
         content_data (bytes): raw bytes of the content
         mime_type (string): mime type of the content
         path (string): path of the content including filename
 
     Returns:
         A dict containing the content bytes (possibly different from the one
         provided as parameter if it is an image) under the key 'content_data
         and the corresponding highlightjs language class under the
         key 'language'.
     """
 
     language = None
     if path:
         language = highlightjs.get_hljs_language_from_filename(path.split("/")[-1])
 
     if language is None:
         language = highlightjs.get_hljs_language_from_mime_type(mime_type)
 
     if language is None:
         language = "plaintext"
 
     if mime_type.startswith("image/"):
         if mime_type in browsers_supported_image_mimes:
             content_data = base64.b64encode(content_data).decode("ascii")
 
     if mime_type.startswith("image/svg"):
         mime_type = "image/svg+xml"
 
     if mime_type.startswith("text/") or mime_type.startswith("application/"):
         content_data = content_data.decode("utf-8", errors="replace")
 
     return {"content_data": content_data, "language": language, "mimetype": mime_type}
 
 
 def gen_link(url, link_text=None, link_attrs=None):
     """
     Utility function for generating an HTML link to insert
     in Django templates.
 
     Args:
         url (str): an url
         link_text (str): optional text for the produced link,
             if not provided the url will be used
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="url">link_text</a>'
 
     """
     attrs = " "
     if link_attrs:
         for k, v in link_attrs.items():
             attrs += '%s="%s" ' % (k, v)
     if not link_text:
         link_text = url
     link = '<a%shref="%s">%s</a>' % (attrs, escape(url), escape(link_text))
     return mark_safe(link)
 
 
 def _snapshot_context_query_params(snapshot_context):
     query_params = {}
     if not snapshot_context:
         return query_params
     if snapshot_context and snapshot_context["origin_info"]:
         origin_info = snapshot_context["origin_info"]
         snp_query_params = snapshot_context["query_params"]
         query_params = {"origin_url": origin_info["url"]}
         if "timestamp" in snp_query_params:
             query_params["timestamp"] = snp_query_params["timestamp"]
         if "visit_id" in snp_query_params:
             query_params["visit_id"] = snp_query_params["visit_id"]
         if "snapshot" in snp_query_params and "visit_id" not in query_params:
             query_params["snapshot"] = snp_query_params["snapshot"]
     elif snapshot_context:
         query_params = {"snapshot": snapshot_context["snapshot_id"]}
 
     if snapshot_context["release"]:
         query_params["release"] = snapshot_context["release"]
     elif snapshot_context["branch"] and snapshot_context["branch"] not in (
         "HEAD",
         snapshot_context["revision_id"],
     ):
         query_params["branch"] = snapshot_context["branch"]
     elif snapshot_context["revision_id"]:
         query_params["revision"] = snapshot_context["revision_id"]
     return query_params
 
 
 def gen_revision_url(revision_id, snapshot_context=None):
     """
     Utility function for generating an url to a revision.
 
     Args:
         revision_id (str): a revision id
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing url
 
     Returns:
         str: The url to browse the revision
 
     """
     query_params = _snapshot_context_query_params(snapshot_context)
     # remove query parameters not needed for a revision view
     query_params.pop("revision", None)
     query_params.pop("release", None)
 
     return reverse(
         "browse-revision", url_args={"sha1_git": revision_id}, query_params=query_params
     )
 
 
 def gen_revision_link(
     revision_id,
     shorten_id=False,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a revision HTML view
     to insert in Django templates.
 
     Args:
         revision_id (str): a revision id
         shorten_id (boolean): whether to shorten the revision id to 7
             characters for the link text
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
         link_text (str): optional text for the generated link
             (the revision id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         str: An HTML link in the form '<a href="revision_url">revision_id</a>'
 
     """
     if not revision_id:
         return None
 
     revision_url = gen_revision_url(revision_id, snapshot_context)
 
     if shorten_id:
         return gen_link(revision_url, revision_id[:7], link_attrs)
     else:
         if not link_text:
             link_text = revision_id
         return gen_link(revision_url, link_text, link_attrs)
 
 
 def gen_directory_link(
     sha1_git,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a directory HTML view
     to insert in Django templates.
 
     Args:
         sha1_git (str): directory identifier
         link_text (str): optional text for the generated link
             (the directory id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="directory_view_url">link_text</a>'
 
     """
     if not sha1_git:
         return None
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     directory_url = reverse(
         "browse-directory", url_args={"sha1_git": sha1_git}, query_params=query_params
     )
 
     if not link_text:
         link_text = sha1_git
     return gen_link(directory_url, link_text, link_attrs)
 
 
 def gen_snapshot_link(
     snapshot_id,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a snapshot HTML view
     to insert in Django templates.
 
     Args:
         snapshot_id (str): snapshot identifier
         link_text (str): optional text for the generated link
             (the snapshot id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="snapshot_view_url">link_text</a>'
 
     """
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     snapshot_url = reverse(
         "browse-snapshot",
         url_args={"snapshot_id": snapshot_id},
         query_params=query_params,
     )
     if not link_text:
         link_text = snapshot_id
     return gen_link(snapshot_url, link_text, link_attrs)
 
 
 def gen_content_link(
     sha1_git,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a content HTML view
     to insert in Django templates.
 
     Args:
         sha1_git (str): content identifier
         link_text (str): optional text for the generated link
             (the content sha1_git will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="content_view_url">link_text</a>'
 
     """
     if not sha1_git:
         return None
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     content_url = reverse(
         "browse-content",
         url_args={"query_string": "sha1_git:" + sha1_git},
         query_params=query_params,
     )
     if not link_text:
         link_text = sha1_git
     return gen_link(content_url, link_text, link_attrs)
 
 
 def get_revision_log_url(revision_id, snapshot_context=None):
     """
     Utility function for getting the URL for a revision log HTML view
     (possibly in the context of an origin).
 
     Args:
         revision_id (str): revision identifier the history heads to
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
     Returns:
         The revision log view URL
     """
     query_params = {}
     if snapshot_context:
         query_params = _snapshot_context_query_params(snapshot_context)
 
     query_params["revision"] = revision_id
     if snapshot_context and snapshot_context["origin_info"]:
         revision_log_url = reverse("browse-origin-log", query_params=query_params)
     elif snapshot_context:
         url_args = {"snapshot_id": snapshot_context["snapshot_id"]}
         del query_params["snapshot"]
         revision_log_url = reverse(
             "browse-snapshot-log", url_args=url_args, query_params=query_params
         )
     else:
         revision_log_url = reverse(
             "browse-revision-log", url_args={"sha1_git": revision_id}
         )
     return revision_log_url
 
 
 def gen_revision_log_link(
     revision_id,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a revision log HTML view
     (possibly in the context of an origin) to insert in Django templates.
 
     Args:
         revision_id (str): revision identifier the history heads to
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
         link_text (str): optional text to use for the generated link
             (the revision id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form
         '<a href="revision_log_view_url">link_text</a>'
     """
     if not revision_id:
         return None
 
     revision_log_url = get_revision_log_url(revision_id, snapshot_context)
 
     if not link_text:
         link_text = revision_id
     return gen_link(revision_log_url, link_text, link_attrs)
 
 
 def gen_person_mail_link(person, link_text=None):
     """
     Utility function for generating a mail link to a person to insert
     in Django templates.
 
     Args:
         person (dict): dictionary containing person data
             (*name*, *email*, *fullname*)
         link_text (str): optional text to use for the generated mail link
             (the person name will be used by default)
 
     Returns:
         str: A mail link to the person or the person name if no email is
             present in person data
     """
     person_name = person["name"] or person["fullname"] or "None"
     if link_text is None:
         link_text = person_name
     person_email = person["email"] if person["email"] else None
     if person_email is None and "@" in person_name and " " not in person_name:
         person_email = person_name
     if person_email:
         return gen_link(url="mailto:%s" % person_email, link_text=link_text)
     else:
         return person_name
 
 
 def gen_release_link(
     sha1_git,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a release HTML view
     to insert in Django templates.
 
     Args:
         sha1_git (str): release identifier
         link_text (str): optional text for the generated link
             (the release id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="release_view_url">link_text</a>'
 
     """
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     release_url = reverse(
         "browse-release", url_args={"sha1_git": sha1_git}, query_params=query_params
     )
     if not link_text:
         link_text = sha1_git
     return gen_link(release_url, link_text, link_attrs)
 
 
 def format_log_entries(revision_log, per_page, snapshot_context=None):
     """
    Utility function that processes raw revision log data for HTML display.
     Its purpose is to:
 
         * add links to relevant browse views
         * format date in human readable format
         * truncate the message log
 
     Args:
         revision_log (list): raw revision log as returned by the swh-web api
         per_page (int): number of log entries per page
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
 
 
     """
     revision_log_data = []
     for i, rev in enumerate(revision_log):
         if i == per_page:
             break
         author_name = "None"
         author_fullname = "None"
         committer_fullname = "None"
         if rev["author"]:
             author_name = gen_person_mail_link(rev["author"])
             author_fullname = rev["author"]["fullname"]
         if rev["committer"]:
             committer_fullname = rev["committer"]["fullname"]
         author_date = format_utc_iso_date(rev["date"])
         committer_date = format_utc_iso_date(rev["committer_date"])
 
         tooltip = "revision %s\n" % rev["id"]
         tooltip += "author: %s\n" % author_fullname
         tooltip += "author date: %s\n" % author_date
         tooltip += "committer: %s\n" % committer_fullname
         tooltip += "committer date: %s\n\n" % committer_date
         if rev["message"]:
             tooltip += textwrap.indent(rev["message"], " " * 4)
 
         revision_log_data.append(
             {
                 "author": author_name,
                 "id": rev["id"][:7],
                 "message": rev["message"],
                 "date": author_date,
                 "commit_date": committer_date,
                 "url": gen_revision_url(rev["id"], snapshot_context),
                 "tooltip": tooltip,
             }
         )
     return revision_log_data
 
 
 # list of common readme names ordered by preference
 # (lower indices have higher priority)
 _common_readme_names = [
     "readme.markdown",
     "readme.md",
     "readme.rst",
     "readme.txt",
     "readme",
 ]
 
 
 def get_readme_to_display(readmes):
     """
    Process a collection of readme files found in a directory
    in order to find the adequate one to display.

    Args:
        readmes: a dict where keys are readme file names and values
            are readme sha1 checksums

    Returns:
        A tuple (readme_name, readme_url, readme_html)
     """
     readme_name = None
     readme_url = None
     readme_sha1 = None
     readme_html = None
 
     lc_readmes = {k.lower(): {"orig_name": k, "sha1": v} for k, v in readmes.items()}
 
     # look for readme names according to the preference order
     # defined by the _common_readme_names list
     for common_readme_name in _common_readme_names:
         if common_readme_name in lc_readmes:
             readme_name = lc_readmes[common_readme_name]["orig_name"]
             readme_sha1 = lc_readmes[common_readme_name]["sha1"]
             readme_url = reverse(
                 "browse-content-raw",
                 url_args={"query_string": readme_sha1},
                 query_params={"re_encode": "true"},
             )
             break
 
     # otherwise pick the first readme like file if any
     if not readme_name and len(readmes.items()) > 0:
         readme_name = next(iter(readmes))
         readme_sha1 = readmes[readme_name]
         readme_url = reverse(
             "browse-content-raw",
             url_args={"query_string": readme_sha1},
             query_params={"re_encode": "true"},
         )
 
     # convert rst README to html server side as there is
     # no viable solution to perform that task client side
     if readme_name and readme_name.endswith(".rst"):
-        cache_entry_id = "readme_%s" % readme_sha1
-        cache_entry = cache.get(cache_entry_id)
 
-        if cache_entry:
-            readme_html = cache_entry
-        else:
-            try:
-                rst_doc = request_content(readme_sha1)
-                readme_html = rst_to_html(rst_doc["raw_data"])
-                cache.set(cache_entry_id, readme_html)
-            except Exception as exc:
-                sentry_sdk.capture_exception(exc)
-                readme_html = "Readme bytes are not available"
+        @django_cache(
+            catch_exception=True,
+            exception_return_value="Readme bytes are not available",
+        )
+        def _rst_readme_to_html(readme_sha1):
+            rst_doc = request_content(readme_sha1)
+            return rst_to_html(rst_doc["raw_data"])
+
+        readme_html = _rst_readme_to_html(readme_sha1)
 
     return readme_name, readme_url, readme_html
diff --git a/swh/web/common/utils.py b/swh/web/common/utils.py
index da65d01b..38e483de 100644
--- a/swh/web/common/utils.py
+++ b/swh/web/common/utils.py
@@ -1,549 +1,595 @@
 # Copyright (C) 2017-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from datetime import datetime, timezone
+import functools
 import os
 import re
-from typing import Any, Dict, List, Optional
+from typing import Any, Callable, Dict, List, Optional
 import urllib.parse
 from xml.etree import ElementTree
 
 from bs4 import BeautifulSoup
 from docutils.core import publish_parts
 import docutils.parsers.rst
 import docutils.utils
 from docutils.writers.html5_polyglot import HTMLTranslator, Writer
 from iso8601 import ParseError, parse_date
 from pkg_resources import get_distribution
 from prometheus_client.registry import CollectorRegistry
 import requests
 from requests.auth import HTTPBasicAuth
+import sentry_sdk
 
 from django.core.cache import cache
+from django.core.cache.backends.base import DEFAULT_TIMEOUT
 from django.http import HttpRequest, QueryDict
 from django.shortcuts import redirect
 from django.urls import resolve
 from django.urls import reverse as django_reverse
 
 from swh.web.auth.utils import (
     ADD_FORGE_MODERATOR_PERMISSION,
     ADMIN_LIST_DEPOSIT_PERMISSION,
     MAILMAP_ADMIN_PERMISSION,
 )
 from swh.web.common.exc import BadInputExc
 from swh.web.common.typing import QueryParameters
 from swh.web.config import SWH_WEB_SERVER_NAME, get_config, search
 
 SWH_WEB_METRICS_REGISTRY = CollectorRegistry(auto_describe=True)
 
 swh_object_icons = {
     "alias": "mdi mdi-star",
     "branch": "mdi mdi-source-branch",
     "branches": "mdi mdi-source-branch",
     "content": "mdi mdi-file-document",
     "cnt": "mdi mdi-file-document",
     "directory": "mdi mdi-folder",
     "dir": "mdi mdi-folder",
     "origin": "mdi mdi-source-repository",
     "ori": "mdi mdi-source-repository",
     "person": "mdi mdi-account",
     "revisions history": "mdi mdi-history",
     "release": "mdi mdi-tag",
     "rel": "mdi mdi-tag",
     "releases": "mdi mdi-tag",
     "revision": "mdi mdi-rotate-90 mdi-source-commit",
     "rev": "mdi mdi-rotate-90 mdi-source-commit",
     "snapshot": "mdi mdi-camera",
     "snp": "mdi mdi-camera",
     "visits": "mdi mdi-calendar-month",
 }
 
 
 def reverse(
     viewname: str,
     url_args: Optional[Dict[str, Any]] = None,
     query_params: Optional[QueryParameters] = None,
     current_app: Optional[str] = None,
     urlconf: Optional[str] = None,
     request: Optional[HttpRequest] = None,
 ) -> str:
     """An override of django reverse function supporting query parameters.
 
     Args:
         viewname: the name of the django view from which to compute a url
         url_args: dictionary of url arguments indexed by their names
         query_params: dictionary of query parameters to append to the
             reversed url
         current_app: the name of the django app tighten to the view
         urlconf: url configuration module
         request: build an absolute URI if provided
 
     Returns:
         str: the url of the requested view with processed arguments and
         query parameters
     """
 
     if url_args:
         url_args = {k: v for k, v in url_args.items() if v is not None}
 
     url = django_reverse(
         viewname, urlconf=urlconf, kwargs=url_args, current_app=current_app
     )
 
     if query_params:
         query_params = {k: v for k, v in query_params.items() if v is not None}
 
     if query_params and len(query_params) > 0:
         query_dict = QueryDict("", mutable=True)
         for k in sorted(query_params.keys()):
             query_dict[k] = query_params[k]
         url += "?" + query_dict.urlencode(safe="/;:")
 
     if request is not None:
         url = request.build_absolute_uri(url)
 
     return url
 
 
 def datetime_to_utc(date):
     """Returns datetime in UTC without timezone info
 
     Args:
         date (datetime.datetime): input datetime with timezone info
 
     Returns:
         datetime.datetime: datetime in UTC without timezone info
     """
     if date.tzinfo and date.tzinfo != timezone.utc:
         return date.astimezone(tz=timezone.utc)
     else:
         return date
 
 
 def parse_iso8601_date_to_utc(iso_date: str) -> datetime:
     """Given an ISO 8601 datetime string, parse the result as UTC datetime.
 
     Returns:
         a timezone-aware datetime representing the parsed date
 
     Raises:
         swh.web.common.exc.BadInputExc: provided date does not respect ISO 8601 format
 
     Samples:
         - 2016-01-12
         - 2016-01-12T09:19:12+0100
         - 2007-01-14T20:34:22Z
 
     """
     try:
         date = parse_date(iso_date)
         return datetime_to_utc(date)
     except ParseError as e:
         raise BadInputExc(e)
 
 
 def shorten_path(path):
     """Shorten the given path: for each hash present, only return the first
     8 characters followed by an ellipsis"""
 
     sha256_re = r"([0-9a-f]{8})[0-9a-z]{56}"
     sha1_re = r"([0-9a-f]{8})[0-9a-f]{32}"
 
     ret = re.sub(sha256_re, r"\1...", path)
     return re.sub(sha1_re, r"\1...", ret)
 
 
 def format_utc_iso_date(iso_date, fmt="%d %B %Y, %H:%M UTC"):
     """Turns a string representation of an ISO 8601 datetime string
     to UTC and format it into a more human readable one.
 
     For instance, from the following input
     string: '2017-05-04T13:27:13+02:00' the following one
     is returned: '04 May 2017, 11:27 UTC'.
     Custom format string may also be provided
     as parameter
 
     Args:
         iso_date (str): a string representation of an ISO 8601 date
         fmt (str): optional date formatting string
 
     Returns:
         str: a formatted string representation of the input iso date
     """
     if not iso_date:
         return iso_date
     date = parse_iso8601_date_to_utc(iso_date)
     return date.strftime(fmt)
 
 
 def gen_path_info(path):
     """Function to generate path data navigation for use
     with a breadcrumb in the swh web ui.
 
     For instance, from a path /folder1/folder2/folder3,
     it returns the following list::
 
         [{'name': 'folder1', 'path': 'folder1'},
          {'name': 'folder2', 'path': 'folder1/folder2'},
          {'name': 'folder3', 'path': 'folder1/folder2/folder3'}]
 
     Args:
         path: a filesystem path
 
     Returns:
         list: a list of path data for navigation as illustrated above.
 
     """
     path_info = []
     if path:
         sub_paths = path.strip("/").split("/")
         path_from_root = ""
         for p in sub_paths:
             path_from_root += "/" + p
             path_info.append({"name": p, "path": path_from_root.strip("/")})
     return path_info
 
 
 def parse_rst(text, report_level=2):
     """
     Parse a reStructuredText string with docutils.
 
     Args:
         text (str): string with reStructuredText markups in it
         report_level (int): level of docutils report messages to print
             (1 info 2 warning 3 error 4 severe 5 none)
 
     Returns:
         docutils.nodes.document: a parsed docutils document
     """
     parser = docutils.parsers.rst.Parser()
     components = (docutils.parsers.rst.Parser,)
     settings = docutils.frontend.OptionParser(
         components=components
     ).get_default_values()
     settings.report_level = report_level
     document = docutils.utils.new_document("rst-doc", settings=settings)
     parser.parse(text, document)
     return document
 
 
 def get_client_ip(request):
     """
     Return the client IP address from an incoming HTTP request.
 
     Args:
         request (django.http.HttpRequest): the incoming HTTP request
 
     Returns:
         str: The client IP address
     """
     x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
     if x_forwarded_for:
         ip = x_forwarded_for.split(",")[0]
     else:
         ip = request.META.get("REMOTE_ADDR")
     return ip
 
 
 def is_swh_web_development(request: HttpRequest) -> bool:
     """Indicate if we are running a development version of swh-web.
     """
     site_base_url = request.build_absolute_uri("/")
     return any(
         host in site_base_url for host in ("localhost", "127.0.0.1", "testserver")
     )
 
 
 def is_swh_web_staging(request: HttpRequest) -> bool:
     """Indicate if we are running a staging version of swh-web.
     """
     config = get_config()
     site_base_url = request.build_absolute_uri("/")
     return any(
         server_name in site_base_url for server_name in config["staging_server_names"]
     )
 
 
 def is_swh_web_production(request: HttpRequest) -> bool:
     """Indicate if we are running the public production version of swh-web.
     """
     return SWH_WEB_SERVER_NAME in request.build_absolute_uri("/")
 
 
 browsers_supported_image_mimes = set(
     [
         "image/gif",
         "image/png",
         "image/jpeg",
         "image/bmp",
         "image/webp",
         "image/svg",
         "image/svg+xml",
     ]
 )
 
 
 def context_processor(request):
     """
     Django context processor used to inject variables
     in all swh-web templates.
     """
     config = get_config()
     if (
         hasattr(request, "user")
         and request.user.is_authenticated
         and not hasattr(request.user, "backend")
     ):
         # To avoid django.template.base.VariableDoesNotExist errors
         # when rendering templates when standard Django user is logged in.
         request.user.backend = "django.contrib.auth.backends.ModelBackend"
 
     return {
         "swh_object_icons": swh_object_icons,
         "available_languages": None,
         "swh_client_config": config["client_config"],
         "oidc_enabled": bool(config["keycloak"]["server_url"]),
         "browsers_supported_image_mimes": browsers_supported_image_mimes,
         "keycloak": config["keycloak"],
         "site_base_url": request.build_absolute_uri("/"),
         "DJANGO_SETTINGS_MODULE": os.environ["DJANGO_SETTINGS_MODULE"],
         "status": config["status"],
         "swh_web_dev": is_swh_web_development(request),
         "swh_web_staging": is_swh_web_staging(request),
         "swh_web_version": get_distribution("swh.web").version,
         "iframe_mode": False,
         "ADMIN_LIST_DEPOSIT_PERMISSION": ADMIN_LIST_DEPOSIT_PERMISSION,
         "ADD_FORGE_MODERATOR_PERMISSION": ADD_FORGE_MODERATOR_PERMISSION,
         "FEATURES": get_config()["features"],
         "MAILMAP_ADMIN_PERMISSION": MAILMAP_ADMIN_PERMISSION,
     }
 
 
 def resolve_branch_alias(
     snapshot: Dict[str, Any], branch: Optional[Dict[str, Any]]
 ) -> Optional[Dict[str, Any]]:
     """
     Resolve branch alias in snapshot content.
 
     Args:
         snapshot: a full snapshot content
         branch: a branch alias contained in the snapshot
     Returns:
         The real snapshot branch that got aliased.
     """
     while branch and branch["target_type"] == "alias":
         if branch["target"] in snapshot["branches"]:
             branch = snapshot["branches"][branch["target"]]
         else:
             from swh.web.common import archive
 
             snp = archive.lookup_snapshot(
                 snapshot["id"], branches_from=branch["target"], branches_count=1
             )
             if snp and branch["target"] in snp["branches"]:
                 branch = snp["branches"][branch["target"]]
             else:
                 branch = None
     return branch
 
 
 class _NoHeaderHTMLTranslator(HTMLTranslator):
     """
     Docutils translator subclass to customize the generation of HTML
     from reST-formatted docstrings
     """
 
     def __init__(self, document):
         super().__init__(document)
         self.body_prefix = []
         self.body_suffix = []
 
 
 _HTML_WRITER = Writer()
 _HTML_WRITER.translator_class = _NoHeaderHTMLTranslator
 
 
 def rst_to_html(rst: str) -> str:
     """
     Convert reStructuredText document into HTML.
 
     Args:
         rst: A string containing a reStructuredText document
 
     Returns:
         Body content of the produced HTML conversion.
 
     """
     settings = {
         "initial_header_level": 2,
         "halt_level": 4,
         "traceback": True,
         "file_insertion_enabled": False,
         "raw_enabled": False,
     }
     pp = publish_parts(rst, writer=_HTML_WRITER, settings_overrides=settings)
     return f'<div class="swh-rst">{pp["html_body"]}</div>'
 
 
 def prettify_html(html: str) -> str:
     """
     Prettify an HTML document.
 
     Args:
         html: Input HTML document
 
     Returns:
         The prettified HTML document
     """
     return BeautifulSoup(html, "lxml").prettify()
 
 
+def django_cache(
+    timeout: int = DEFAULT_TIMEOUT,
+    catch_exception: bool = False,
+    exception_return_value: Any = None,
+    invalidate_cache_pred: Callable[[Any], bool] = lambda val: False,
+):
+    """Decorator to put the result of a function call in Django cache;
+    subsequent calls will directly return the cached value.
+
+    Args:
+        timeout: The number of seconds the value will be held in cache
+        catch_exception: If :const:`True`, any exception thrown by
+            the decorated function will be caught and not reraised
+        exception_return_value: The value to return if the previous
+            parameter is set to :const:`True`
+        invalidate_cache_pred: A predicate function enabling to
+            invalidate the cache under certain conditions; the decorated
+            function will then be called again
+
+    Returns:
+        The returned value of the decorated function for the specified
+        parameters
+
+    """
+
+    def inner(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            func_args = args + (0,) + tuple(sorted(kwargs.items()))
+            cache_key = str(hash((func.__module__, func.__name__) + func_args))
+            ret = cache.get(cache_key)
+            if ret is None or invalidate_cache_pred(ret):
+                try:
+                    ret = func(*args, **kwargs)
+                except Exception as exc:
+                    sentry_sdk.capture_exception(exc)
+                    if catch_exception:
+                        return exception_return_value
+                    else:
+                        raise
+                else:
+                    cache.set(cache_key, ret, timeout=timeout)
+            return ret
+
+        return wrapper
+
+    return inner
+
+
 def _deposits_list_url(
     deposits_list_base_url: str, page_size: int, username: Optional[str]
 ) -> str:
     params = {"page_size": str(page_size)}
     if username is not None:
         params["username"] = username
     return f"{deposits_list_base_url}?{urllib.parse.urlencode(params)}"
 
 
 def get_deposits_list(username: Optional[str] = None) -> List[Dict[str, Any]]:
     """Return the list of software deposits using swh-deposit API
     """
     config = get_config()["deposit"]
     deposits_list_base_url = config["private_api_url"] + "deposits"
     deposits_list_auth = HTTPBasicAuth(
         config["private_api_user"], config["private_api_password"]
     )
 
     deposits_list_url = _deposits_list_url(
         deposits_list_base_url, page_size=1, username=username
     )
 
     nb_deposits = requests.get(
         deposits_list_url, auth=deposits_list_auth, timeout=30
     ).json()["count"]
 
-    cache_key = f"swh-deposit-list-{username}"
-    deposits_data = cache.get(cache_key)
-    if not deposits_data or deposits_data["count"] != nb_deposits:
+    @django_cache(invalidate_cache_pred=lambda data: data["count"] != nb_deposits)
+    def _get_deposits_data():
         deposits_list_url = _deposits_list_url(
             deposits_list_base_url, page_size=nb_deposits, username=username
         )
-        deposits_data = requests.get(
+        return requests.get(
             deposits_list_url, auth=deposits_list_auth, timeout=30,
         ).json()
-        cache.set(cache_key, deposits_data)
+
+    deposits_data = _get_deposits_data()
 
     return deposits_data["results"]
 
 
+@django_cache()
 def get_deposit_raw_metadata(deposit_id: int) -> Optional[str]:
-    cache_key = f"swh-deposit-raw-metadata-{deposit_id}"
-    metadata = cache.get(cache_key)
-    if metadata is None:
-        config = get_config()["deposit"]
-
-        url = f"{config['private_api_url']}/{deposit_id}/meta"
-        metadata = requests.get(url).json()["raw_metadata"]
-        cache.set(cache_key, metadata)
-
-    return metadata
+    config = get_config()["deposit"]
+    url = f"{config['private_api_url']}/{deposit_id}/meta"
+    return requests.get(url).json()["raw_metadata"]
 
 
 def origin_visit_types() -> List[str]:
     """Return the exhaustive list of visit types for origins
     ingested into the archive.
     """
     try:
         return sorted(search().visit_types_count().keys())
     except Exception:
         return []
 
 
 def redirect_to_new_route(request, new_route, permanent=True):
     """Redirect a request to another route with url args and query parameters
     eg: /origin/<url:url-val>/log?path=test can be redirected as
     /log?url=<url-val>&path=test. This can be used to deprecate routes
     """
     request_path = resolve(request.path_info)
     args = {**request_path.kwargs, **request.GET.dict()}
     return redirect(reverse(new_route, query_params=args), permanent=permanent,)
 
 
 NAMESPACES = {
     "swh": "https://www.softwareheritage.org/schema/2018/deposit",
     "schema": "http://schema.org/",
 }
 
 
 def parse_swh_metadata_provenance(raw_metadata: str) -> Optional[str]:
     """Parse swh metadata-provenance out of the raw metadata deposit. If found, returns the
     value, None otherwise.
 
     .. code-block:: xml
 
          <swh:deposit>
            <swh:metadata-provenance>
              <schema:url>https://example.org/metadata/url</schema:url>
            </swh:metadata-provenance>
          </swh:deposit>
 
     Args:
         raw_metadata: raw metadata out of deposits received
 
     Returns:
         Either the metadata provenance url if any or None otherwise
 
     """
     metadata = ElementTree.fromstring(raw_metadata)
     url = metadata.findtext(
         "swh:deposit/swh:metadata-provenance/schema:url", namespaces=NAMESPACES,
     )
     return url or None
 
 
 def parse_swh_deposit_origin(raw_metadata: str) -> Optional[str]:
     """Parses <swh:add_to_origin> and <swh:create_origin> from metadata document,
     if any. They are mutually exclusive and tested as such in the deposit.
 
     .. code-block:: xml
 
        <swh:deposit>
          <swh:create_origin>
            <swh:origin url='https://example.org/repo/software123/'/>
         </swh:create_origin>
        </swh:deposit>
 
     .. code-block:: xml
 
        <swh:deposit>
          <swh:add_to_origin>
            <swh:origin url='https://example.org/repo/software123/'/>
          </swh:add_to_origin>
        </swh:deposit>
 
     Returns:
         The one not null if any, None otherwise
 
     """
     metadata = ElementTree.fromstring(raw_metadata)
     for origin_tag in ["create_origin", "add_to_origin"]:
         elt = metadata.find(
             f"swh:deposit/swh:{origin_tag}/swh:origin[@url]", namespaces=NAMESPACES
         )
         if elt is not None:
             return elt.attrib["url"]
     return None
 
 
 def has_add_forge_now_permission(user) -> bool:
     """Is a user considered an add-forge-now moderator?
 
     Returns
         True if a user is staff or has add forge now moderator permission
 
     """
     return user.is_staff or user.has_perm(ADD_FORGE_MODERATOR_PERMISSION)
diff --git a/swh/web/misc/coverage.py b/swh/web/misc/coverage.py
index 603a266e..6edb1400 100644
--- a/swh/web/misc/coverage.py
+++ b/swh/web/misc/coverage.py
@@ -1,422 +1,431 @@
 # Copyright (C) 2018-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from collections import Counter, defaultdict
 from typing import Any, Dict, List, Tuple
 from urllib.parse import urlparse
 
-import sentry_sdk
-
 from django.conf.urls import url
-from django.core.cache import cache
 from django.http.request import HttpRequest
 from django.http.response import HttpResponse
 from django.shortcuts import render
 from django.views.decorators.cache import never_cache
 from django.views.decorators.clickjacking import xframe_options_exempt
 
 from swh.scheduler.model import SchedulerMetrics
 from swh.web.common import archive
 from swh.web.common.origin_save import get_savable_visit_types
-from swh.web.common.utils import get_deposits_list, is_swh_web_production, reverse
+from swh.web.common.utils import (
+    django_cache,
+    get_deposits_list,
+    is_swh_web_production,
+    reverse,
+)
 from swh.web.config import scheduler
 
 _swh_arch_overview_doc = (
     "https://docs.softwareheritage.org/devel/architecture/overview.html"
 )
 
 # Current coverage list of the archive in a high level overview fashion,
 # categorized as follow:
 #   - listed origins: origins discovered using a swh lister
 #   - legacy: origins where public hosting service has closed
 #   - deposited: origins coming from swh-deposit
 #
 # TODO: Store that list in a database table somewhere (swh-scheduler, swh-storage ?)
 #       and retrieve it dynamically
 listed_origins: Dict[str, Any] = {
     "info": (
         "These software origins get continuously discovered and archived using "
         f'the <a href="{_swh_arch_overview_doc}#listers" target="_blank" '
         'rel="noopener noreferrer">listers</a> implemented by Software Heritage.'
     ),
     "origins": [
         {
             "type": "bitbucket",
             "info_url": "https://bitbucket.org",
             "info": "public repositories from Bitbucket",
             "search_pattern": "https://bitbucket.org/",
         },
         {
             "type": "cgit",
             "info_url": "https://git.zx2c4.com/cgit/about",
             "info": "public repositories from cgit instances",
             "search_pattern": "cgit",
         },
         {
             "type": "CRAN",
             "info_url": "https://cran.r-project.org",
             "info": "source packages from The Comprehensive R Archive Network",
             "search_pattern": "https://cran.r-project.org/",
         },
         {
             "type": "debian",
             "info_url": "https://www.debian.org",
             "info": "source packages from Debian and Debian-based distributions",
             "search_pattern": "deb://",
         },
         {
             "type": "gitea",
             "info_url": "https://gitea.io",
             "info": "public repositories from Gitea instances",
             "search_pattern": "gitea",
         },
         {
             "type": "github",
             "info_url": "https://github.com",
             "info": "public repositories from GitHub",
             "search_pattern": "https://github.com/",
         },
         {
             "type": "gitlab",
             "info_url": "https://gitlab.com",
             "info": "public repositories from multiple GitLab instances",
             "search_pattern": "gitlab",
         },
         {
             "type": "guix",
             "info_url": "https://guix.gnu.org",
             "info": "source code tarballs used to build the Guix package collection",
             "visit_types": ["nixguix"],
             "search_pattern": "https://guix.gnu.org/sources.json",
         },
         {
             "type": "GNU",
             "info_url": "https://www.gnu.org",
             "info": "releases from the GNU project (as of August 2015)",
             "search_pattern": "gnu",
         },
         {
             "type": "heptapod",
             "info_url": "https://heptapod.net/",
             "info": "public repositories from multiple Heptapod instances",
             "search_pattern": "heptapod",
         },
         {
             "type": "launchpad",
             "info_url": "https://launchpad.net",
             "logo": "img/logos/launchpad.png",
             "info": "public repositories from Launchpad",
             "search_pattern": "https://git.launchpad.net/",
         },
         {
             "type": "nixos",
             "info_url": "https://nixos.org",
             "info": "source code tarballs used to build the Nix package collection",
             "visit_types": ["nixguix"],
             "search_pattern": (
                 "https://nix-community.github.io/nixpkgs-swh/sources-unstable.json"
             ),
         },
         {
             "type": "npm",
             "info_url": "https://www.npmjs.com",
             "info": "public packages from the package registry for javascript",
             "search_pattern": "https://www.npmjs.com",
         },
         {
             "type": "opam",
             "info_url": "https://opam.ocaml.org/",
             "info": "public packages from the source-based package manager for OCaml",
             "search_pattern": "opam+https://opam.ocaml.org/",
         },
         # apart our forge, most phabricator origins have not been archived
         # while they have been listed so do not display those type of origins
         # until new listing processes have been executed and origins loaded
         #
         # {
         #     "type": "phabricator",
         #     "info_url": "https://www.phacility.com/phabricator",
         #     "info": "public repositories from multiple Phabricator instances",
         #     "search_pattern": "phabricator",
         # },
         {
             "type": "pypi",
             "info_url": "https://pypi.org",
             "info": "source packages from the Python Package Index",
             "search_pattern": "https://pypi.org",
         },
         {
             "type": "sourceforge",
             "info_url": "https://sourceforge.net",
             "info": "public repositories from SourceForge",
             "search_pattern": "code.sf.net",
         },
     ],
 }
 
 legacy_origins: Dict[str, Any] = {
     "info": (
         "Discontinued hosting services. Those origins have been archived "
         "by Software Heritage."
     ),
     "origins": [
         {
             "type": "gitorious",
             "info_url": "https://en.wikipedia.org/wiki/Gitorious",
             "info": (
                 "public repositories from the former Gitorious code hosting service"
             ),
             "visit_types": ["git"],
             "search_pattern": "https://gitorious.org",
             "count": "122,014",
         },
         {
             "type": "googlecode",
             "info_url": "https://code.google.com/archive",
             "info": (
                 "public repositories from the former Google Code project "
                 "hosting service"
             ),
             "visit_types": ["git", "hg", "svn"],
             "search_pattern": "googlecode.com",
             "count": "790,026",
         },
         {
             "type": "bitbucket",
             "info_url": "https://bitbucket.org",
             "info": "public repositories from Bitbucket",
             "search_pattern": "https://bitbucket.org/",
             "visit_types": ["hg"],
             "count": "336,795",
         },
     ],
 }
 
 deposited_origins: Dict[str, Any] = {
     "info": (
         "These origins are directly pushed into the archive by trusted partners "
         f'using the <a href="{_swh_arch_overview_doc}#deposit" target="_blank" '
         'rel="noopener noreferrer">deposit</a> service of Software Heritage.'
     ),
     "origins": [
         {
             "type": "elife",
             "info_url": "https://elifesciences.org",
             "info": (
                 "research software source code associated to the articles "
                 "eLife publishes"
             ),
             "search_pattern": "elife.stencila.io",
             "visit_types": ["deposit"],
         },
         {
             "type": "hal",
             "info_url": "https://hal.archives-ouvertes.fr",
             "info": "scientific software source code deposited in the open archive HAL",
             "visit_types": ["deposit"],
             "search_pattern": "hal.archives-ouvertes.fr",
         },
         {
             "type": "ipol",
             "info_url": "https://www.ipol.im",
             "info": "software artifacts associated to the articles IPOL publishes",
             "visit_types": ["deposit"],
             "search_pattern": "doi.org/10.5201",
         },
     ],
 }
 
 _cache_timeout = 5 * 60
 
 
 def _get_listers_metrics(
     cache_metrics: bool = False,
 ) -> Dict[str, List[Tuple[str, SchedulerMetrics]]]:
     """Returns scheduler metrics in the following mapping:
     Dict[lister_name, List[Tuple[instance_name, SchedulerMetrics]]]
     as a lister instance has one SchedulerMetrics object per visit type.
     """
-    cache_key = "lister_metrics"
-    listers_metrics = cache.get(cache_key, {})
-    if not listers_metrics:
+
+    @django_cache(
+        timeout=_cache_timeout,
+        catch_exception=True,
+        exception_return_value={},
+        invalidate_cache_pred=lambda m: not cache_metrics,
+    )
+    def _get_listers_metrics_internal():
         listers_metrics = defaultdict(list)
-        try:
-            listers = scheduler().get_listers()
-            scheduler_metrics = scheduler().get_metrics()
-            for lister in listers:
-                for metrics in filter(
-                    lambda m: m.lister_id == lister.id, scheduler_metrics
-                ):
-                    listers_metrics[lister.name].append((lister.instance_name, metrics))
-            if cache_metrics:
-                cache.set(cache_key, listers_metrics, timeout=_cache_timeout)
-        except Exception as e:
-            sentry_sdk.capture_exception(e)
-
-    return listers_metrics
+        listers = scheduler().get_listers()
+        scheduler_metrics = scheduler().get_metrics()
+        for lister in listers:
+            for metrics in filter(
+                lambda m: m.lister_id == lister.id, scheduler_metrics
+            ):
+                listers_metrics[lister.name].append((lister.instance_name, metrics))
+
+        return listers_metrics
+
+    return _get_listers_metrics_internal()
 
 
 def _get_deposits_netloc_counts(cache_counts: bool = False) -> Counter:
     """Return deposit counts per origin url network location.
     """
 
     def _process_origin_url(origin_url):
         parsed_url = urlparse(origin_url)
         netloc = parsed_url.netloc
         # special treatment for doi.org netloc as it is not specific enough
         # for origins mapping
         if parsed_url.netloc == "doi.org":
             netloc += "/" + parsed_url.path.split("/")[1]
         return netloc
 
-    cache_key = "deposits_netloc_counts"
-    deposits_netloc_counts = cache.get(cache_key, Counter())
-    if not deposits_netloc_counts:
+    @django_cache(
+        timeout=_cache_timeout,
+        catch_exception=True,
+        exception_return_value=Counter(),
+        invalidate_cache_pred=lambda m: not cache_counts,
+    )
+    def _get_deposits_netloc_counts_internal():
         netlocs = []
-        try:
-            deposits = get_deposits_list()
-            netlocs = [
-                _process_origin_url(d["origin_url"])
-                for d in deposits
-                if d["status"] == "done"
-            ]
-            deposits_netloc_counts = Counter(netlocs)
-            if cache_counts:
-                cache.set(cache_key, deposits_netloc_counts, timeout=_cache_timeout)
-        except Exception as e:
-            sentry_sdk.capture_exception(e)
+        deposits = get_deposits_list()
+        netlocs = [
+            _process_origin_url(d["origin_url"])
+            for d in deposits
+            if d["status"] == "done"
+        ]
+        deposits_netloc_counts = Counter(netlocs)
+        return deposits_netloc_counts
 
-    return deposits_netloc_counts
+    return _get_deposits_netloc_counts_internal()
 
 
 def _get_nixguix_origins_count(origin_url: str, cache_count: bool = False) -> int:
     """Returns number of archived tarballs for NixOS, aka the number
     of branches in a dedicated origin in the archive.
     """
-    cache_key = f"nixguix_origins_count_{origin_url}"
-    nixguix_origins_count = cache.get(cache_key, 0)
-    if not nixguix_origins_count:
+
+    @django_cache(
+        timeout=_cache_timeout,
+        catch_exception=True,
+        exception_return_value=0,
+        invalidate_cache_pred=lambda m: not cache_count,
+    )
+    def _get_nixguix_origins_count_internal():
         snapshot = archive.lookup_latest_origin_snapshot(origin_url)
         if snapshot:
             snapshot_sizes = archive.lookup_snapshot_sizes(snapshot["id"])
             nixguix_origins_count = snapshot_sizes["release"]
         else:
             nixguix_origins_count = 0
-        if cache_count:
-            cache.set(cache_key, nixguix_origins_count, timeout=_cache_timeout)
-    return nixguix_origins_count
+        return nixguix_origins_count
+
+    return _get_nixguix_origins_count_internal()
 
 
 def _search_url(query: str, visit_type: str) -> str:
     return reverse(
         "browse-search",
         query_params={
             "q": query,
             "visit_type": visit_type,
             "with_visit": "true",
             "with_content": "true",
         },
     )
 
 
 @xframe_options_exempt
 @never_cache
 def _swh_coverage(request: HttpRequest) -> HttpResponse:
     use_cache = is_swh_web_production(request)
     listers_metrics = _get_listers_metrics(use_cache)
     for origins in listed_origins["origins"]:
         origins["instances"] = {}
         origins_type = origins["type"]
 
         # special processing for nixos/guix origins as there is no
         # scheduler metrics for those
         if origins_type in ("nixos", "guix"):
             count = _get_nixguix_origins_count(origins["search_pattern"], use_cache)
 
             origins["count"] = f"{count:,}" if count else ""
             origins["instances"][origins_type] = {"nixguix": {"count": count}}
 
         if origins_type not in listers_metrics:
             continue
 
         count_total = sum(
             [metrics.origins_known for _, metrics in listers_metrics[origins_type]]
         )
         count_never_visited = sum(
             [
                 metrics.origins_never_visited
                 for _, metrics in listers_metrics[origins_type]
             ]
         )
         count = count_total - count_never_visited
 
         origins["count"] = f"{count:,}"
         origins["instances"] = defaultdict(dict)
         for instance, metrics in listers_metrics[origins_type]:
             # these types are available in staging/docker but not yet in production
             if (
                 metrics.visit_type in ("bzr", "cvs")
                 and metrics.visit_type not in get_savable_visit_types()
             ):
                 continue
             instance_count = metrics.origins_known - metrics.origins_never_visited
             origins["instances"][instance].update(
                 {metrics.visit_type: {"count": f"{instance_count:,}"}}
             )
             origins["visit_types"] = list(
                 set(origins["instances"][instance].keys())
                 | set(origins.get("visit_types", []))
             )
 
         if origins_type == "CRAN":
             origins["instances"]["cran"]["cran"] = {"count": origins["count"]}
 
         # defaultdict cannot be iterated in django template
         origins["instances"] = dict(origins["instances"])
 
     for origins in listed_origins["origins"]:
         instances = origins["instances"]
         nb_instances = len(instances)
         for instance_name, visit_types in instances.items():
             for visit_type in visit_types:
                 if nb_instances > 1:
                     search_pattern = instance_name
                 else:
                     search_pattern = origins["search_pattern"]
                 search_url = _search_url(search_pattern, visit_type)
                 visit_types[visit_type]["search_url"] = search_url
 
     for origins in legacy_origins["origins"]:
         origins["search_urls"] = {}
         for visit_type in origins["visit_types"]:
             origins["search_urls"][visit_type] = _search_url(
                 origins["search_pattern"], visit_type
             )
 
     deposits_counts = _get_deposits_netloc_counts(use_cache)
 
     for origins in deposited_origins["origins"]:
         if origins["search_pattern"] in deposits_counts:
             origins["count"] = f"{deposits_counts[origins['search_pattern']]:,}"
         origins["search_urls"] = {
             "deposit": _search_url(origins["search_pattern"], "deposit")
         }
 
     return render(
         request,
         "misc/coverage.html",
         {
             "origins": {
                 "Regular crawling": listed_origins,
                 "Discontinued hosting": legacy_origins,
                 "On demand archival": deposited_origins,
             }
         },
     )
 
 
 urlpatterns = [
     url(r"^coverage/$", _swh_coverage, name="swh-coverage"),
 ]
diff --git a/swh/web/tests/common/test_utils.py b/swh/web/tests/common/test_utils.py
index 4d39a2ba..65cf28f5 100644
--- a/swh/web/tests/common/test_utils.py
+++ b/swh/web/tests/common/test_utils.py
@@ -1,357 +1,433 @@
 # Copyright (C) 2017-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 from base64 import b64encode
 import datetime
+import math
 from os.path import join
+import sys
 from urllib.parse import quote
 
 import pytest
 
 from django.conf.urls import url
 from django.test.utils import override_settings
 from django.urls.exceptions import NoReverseMatch
 
 from swh.web.common import utils
 from swh.web.common.exc import BadInputExc
 from swh.web.config import SWH_WEB_SERVER_NAME, SWH_WEB_STAGING_SERVER_NAMES, get_config
 
 
 def test_shorten_path_noop():
     noops = ["/api/", "/browse/", "/content/symbol/foobar/"]
 
     for noop in noops:
         assert utils.shorten_path(noop) == noop
 
 
 def test_shorten_path_sha1():
     sha1 = "aafb16d69fd30ff58afdd69036a26047f3aebdc6"
     short_sha1 = sha1[:8] + "..."
 
     templates = [
         "/api/1/content/sha1:%s/",
         "/api/1/content/sha1_git:%s/",
         "/api/1/directory/%s/",
         "/api/1/content/sha1:%s/ctags/",
     ]
 
     for template in templates:
         assert utils.shorten_path(template % sha1) == template % short_sha1
 
 
 def test_shorten_path_sha256():
     sha256 = "aafb16d69fd30ff58afdd69036a26047" "213add102934013a014dfca031c41aef"
     short_sha256 = sha256[:8] + "..."
 
     templates = [
         "/api/1/content/sha256:%s/",
         "/api/1/directory/%s/",
         "/api/1/content/sha256:%s/filetype/",
     ]
 
     for template in templates:
         assert utils.shorten_path(template % sha256) == template % short_sha256
 
 
 @pytest.mark.parametrize(
     "input_timestamp, output_date",
     [
         (
             "2016-01-12",
             datetime.datetime(2016, 1, 12, 0, 0, tzinfo=datetime.timezone.utc),
         ),
         (
             "2016-01-12T09:19:12+0100",
             datetime.datetime(2016, 1, 12, 8, 19, 12, tzinfo=datetime.timezone.utc),
         ),
         (
             "2007-01-14T20:34:22Z",
             datetime.datetime(2007, 1, 14, 20, 34, 22, tzinfo=datetime.timezone.utc),
         ),
     ],
 )
 def test_parse_iso8601_date_to_utc_ok(input_timestamp, output_date):
     assert utils.parse_iso8601_date_to_utc(input_timestamp) == output_date
 
 
 @pytest.mark.parametrize(
     "invalid_iso8601_timestamp", ["Today is January 1, 2047 at 8:21:00AM", "1452591542"]
 )
 def test_parse_iso8601_date_to_utc_ko(invalid_iso8601_timestamp):
     with pytest.raises(BadInputExc):
         utils.parse_iso8601_date_to_utc(invalid_iso8601_timestamp)
 
 
 def test_format_utc_iso_date():
     assert (
         utils.format_utc_iso_date("2017-05-04T13:27:13+02:00")
         == "04 May 2017, 11:27 UTC"
     )
 
 
 def test_gen_path_info():
     input_path = "/home/user/swh-environment/swh-web/"
     expected_result = [
         {"name": "home", "path": "home"},
         {"name": "user", "path": "home/user"},
         {"name": "swh-environment", "path": "home/user/swh-environment"},
         {"name": "swh-web", "path": "home/user/swh-environment/swh-web"},
     ]
     path_info = utils.gen_path_info(input_path)
     assert path_info == expected_result
 
     input_path = "home/user/swh-environment/swh-web"
     path_info = utils.gen_path_info(input_path)
     assert path_info == expected_result
 
 
 def test_rst_to_html():
     rst = (
         "Section\n"
         "=======\n\n"
         "**Some strong text**\n\n"
         "* This is a bulleted list.\n"
         "* It has two items, the second\n"
         "  item uses two lines.\n"
         "\n"
         "1. This is a numbered list.\n"
         "2. It has two items too.\n"
         "\n"
         "#. This is a numbered list.\n"
         "#. It has two items too.\n"
     )
 
     expected_html = (
         '<div class="swh-rst"><h1 class="title">Section</h1>\n'
         "<p><strong>Some strong text</strong></p>\n"
         '<ul class="simple">\n'
         "<li><p>This is a bulleted list.</p></li>\n"
         "<li><p>It has two items, the second\n"
         "item uses two lines.</p></li>\n"
         "</ul>\n"
         '<ol class="arabic simple">\n'
         "<li><p>This is a numbered list.</p></li>\n"
         "<li><p>It has two items too.</p></li>\n"
         "<li><p>This is a numbered list.</p></li>\n"
         "<li><p>It has two items too.</p></li>\n"
         "</ol>\n"
         "</div>"
     )
 
     assert utils.rst_to_html(rst) == expected_html
 
 
 def sample_test_view(request, string, number):
     pass
 
 
 def sample_test_view_no_url_args(request):
     pass
 
 
 urlpatterns = [
     url(
         r"^sample/test/(?P<string>.+)/view/(?P<number>[0-9]+)/$",
         sample_test_view,
         name="sample-test-view",
     ),
     url(
         r"^sample/test/view/no/url/args/$",
         sample_test_view_no_url_args,
         name="sample-test-view-no-url-args",
     ),
 ]
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_url_args_only_ok():
     string = "foo"
     number = 55
     url = utils.reverse(
         "sample-test-view", url_args={"string": string, "number": number}
     )
     assert url == f"/sample/test/{string}/view/{number}/"
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_url_args_only_ko():
     string = "foo"
     with pytest.raises(NoReverseMatch):
         utils.reverse("sample-test-view", url_args={"string": string, "number": string})
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_no_url_args():
     url = utils.reverse("sample-test-view-no-url-args")
     assert url == "/sample/test/view/no/url/args/"
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_query_params_only():
     start = 0
     scope = "foo"
     url = utils.reverse(
         "sample-test-view-no-url-args", query_params={"start": start, "scope": scope}
     )
     assert url == f"/sample/test/view/no/url/args/?scope={scope}&start={start}"
 
     url = utils.reverse(
         "sample-test-view-no-url-args", query_params={"start": start, "scope": None}
     )
     assert url == f"/sample/test/view/no/url/args/?start={start}"
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_query_params_encode():
     libname = "libstc++"
     url = utils.reverse(
         "sample-test-view-no-url-args", query_params={"libname": libname}
     )
     assert url == f"/sample/test/view/no/url/args/?libname={quote(libname, safe='/;:')}"
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_url_args_query_params():
     string = "foo"
     number = 55
     start = 10
     scope = "bar"
     url = utils.reverse(
         "sample-test-view",
         url_args={"string": string, "number": number},
         query_params={"start": start, "scope": scope},
     )
     assert url == f"/sample/test/{string}/view/{number}/?scope={scope}&start={start}"
 
 
 @override_settings(ROOT_URLCONF=__name__)
 def test_reverse_absolute_uri(request_factory):
     request = request_factory.get(utils.reverse("sample-test-view-no-url-args"))
     url = utils.reverse("sample-test-view-no-url-args", request=request)
     assert url == f"http://{request.META['SERVER_NAME']}/sample/test/view/no/url/args/"
 
 
 def test_get_deposits_list(requests_mock):
     deposits_data = {
         "count": 2,
         "results": [
             {
                 "check_task_id": "351820217",
                 "client": 2,
                 "collection": 1,
                 "complete_date": "2021-01-21T07:52:19.919312Z",
                 "external_id": "hal-03116143",
                 "id": 1412,
                 "load_task_id": "351820260",
                 "origin_url": "https://hal.archives-ouvertes.fr/hal-03116143",
                 "parent": None,
                 "reception_date": "2021-01-21T07:52:19.471019Z",
                 "status": "done",
                 "status_detail": None,
                 "swhid": "swh:1:dir:f25157ad1b13cb20ac3457d4f6756b49ac63d079",
             },
             {
                 "check_task_id": "381576507",
                 "client": 2,
                 "collection": 1,
                 "complete_date": "2021-07-07T08:00:44.726676Z",
                 "external_id": "hal-03275052",
                 "id": 1693,
                 "load_task_id": "381576508",
                 "origin_url": "https://hal.archives-ouvertes.fr/hal-03275052",
                 "parent": None,
                 "reception_date": "2021-07-07T08:00:44.327661Z",
                 "status": "done",
                 "status_detail": None,
                 "swhid": "swh:1:dir:825fa96d1810177ec08a772ffa5bd34bbd08b89c",
             },
         ],
     }
 
     config = get_config()["deposit"]
     deposits_list_url = config["private_api_url"] + "deposits"
 
     basic_auth_payload = (
         config["private_api_user"] + ":" + config["private_api_password"]
     ).encode()
 
     requests_mock.get(
         deposits_list_url,
         json=deposits_data,
         request_headers={
             "Authorization": f"Basic {b64encode(basic_auth_payload).decode('ascii')}"
         },
     )
 
     assert utils.get_deposits_list() == deposits_data["results"]
 
 
 @pytest.mark.parametrize("backend", ["swh-search", "swh-storage"])
 def test_origin_visit_types(mocker, backend):
     if backend != "swh-search":
         # equivalent to not configuring search in the config
         search = mocker.patch("swh.web.common.utils.search")
         search.return_value = None
         assert utils.origin_visit_types() == []
     else:
         # see swh/web/tests/data.py for origins added for tests
         assert utils.origin_visit_types() == ["git", "tar"]
 
 
 @pytest.mark.parametrize("server_name", ["localhost", "127.0.0.1", "testserver"])
 def test_is_swh_web_development(request_factory, server_name):
     request = request_factory.get("/", SERVER_NAME=server_name)
     assert utils.is_swh_web_development(request)
 
 
 @pytest.mark.parametrize("server_name", SWH_WEB_STAGING_SERVER_NAMES)
 def test_is_swh_web_staging(request_factory, server_name):
     request = request_factory.get("/", SERVER_NAME=server_name)
     assert utils.is_swh_web_staging(request)
 
 
 def test_is_swh_web_production(request_factory):
     request = request_factory.get("/", SERVER_NAME=SWH_WEB_SERVER_NAME)
     assert utils.is_swh_web_production(request)
 
 
 @pytest.mark.parametrize(
     "raw_metadata_file,expected_url",
     [
         ("raw-metadata-provenance.xml", "https://example.org/metadata/provenance"),
         ("raw-metadata-no-swh.xml", None),
     ],
 )
 def test_parse_swh_provenance(datadir, raw_metadata_file, expected_url):
     metadata_path = join(datadir, "deposit", raw_metadata_file)
     with open(metadata_path, "r") as f:
         raw_metadata = f.read()
 
     actual_url = utils.parse_swh_metadata_provenance(raw_metadata)
 
     assert actual_url == expected_url
 
 
 @pytest.mark.parametrize(
     "raw_metadata_file,expected_url",
     [
         (
             "raw-metadata-create-origin.xml",
             "https://example.org/metadata/create-origin",
         ),
         (
             "raw-metadata-add-to-origin.xml",
             "https://example.org/metadata/add-to-origin",
         ),
         ("raw-metadata-no-swh.xml", None),
     ],
 )
 def test_parse_swh_origins(datadir, raw_metadata_file, expected_url):
     metadata_path = join(datadir, "deposit", raw_metadata_file)
     with open(metadata_path, "r") as f:
         raw_metadata = f.read()
 
     actual_url = utils.parse_swh_deposit_origin(raw_metadata)
 
     assert actual_url == expected_url
+
+
+def add(x, y):
+    return x + y
+
+
+def test_django_cache(mocker):
+    """Decorated function should be called once and returned value
+    put in django cache."""
+    spy_add = mocker.spy(sys.modules[__name__], "add")
+    spy_cache_set = mocker.spy(utils.cache, "set")
+
+    cached_add = utils.django_cache()(add)
+
+    val = cached_add(1, 2)
+    val2 = cached_add(1, 2)
+
+    assert val == val2 == 3
+    assert spy_add.call_count == 1
+    assert spy_cache_set.call_count == 1
+
+
+def test_django_cache_invalidate_cache_pred(mocker):
+    """Decorated function should be called twice and returned value
+    put in django cache twice."""
+    spy_add = mocker.spy(sys.modules[__name__], "add")
+    spy_cache_set = mocker.spy(utils.cache, "set")
+
+    cached_add = utils.django_cache(invalidate_cache_pred=lambda val: val == 3)(add)
+
+    val = cached_add(1, 2)
+    val2 = cached_add(1, 2)
+
+    assert val == val2 == 3
+    assert spy_add.call_count == 2
+    assert spy_cache_set.call_count == 2
+
+
+def test_django_cache_raise_exception(mocker):
+    """Decorated function should be called twice, exceptions should be
+    raised and no value put in django cache"""
+    spy_add = mocker.spy(sys.modules[__name__], "add")
+    spy_cache_set = mocker.spy(utils.cache, "set")
+
+    cached_add = utils.django_cache()(add)
+
+    with pytest.raises(TypeError):
+        cached_add(1, "2")
+
+    with pytest.raises(TypeError):
+        cached_add(1, "2")
+
+    assert spy_add.call_count == 2
+    assert spy_cache_set.call_count == 0
+
+
+def test_django_cache_catch_exception(mocker):
+    """Decorated function should be called twice, exceptions should not be
+    raised, specified fallback value should be returned and no value put
+    in django cache"""
+    spy_add = mocker.spy(sys.modules[__name__], "add")
+    spy_cache_set = mocker.spy(utils.cache, "set")
+
+    cached_add = utils.django_cache(
+        catch_exception=True, exception_return_value=math.nan
+    )(add)
+
+    val = cached_add(1, "2")
+    val2 = cached_add(1, "2")
+
+    assert math.isnan(val)
+    assert math.isnan(val2)
+    assert spy_add.call_count == 2
+    assert spy_cache_set.call_count == 0