diff --git a/requirements-test.txt b/requirements-test.txt
index f2f50fc4..966983a9 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1,18 +1,19 @@
 decorator  # dependency of swh.core[http]
 djangorestframework-stubs
 django-stubs
 django-test-migrations
 hypothesis
 pytest < 7.0.0  # v7.0.0 removed _pytest.tmpdir.TempdirFactory, which is used by some of the pytest plugins we use
 pytest-django
 pytest-mock
 pytest-postgresql
 requests-mock != 1.9.0, != 1.9.1
 swh.core[http] >= 0.0.95
 swh.loader.git >= 0.8.0
 swh-scheduler[testing] >= 0.5.0
 swh.storage >= 0.1.1
+types-chardet
 types-docutils
 types-psycopg2
 types-pyyaml
 types-requests
diff --git a/requirements.txt b/requirements.txt
index e8bc3ef5..7bb1e6e1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,26 @@
 # Add here external Python module dependencies, one per line. Module names
 # should match https://pypi.python.org/pypi names. For the full spec of
 # dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html
 
 beautifulsoup4
+chardet
 cryptography
 django < 3
 django-cors-headers
 django-js-reverse
 djangorestframework
 django-webpack-loader
 docutils
 htmlmin
 iso8601
 lxml
 prometheus-client
 pybadges >= 2.2.1
 pygments
 python-magic >= 0.4.0
 python-memcached
 pyyaml
 requests
 sentry-sdk
 typing-extensions
 psycopg2 < 2.9
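
Note: the change below relies on a single chardet entry point,
chardet.detect(), which takes raw bytes and returns a dict with "encoding",
"confidence" and "language" keys. A minimal sketch of that API as used here
(the sample bytes and the reported values are illustrative only):

    import chardet

    # detect() inspects raw bytes and guesses their character encoding
    result = chardet.detect("héllo".encode("iso-8859-1"))

    encoding = result["encoding"]      # a codec name, or None if undetected
    confidence = result["confidence"]  # a float between 0.0 and 1.0

re_encode_content() below only trusts a detection whose confidence is at
least 0.9; types-chardet in requirements-test.txt provides the matching type
stubs for mypy.
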
diff --git a/swh/web/browse/utils.py b/swh/web/browse/utils.py
index a40777c2..8dfc4898 100644
--- a/swh/web/browse/utils.py
+++ b/swh/web/browse/utils.py
@@ -1,714 +1,734 @@
 # Copyright (C) 2017-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import base64
 import stat
 import textwrap
+from typing import Tuple
 
+import chardet
 import magic
 import sentry_sdk
 
 from django.core.cache import cache
 from django.utils.html import escape
 from django.utils.safestring import mark_safe
 
 from swh.web.common import archive, highlightjs
 from swh.web.common.exc import NotFoundExc
 from swh.web.common.utils import (
     browsers_supported_image_mimes,
     format_utc_iso_date,
     reverse,
     rst_to_html,
 )
 from swh.web.config import get_config
 
 
 def get_directory_entries(sha1_git):
     """Function that retrieves the content of a directory
     from the archive.
 
     The directory entries are first sorted in lexicographical order.
     Sub-directories and regular files are then extracted.
 
     Args:
         sha1_git: sha1_git identifier of the directory
 
     Returns:
         A tuple whose first member is the sub-directories list
         and whose second member is the regular files list
 
     Raises:
         NotFoundExc if the directory is not found
     """
     cache_entry_id = "directory_entries_%s" % sha1_git
     cache_entry = cache.get(cache_entry_id)
 
     if cache_entry:
         return cache_entry
 
     entries = list(archive.lookup_directory(sha1_git))
     for e in entries:
         e["perms"] = stat.filemode(e["perms"])
         if e["type"] == "rev":
             # modify dir entry name to explicitly show it points
             # to a revision
             e["name"] = "%s @ %s" % (e["name"], e["target"][:7])
 
     dirs = [e for e in entries if e["type"] in ("dir", "rev")]
     files = [e for e in entries if e["type"] == "file"]
 
     dirs = sorted(dirs, key=lambda d: d["name"])
     files = sorted(files, key=lambda f: f["name"])
 
     cache.set(cache_entry_id, (dirs, files))
 
     return dirs, files
 
 
 def get_mimetype_and_encoding_for_content(content):
     """Function that returns the mime type and the encoding associated to
     a content buffer using the magic module under the hood.
 
     Args:
         content (bytes): a content buffer
 
     Returns:
         A tuple (mimetype, encoding), for instance ('text/plain', 'us-ascii'),
         associated with the provided content.
 
     """
     m = magic.Magic(mime=True, mime_encoding=True)
     mime_encoding = m.from_buffer(content)
     mime_type, encoding = mime_encoding.split(";")
     encoding = encoding.replace(" charset=", "")
     return mime_type, encoding
 
 
 # maximum authorized content size in bytes for HTML display
 # with code highlighting
 content_display_max_size = get_config()["content_display_max_size"]
 
 
-def _re_encode_content(mimetype, encoding, content_data):
-    # encode textual content to utf-8 if needed
-    if mimetype.startswith("text/"):
-        # probably a malformed UTF-8 content, re-encode it
-        # by replacing invalid chars with a substitution one
-        if encoding == "unknown-8bit":
+def re_encode_content(
+    mimetype: str, encoding: str, content_data: bytes
+) -> Tuple[str, str, bytes]:
+    """Try to re-encode textual content if it is not encoded to UTF-8
+    for proper display in the browse Web UI.
+
+    Args:
+        mimetype: content mimetype as detected by python-magic
+        encoding: content encoding as detected by python-magic
+        content_data: raw content bytes
+
+    Returns:
+        A tuple with 3 members: content mimetype, content encoding (possibly
+        updated after processing) and content raw bytes (possibly re-encoded
+        to UTF-8)
+    """
+    if mimetype.startswith("text/") and encoding not in ("us-ascii", "utf-8"):
+        # first check if chardet detects an encoding with confidence
+        result = chardet.detect(content_data)
+        if result["confidence"] >= 0.9:
+            encoding = result["encoding"]
+            content_data = content_data.decode(encoding).encode("utf-8")
+        elif encoding == "unknown-8bit":
+            # probably a malformed UTF-8 content, re-encode it
+            # by replacing invalid chars with a substitution one
             content_data = content_data.decode("utf-8", "replace").encode("utf-8")
         elif encoding not in ["utf-8", "binary"]:
             content_data = content_data.decode(encoding, "replace").encode("utf-8")
     elif mimetype.startswith("application/octet-stream"):
         # file may detect a text content as binary
         # so try to decode it for display
         encodings = ["us-ascii", "utf-8"]
         encodings += ["iso-8859-%s" % i for i in range(1, 17)]
         for enc in encodings:
             try:
                 content_data = content_data.decode(enc).encode("utf-8")
             except Exception as exc:
                 sentry_sdk.capture_exception(exc)
             else:
                 # ensure display in content view
                 encoding = enc
                 mimetype = "text/plain"
                 break
     return mimetype, encoding, content_data
 
 
 def request_content(
     query_string, max_size=content_display_max_size, re_encode=True,
 ):
     """Function that retrieves a content from the archive.
 
     Raw bytes content is first retrieved, then the content mime type.
     If the mime type is not stored in the archive, it will be computed
     using the Python magic module.
 
     Args:
         query_string: a string of the form "[ALGO_HASH:]HASH" where
             optional ALGO_HASH can be either ``sha1``, ``sha1_git``,
             ``sha256``, or ``blake2s256`` (default to ``sha1``) and HASH
             the hexadecimal representation of the hash value
         max_size: the maximum size for a content to retrieve (default to 1MB,
             no size limit if None)
         re_encode: whether to re-encode textual content to UTF-8 for proper
             display
 
     Returns:
         A dict filled with the content raw bytes (under the 'raw_data' key,
         None if the content size exceeds max_size), its mimetype, its
         encoding and its detected programming language
 
     Raises:
         NotFoundExc if the content is not found
     """
     content_data = archive.lookup_content(query_string)
     filetype = None
     language = None
     # requests to the indexer db may fail so properly handle
     # those cases in order to avoid content display errors
     try:
         filetype = archive.lookup_content_filetype(query_string)
         language = archive.lookup_content_language(query_string)
     except Exception as exc:
         sentry_sdk.capture_exception(exc)
     mimetype = "unknown"
     encoding = "unknown"
     if filetype:
         mimetype = filetype["mimetype"]
         encoding = filetype["encoding"]
 
     if not max_size or content_data["length"] < max_size:
         try:
             content_raw = archive.lookup_content_raw(query_string)
         except Exception as exc:
             sentry_sdk.capture_exception(exc)
             raise NotFoundExc(
                 "The bytes of the content are currently not available "
                 "in the archive."
             )
         else:
             content_data["raw_data"] = content_raw["data"]
 
             if not filetype:
                 mimetype, encoding = get_mimetype_and_encoding_for_content(
                     content_data["raw_data"]
                 )
 
             if re_encode:
-                mimetype, encoding, raw_data = _re_encode_content(
+                mimetype, encoding, raw_data = re_encode_content(
                     mimetype, encoding, content_data["raw_data"]
                 )
                 content_data["raw_data"] = raw_data
 
     else:
         content_data["raw_data"] = None
 
     content_data["mimetype"] = mimetype
     content_data["encoding"] = encoding
 
     if language:
         content_data["language"] = language["lang"]
     else:
         content_data["language"] = "not detected"
 
     return content_data
 
 
 def prepare_content_for_display(content_data, mime_type, path):
     """Function that prepares a content for HTML display.
 
     The function tries to associate a programming language to a
     content in order to perform syntax highlighting client-side
     using highlightjs. The language is determined using either
     the content filename or its mime type.
     If the mime type corresponds to an image format supported
     by web browsers, the content will be encoded in base64
     for displaying the image.
 
     Args:
         content_data (bytes): raw bytes of the content
         mime_type (string): mime type of the content
         path (string): path of the content including filename
 
     Returns:
         A dict containing the content bytes (possibly different from those
         provided as parameter if the content is an image) under the key
         'content_data' and the corresponding highlightjs language class
         under the key 'language'.
     """
 
     language = None
     if path:
         language = highlightjs.get_hljs_language_from_filename(path.split("/")[-1])
 
     if language is None:
         language = highlightjs.get_hljs_language_from_mime_type(mime_type)
 
     if language is None:
         language = "plaintext"
 
     if mime_type.startswith("image/"):
         if mime_type in browsers_supported_image_mimes:
             content_data = base64.b64encode(content_data).decode("ascii")
 
     if mime_type.startswith("image/svg"):
         mime_type = "image/svg+xml"
 
     if mime_type.startswith("text/") or mime_type.startswith("application/"):
         content_data = content_data.decode("utf-8", errors="replace")
 
     return {"content_data": content_data, "language": language, "mimetype": mime_type}
 
 
 def gen_link(url, link_text=None, link_attrs=None):
     """
     Utility function for generating an HTML link to insert
     in Django templates.
 
     Args:
         url (str): a URL
         link_text (str): optional text for the produced link,
             if not provided the URL will be used
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="url">link_text</a>'
 
     """
     attrs = " "
     if link_attrs:
         for k, v in link_attrs.items():
             attrs += '%s="%s" ' % (k, v)
     if not link_text:
         link_text = url
     link = '<a%shref="%s">%s</a>' % (attrs, escape(url), escape(link_text))
     return mark_safe(link)
 
 
 def _snapshot_context_query_params(snapshot_context):
     query_params = {}
     if not snapshot_context:
         return query_params
     if snapshot_context and snapshot_context["origin_info"]:
         origin_info = snapshot_context["origin_info"]
         snp_query_params = snapshot_context["query_params"]
         query_params = {"origin_url": origin_info["url"]}
         if "timestamp" in snp_query_params:
             query_params["timestamp"] = snp_query_params["timestamp"]
         if "visit_id" in snp_query_params:
             query_params["visit_id"] = snp_query_params["visit_id"]
         if "snapshot" in snp_query_params and "visit_id" not in query_params:
             query_params["snapshot"] = snp_query_params["snapshot"]
     elif snapshot_context:
         query_params = {"snapshot": snapshot_context["snapshot_id"]}
 
     if snapshot_context["release"]:
         query_params["release"] = snapshot_context["release"]
     elif snapshot_context["branch"] and snapshot_context["branch"] not in (
         "HEAD",
         snapshot_context["revision_id"],
     ):
         query_params["branch"] = snapshot_context["branch"]
     elif snapshot_context["revision_id"]:
         query_params["revision"] = snapshot_context["revision_id"]
     return query_params
 
 
 def gen_revision_url(revision_id, snapshot_context=None):
     """
     Utility function for generating a URL to a revision.
 
     Args:
         revision_id (str): a revision id
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing url
 
     Returns:
         str: The url to browse the revision
 
     """
     query_params = _snapshot_context_query_params(snapshot_context)
     # remove query parameters not needed for a revision view
     query_params.pop("revision", None)
     query_params.pop("release", None)
 
     return reverse(
         "browse-revision", url_args={"sha1_git": revision_id}, query_params=query_params
     )
 
 
 def gen_revision_link(
     revision_id,
     shorten_id=False,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a revision HTML view
     to insert in Django templates.
 
     Args:
         revision_id (str): a revision id
         shorten_id (boolean): whether to shorten the revision id to 7
             characters for the link text
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
         link_text (str): optional text for the generated link
             (the revision id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         str: An HTML link in the form '<a href="revision_url">revision_id</a>'
 
     """
     if not revision_id:
         return None
 
     revision_url = gen_revision_url(revision_id, snapshot_context)
 
     if shorten_id:
         return gen_link(revision_url, revision_id[:7], link_attrs)
     else:
         if not link_text:
             link_text = revision_id
         return gen_link(revision_url, link_text, link_attrs)
 
 
 def gen_directory_link(
     sha1_git,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a directory HTML view
     to insert in Django templates.
 
     Args:
         sha1_git (str): directory identifier
         link_text (str): optional text for the generated link
             (the directory id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="directory_view_url">link_text</a>'
 
     """
     if not sha1_git:
         return None
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     directory_url = reverse(
         "browse-directory", url_args={"sha1_git": sha1_git}, query_params=query_params
     )
 
     if not link_text:
         link_text = sha1_git
     return gen_link(directory_url, link_text, link_attrs)
 
 
 def gen_snapshot_link(
     snapshot_id,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a snapshot HTML view
     to insert in Django templates.
 
     Args:
         snapshot_id (str): snapshot identifier
         link_text (str): optional text for the generated link
             (the snapshot id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="snapshot_view_url">link_text</a>'
 
     """
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     snapshot_url = reverse(
         "browse-snapshot",
         url_args={"snapshot_id": snapshot_id},
         query_params=query_params,
     )
     if not link_text:
         link_text = snapshot_id
     return gen_link(snapshot_url, link_text, link_attrs)
 
 
 def gen_content_link(
     sha1_git,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a content HTML view
     to insert in Django templates.
 
     Args:
         sha1_git (str): content identifier
         link_text (str): optional text for the generated link
             (the content sha1_git will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="content_view_url">link_text</a>'
 
     """
     if not sha1_git:
         return None
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     content_url = reverse(
         "browse-content",
         url_args={"query_string": "sha1_git:" + sha1_git},
         query_params=query_params,
     )
     if not link_text:
         link_text = sha1_git
     return gen_link(content_url, link_text, link_attrs)
 
 
 def get_revision_log_url(revision_id, snapshot_context=None):
     """
     Utility function for getting the URL for a revision log HTML view
     (possibly in the context of an origin).
 
     Args:
         revision_id (str): revision identifier the history heads to
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
     Returns:
         The revision log view URL
     """
     query_params = {}
     if snapshot_context:
         query_params = _snapshot_context_query_params(snapshot_context)
 
     query_params["revision"] = revision_id
     if snapshot_context and snapshot_context["origin_info"]:
         revision_log_url = reverse("browse-origin-log", query_params=query_params)
     elif snapshot_context:
         url_args = {"snapshot_id": snapshot_context["snapshot_id"]}
         del query_params["snapshot"]
         revision_log_url = reverse(
             "browse-snapshot-log", url_args=url_args, query_params=query_params
         )
     else:
         revision_log_url = reverse(
             "browse-revision-log", url_args={"sha1_git": revision_id}
         )
     return revision_log_url
 
 
 def gen_revision_log_link(
     revision_id,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a revision log HTML view
     (possibly in the context of an origin) to insert in Django templates.
 
     Args:
         revision_id (str): revision identifier the history heads to
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
         link_text (str): optional text to use for the generated link
             (the revision id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form
         '<a href="revision_log_view_url">link_text</a>'
     """
     if not revision_id:
         return None
 
     revision_log_url = get_revision_log_url(revision_id, snapshot_context)
 
     if not link_text:
         link_text = revision_id
     return gen_link(revision_log_url, link_text, link_attrs)
 
 
 def gen_person_mail_link(person, link_text=None):
     """
     Utility function for generating a mail link to a person to insert
     in Django templates.
 
     Args:
         person (dict): dictionary containing person data
             (*name*, *email*, *fullname*)
         link_text (str): optional text to use for the generated mail link
             (the person name will be used by default)
 
     Returns:
         str: A mail link to the person or the person name if no email is
             present in person data
     """
     person_name = person["name"] or person["fullname"] or "None"
     if link_text is None:
         link_text = person_name
     person_email = person["email"] if person["email"] else None
     if person_email is None and "@" in person_name and " " not in person_name:
         person_email = person_name
     if person_email:
         return gen_link(url="mailto:%s" % person_email, link_text=link_text)
     else:
         return person_name
 
 
 def gen_release_link(
     sha1_git,
     snapshot_context=None,
     link_text="Browse",
     link_attrs={"class": "btn btn-default btn-sm", "role": "button"},
 ):
     """
     Utility function for generating a link to a release HTML view
     to insert in Django templates.
 
     Args:
         sha1_git (str): release identifier
         link_text (str): optional text for the generated link
             (the release id will be used by default)
         link_attrs (dict): optional attributes (e.g. class)
             to add to the link
 
     Returns:
         An HTML link in the form '<a href="release_view_url">link_text</a>'
 
     """
 
     query_params = _snapshot_context_query_params(snapshot_context)
 
     release_url = reverse(
         "browse-release", url_args={"sha1_git": sha1_git}, query_params=query_params
     )
     if not link_text:
         link_text = sha1_git
     return gen_link(release_url, link_text, link_attrs)
 
 
 def format_log_entries(revision_log, per_page, snapshot_context=None):
     """
     Utility function that processes raw revision log data for HTML display.
     Its purpose is to:
 
         * add links to relevant browse views
         * format date in human readable format
         * truncate the message log
 
     Args:
         revision_log (list): raw revision log as returned by the swh-web api
         per_page (int): number of log entries per page
         snapshot_context (dict): if provided, generate snapshot-dependent
             browsing link
 
 
     """
     revision_log_data = []
     for i, rev in enumerate(revision_log):
         if i == per_page:
             break
         author_name = "None"
         author_fullname = "None"
         committer_fullname = "None"
         if rev["author"]:
             author_name = gen_person_mail_link(rev["author"])
             author_fullname = rev["author"]["fullname"]
         if rev["committer"]:
             committer_fullname = rev["committer"]["fullname"]
         author_date = format_utc_iso_date(rev["date"])
         committer_date = format_utc_iso_date(rev["committer_date"])
 
         tooltip = "revision %s\n" % rev["id"]
         tooltip += "author: %s\n" % author_fullname
         tooltip += "author date: %s\n" % author_date
         tooltip += "committer: %s\n" % committer_fullname
         tooltip += "committer date: %s\n\n" % committer_date
         if rev["message"]:
             tooltip += textwrap.indent(rev["message"], " " * 4)
 
         revision_log_data.append(
             {
                 "author": author_name,
                 "id": rev["id"][:7],
                 "message": rev["message"],
                 "date": author_date,
                 "commit_date": committer_date,
                 "url": gen_revision_url(rev["id"], snapshot_context),
                 "tooltip": tooltip,
             }
         )
     return revision_log_data
 
 
 # list of common readme names ordered by preference
 # (lower indices have higher priority)
 _common_readme_names = [
     "readme.markdown",
     "readme.md",
     "readme.rst",
     "readme.txt",
     "readme",
 ]
 
 
 def get_readme_to_display(readmes):
     """
     Process a list of readme files found in a directory
     in order to find the most suitable one to display.
 
     Args:
         readmes: a dict where keys are readme file names and values
             are readme sha1s
 
     Returns:
         A tuple (readme_name, readme_url, readme_html)
     """
     readme_name = None
     readme_url = None
     readme_sha1 = None
     readme_html = None
 
     lc_readmes = {k.lower(): {"orig_name": k, "sha1": v} for k, v in readmes.items()}
 
     # look for readme names according to the preference order
     # defined by the _common_readme_names list
     for common_readme_name in _common_readme_names:
         if common_readme_name in lc_readmes:
             readme_name = lc_readmes[common_readme_name]["orig_name"]
             readme_sha1 = lc_readmes[common_readme_name]["sha1"]
             readme_url = reverse(
                 "browse-content-raw",
                 url_args={"query_string": readme_sha1},
                 query_params={"re_encode": "true"},
             )
             break
 
     # otherwise pick the first readme-like file if any
     if not readme_name and len(readmes.items()) > 0:
         readme_name = next(iter(readmes))
         readme_sha1 = readmes[readme_name]
         readme_url = reverse(
             "browse-content-raw",
             url_args={"query_string": readme_sha1},
             query_params={"re_encode": "true"},
         )
 
     # convert rst README to html server-side as there is
     # no viable solution to perform that task client-side
     if readme_name and readme_name.endswith(".rst"):
         cache_entry_id = "readme_%s" % readme_sha1
         cache_entry = cache.get(cache_entry_id)
 
         if cache_entry:
             readme_html = cache_entry
         else:
             try:
                 rst_doc = request_content(readme_sha1)
                 readme_html = rst_to_html(rst_doc["raw_data"])
                 cache.set(cache_entry_id, readme_html)
             except Exception as exc:
                 sentry_sdk.capture_exception(exc)
                 readme_html = "Readme bytes are not available"
 
     return readme_name, readme_url, readme_html
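
To summarize the reworked control flow: for text content that is neither
us-ascii nor utf-8, re_encode_content() now trusts chardet first when its
confidence reaches 0.9, and only then falls back to the previous heuristics
("unknown-8bit" replacement decoding, then best-effort decoding). A usage
sketch, not part of the patch, mirroring the Shift JIS case exercised by the
new unit test below:

    from swh.web.browse.utils import (
        get_mimetype_and_encoding_for_content,
        re_encode_content,
    )

    # same text as in test_re_encode_content_for_shift_jis_encoding
    data = "/* 関連の文字コード変換 */".encode("shift_jis")

    mimetype, encoding = get_mimetype_and_encoding_for_content(data)

    # python-magic reports a text mimetype with a non-UTF-8 encoding here,
    # so the chardet branch transcodes the buffer to UTF-8
    mimetype, encoding, utf8_data = re_encode_content(mimetype, encoding, data)

    assert encoding == "SHIFT_JIS"
    assert utf8_data.decode("utf-8") == "/* 関連の文字コード変換 */"
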
diff --git a/swh/web/tests/browse/test_utils.py b/swh/web/tests/browse/test_utils.py
index 0c93c1d9..be40b22d 100644
--- a/swh/web/tests/browse/test_utils.py
+++ b/swh/web/tests/browse/test_utils.py
@@ -1,89 +1,101 @@
 # Copyright (C) 2017-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import pytest
 
 from swh.web.browse.utils import (
     gen_link,
     gen_person_mail_link,
     gen_revision_link,
     get_mimetype_and_encoding_for_content,
     prepare_content_for_display,
+    re_encode_content,
 )
 from swh.web.common.utils import reverse
 
 
 def test_get_mimetype_and_encoding_for_content():
     text = b"Hello world!"
     assert get_mimetype_and_encoding_for_content(text) == ("text/plain", "us-ascii",)
 
 
 def test_gen_link():
     assert (
         gen_link("https://www.softwareheritage.org/", "swh")
         == '<a href="https://www.softwareheritage.org/">swh</a>'
     )
 
 
 def test_gen_revision_link():
     revision_id = "28a0bc4120d38a394499382ba21d6965a67a3703"
     revision_url = reverse("browse-revision", url_args={"sha1_git": revision_id})
 
     assert gen_revision_link(
         revision_id, link_text=None, link_attrs=None
     ) == '<a href="%s">%s</a>' % (revision_url, revision_id)
     assert gen_revision_link(
         revision_id, shorten_id=True, link_attrs=None
     ) == '<a href="%s">%s</a>' % (revision_url, revision_id[:7])
 
 
 def test_gen_person_mail_link():
     person_full = {
         "name": "John Doe",
         "email": "john.doe@swh.org",
         "fullname": "John Doe <john.doe@swh.org>",
     }
 
     assert gen_person_mail_link(person_full) == '<a href="mailto:%s">%s</a>' % (
         person_full["email"],
         person_full["name"],
     )
 
     link_text = "Mail"
     assert gen_person_mail_link(
         person_full, link_text=link_text
     ) == '<a href="mailto:%s">%s</a>' % (person_full["email"], link_text)
 
     person_partial_email = {"name": None, "email": None, "fullname": "john.doe@swh.org"}
 
     assert gen_person_mail_link(
         person_partial_email
     ) == '<a href="mailto:%s">%s</a>' % (
         person_partial_email["fullname"],
         person_partial_email["fullname"],
     )
 
     person_partial = {
         "name": None,
         "email": None,
         "fullname": "John Doe <john.doe@swh.org>",
     }
 
     assert gen_person_mail_link(person_partial) == person_partial["fullname"]
 
     person_none = {"name": None, "email": None, "fullname": None}
 
     assert gen_person_mail_link(person_none) == "None"
 
 
 @pytest.mark.parametrize(
     "path, expected_language",
     [("CMakeLists.txt", "cmake"), ("path/CMakeLists.txt", "cmake")],
 )
 def test_prepare_content_display_language_for_filename(path, expected_language):
     content_display = prepare_content_for_display(
         content_data=b"", mime_type="", path=path
     )
     assert content_display["language"] == expected_language
+
+
+def test_re_encode_content_for_shift_jis_encoding():
+    data = b"/* \x8a\xd6\x98A\x82\xcc\x95\xb6\x8e\x9a\x83R\x81[\x83h\x95\xcf\x8a\xb7 */"
+    mime_type, encoding = get_mimetype_and_encoding_for_content(data)
+
+    _, encoding, re_encoded_data = re_encode_content(mime_type, encoding, data)
+
+    assert encoding == "SHIFT_JIS"
+    assert data.decode(encoding) == re_encoded_data.decode("utf-8")
+    assert re_encoded_data.decode("utf-8") == "/* 関連の文字コード変換 */"
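
For reference, what the chardet branch sees for the byte string used in the
test above; chardet reports this codec as "SHIFT_JIS", hence the exact
spelling asserted. The confidence value shown is illustrative, the code only
relies on it being at least 0.9:

    import chardet

    data = b"/* \x8a\xd6\x98A\x82\xcc\x95\xb6\x8e\x9a\x83R\x81[\x83h\x95\xcf\x8a\xb7 */"
    result = chardet.detect(data)

    # e.g. {'encoding': 'SHIFT_JIS', 'confidence': 0.99, 'language': 'Japanese'}
    assert result["encoding"] == "SHIFT_JIS"
    assert result["confidence"] >= 0.9
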
diff --git a/swh/web/tests/browse/views/test_content.py b/swh/web/tests/browse/views/test_content.py
index e24d6a86..28688d0d 100644
--- a/swh/web/tests/browse/views/test_content.py
+++ b/swh/web/tests/browse/views/test_content.py
@@ -1,1076 +1,1076 @@
 # Copyright (C) 2017-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import random
 import re
 
 import pytest
 
 from django.utils.html import escape
 
 from swh.model.hashutil import hash_to_bytes
 from swh.model.model import ObjectType as ModelObjectType
 from swh.model.model import Release, Snapshot, SnapshotBranch, TargetType
 from swh.model.swhids import ObjectType
 from swh.web.browse.snapshot_context import process_snapshot_branches
 from swh.web.browse.utils import (
-    _re_encode_content,
     get_mimetype_and_encoding_for_content,
     prepare_content_for_display,
+    re_encode_content,
 )
 from swh.web.common.exc import NotFoundExc
 from swh.web.common.identifiers import gen_swhid
 from swh.web.common.utils import (
     format_utc_iso_date,
     gen_path_info,
     parse_iso8601_date_to_utc,
     reverse,
 )
 from swh.web.tests.data import get_content
 from swh.web.tests.django_asserts import assert_contains, assert_not_contains
 from swh.web.tests.utils import check_html_get_response, check_http_get_response
 
 
 def test_content_view_text(client, archive_data, content_text):
     sha1_git = content_text["sha1_git"]
 
     url = reverse(
         "browse-content",
         url_args={"query_string": content_text["sha1"]},
         query_params={"path": content_text["path"]},
     )
 
     url_raw = reverse(
         "browse-content-raw", url_args={"query_string": content_text["sha1"]}
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     content_display = _process_content_for_display(archive_data, content_text)
     mimetype = content_display["mimetype"]
 
     if mimetype.startswith("text/"):
         assert_contains(resp, '<code class="%s">' % content_display["language"])
         assert_contains(resp, escape(content_display["content_data"]))
     assert_contains(resp, url_raw)
 
     swh_cnt_id = gen_swhid(ObjectType.CONTENT, sha1_git)
     swh_cnt_id_url = reverse("browse-swhid", url_args={"swhid": swh_cnt_id})
     assert_contains(resp, swh_cnt_id)
     assert_contains(resp, swh_cnt_id_url)
     assert_not_contains(resp, "swh-metadata-popover")
 
 
 def test_content_view_no_highlight(
     client, archive_data, content_application_no_highlight, content_text_no_highlight
 ):
     for content_ in (content_application_no_highlight, content_text_no_highlight):
         content = content_
         sha1_git = content["sha1_git"]
 
         url = reverse("browse-content", url_args={"query_string": content["sha1"]})
 
         url_raw = reverse(
             "browse-content-raw", url_args={"query_string": content["sha1"]}
         )
 
         resp = check_html_get_response(
             client, url, status_code=200, template_used="browse/content.html"
         )
 
         content_display = _process_content_for_display(archive_data, content)
 
         if content["encoding"] != "binary":
             assert_contains(resp, '<code class="plaintext">')
             assert_contains(resp, escape(content_display["content_data"]))
 
         assert_contains(resp, url_raw)
 
         swh_cnt_id = gen_swhid(ObjectType.CONTENT, sha1_git)
         swh_cnt_id_url = reverse("browse-swhid", url_args={"swhid": swh_cnt_id})
 
         assert_contains(resp, swh_cnt_id)
         assert_contains(resp, swh_cnt_id_url)
 
 
 def test_content_view_no_utf8_text(client, archive_data, content_text_non_utf8):
     sha1_git = content_text_non_utf8["sha1_git"]
 
     url = reverse(
         "browse-content", url_args={"query_string": content_text_non_utf8["sha1"]}
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     content_display = _process_content_for_display(archive_data, content_text_non_utf8)
 
     swh_cnt_id = gen_swhid(ObjectType.CONTENT, sha1_git)
     swh_cnt_id_url = reverse("browse-swhid", url_args={"swhid": swh_cnt_id})
     assert_contains(resp, swh_cnt_id_url)
     assert_contains(resp, escape(content_display["content_data"]))
 
 
 def test_content_view_image(client, archive_data, content_image_type):
     url = reverse(
         "browse-content", url_args={"query_string": content_image_type["sha1"]}
     )
 
     url_raw = reverse(
         "browse-content-raw", url_args={"query_string": content_image_type["sha1"]}
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     content_display = _process_content_for_display(archive_data, content_image_type)
     mimetype = content_display["mimetype"]
     content_data = content_display["content_data"]
     assert_contains(resp, '<img src="data:%s;base64,%s"/>' % (mimetype, content_data))
     assert_contains(resp, url_raw)
 
 
 def test_content_view_image_no_rendering(
     client, archive_data, content_unsupported_image_type_rendering
 ):
     url = reverse(
         "browse-content",
         url_args={"query_string": content_unsupported_image_type_rendering["sha1"]},
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     mimetype = content_unsupported_image_type_rendering["mimetype"]
     encoding = content_unsupported_image_type_rendering["encoding"]
     assert_contains(
         resp,
         (
             f"Content with mime type {mimetype} and encoding {encoding} "
             "cannot be displayed."
         ),
     )
 
 
 def test_content_view_text_with_path(client, archive_data, content_text):
     path = content_text["path"]
 
     url = reverse(
         "browse-content",
         url_args={"query_string": content_text["sha1"]},
         query_params={"path": path},
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     assert_contains(resp, '<nav class="bread-crumbs')
 
     content_display = _process_content_for_display(archive_data, content_text)
     mimetype = content_display["mimetype"]
 
     if mimetype.startswith("text/"):
         hljs_language = content_text["hljs_language"]
         assert_contains(resp, '<code class="%s">' % hljs_language)
         assert_contains(resp, escape(content_display["content_data"]))
 
     split_path = path.split("/")
 
     root_dir_sha1 = split_path[0]
     filename = split_path[-1]
     path = path.replace(root_dir_sha1 + "/", "").replace(filename, "")
 
     swhid_context = {
         "anchor": gen_swhid(ObjectType.DIRECTORY, root_dir_sha1),
         "path": f"/{path}{filename}",
     }
 
     swh_cnt_id = gen_swhid(
         ObjectType.CONTENT, content_text["sha1_git"], metadata=swhid_context
     )
     swh_cnt_id_url = reverse("browse-swhid", url_args={"swhid": swh_cnt_id})
     assert_contains(resp, swh_cnt_id)
     assert_contains(resp, swh_cnt_id_url)
 
     path_info = gen_path_info(path)
 
     root_dir_url = reverse("browse-directory", url_args={"sha1_git": root_dir_sha1})
 
     assert_contains(resp, '<li class="swh-path">', count=len(path_info) + 1)
 
     assert_contains(
         resp, '<a href="' + root_dir_url + '">' + root_dir_sha1[:7] + "</a>"
     )
 
     for p in path_info:
         dir_url = reverse(
             "browse-directory",
             url_args={"sha1_git": root_dir_sha1},
             query_params={"path": p["path"]},
         )
         assert_contains(resp, '<a href="' + dir_url + '">' + p["name"] + "</a>")
 
     assert_contains(resp, "<li>" + filename + "</li>")
 
     url_raw = reverse(
         "browse-content-raw",
         url_args={"query_string": content_text["sha1"]},
         query_params={"filename": filename},
     )
     assert_contains(resp, url_raw)
 
     url = reverse(
         "browse-content",
         url_args={"query_string": content_text["sha1"]},
         query_params={"path": filename},
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     assert_not_contains(resp, '<nav class="bread-crumbs')
 
     invalid_path = "%s/foo/bar/baz" % root_dir_sha1
     url = reverse(
         "browse-content",
         url_args={"query_string": content_text["sha1"]},
         query_params={"path": invalid_path},
     )
 
     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
 
 
 def test_content_raw_text(client, archive_data, content_text):
     url = reverse("browse-content-raw", url_args={"query_string": content_text["sha1"]})
 
     resp = check_http_get_response(
         client, url, status_code=200, content_type="text/plain"
     )
 
     content_data = archive_data.content_get_data(content_text["sha1"])["data"]
 
     assert resp["Content-Type"] == "text/plain"
     assert resp["Content-disposition"] == (
         "filename=%s_%s" % ("sha1", content_text["sha1"])
     )
     assert resp.content == content_data
 
     filename = content_text["path"].split("/")[-1]
 
     url = reverse(
         "browse-content-raw",
         url_args={"query_string": content_text["sha1"]},
         query_params={"filename": filename},
     )
 
     resp = check_http_get_response(
         client, url, status_code=200, content_type="text/plain"
     )
 
     assert resp["Content-Type"] == "text/plain"
     assert resp["Content-disposition"] == "filename=%s" % filename
     assert resp.content == content_data
 
 
 def test_content_raw_no_utf8_text(client, content_text_non_utf8):
     url = reverse(
         "browse-content-raw", url_args={"query_string": content_text_non_utf8["sha1"]}
     )
 
     resp = check_http_get_response(
         client, url, status_code=200, content_type="text/plain"
     )
     _, encoding = get_mimetype_and_encoding_for_content(resp.content)
     assert encoding == content_text_non_utf8["encoding"]
 
 
 def test_content_raw_bin(client, archive_data, content_image_type):
     url = reverse(
         "browse-content-raw", url_args={"query_string": content_image_type["sha1"]}
     )
 
     resp = check_http_get_response(
         client, url, status_code=200, content_type="application/octet-stream"
     )
 
     filename = content_image_type["path"].split("/")[-1]
     content_data = archive_data.content_get_data(content_image_type["sha1"])["data"]
 
     assert resp["Content-Type"] == "application/octet-stream"
     assert resp["Content-disposition"] == "attachment; filename=%s_%s" % (
         "sha1",
         content_image_type["sha1"],
     )
     assert resp.content == content_data
 
     url = reverse(
         "browse-content-raw",
         url_args={"query_string": content_image_type["sha1"]},
         query_params={"filename": filename},
     )
 
     resp = check_http_get_response(
         client, url, status_code=200, content_type="application/octet-stream"
     )
 
     assert resp["Content-Type"] == "application/octet-stream"
     assert resp["Content-disposition"] == "attachment; filename=%s" % filename
     assert resp.content == content_data
 
 
 @pytest.mark.django_db
 @pytest.mark.parametrize("staff_user_logged_in", [False, True])
 def test_content_request_errors(
     client, staff_user, invalid_sha1, unknown_content, staff_user_logged_in
 ):
 
     if staff_user_logged_in:
         client.force_login(staff_user)
 
     url = reverse("browse-content", url_args={"query_string": invalid_sha1})
     check_html_get_response(client, url, status_code=400, template_used="error.html")
 
     url = reverse("browse-content", url_args={"query_string": unknown_content["sha1"]})
     check_html_get_response(
         client, url, status_code=404, template_used="browse/content.html"
     )
 
 
 def test_content_bytes_missing(client, archive_data, mocker, content):
     mock_archive = mocker.patch("swh.web.browse.utils.archive")
     content_data = archive_data.content_get(content["sha1"])
 
     mock_archive.lookup_content.return_value = content_data
     mock_archive.lookup_content_filetype.side_effect = Exception()
     mock_archive.lookup_content_raw.side_effect = NotFoundExc(
         "Content bytes not available!"
     )
 
     url = reverse("browse-content", url_args={"query_string": content["sha1"]})
 
     check_html_get_response(
         client, url, status_code=404, template_used="browse/content.html"
     )
 
 
 def test_content_too_large(client, mocker):
     mock_request_content = mocker.patch("swh.web.browse.views.content.request_content")
     stub_content_too_large_data = {
         "checksums": {
             "sha1": "8624bcdae55baeef00cd11d5dfcfa60f68710a02",
             "sha1_git": "94a9ed024d3859793618152ea559a168bbcbb5e2",
             "sha256": (
                 "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b16" "5a1dcd80c7c545eb65b903"
             ),
             "blake2s256": (
                 "38702b7168c7785bfe748b51b45d9856070ba90" "f9dc6d90f2ea75d4356411ffe"
             ),
         },
         "length": 30000000,
         "raw_data": None,
         "mimetype": "text/plain",
         "encoding": "us-ascii",
         "language": "not detected",
         "licenses": "GPL",
         "error_code": 200,
         "error_message": "",
         "error_description": "",
     }
 
     content_sha1 = stub_content_too_large_data["checksums"]["sha1"]
 
     mock_request_content.return_value = stub_content_too_large_data
 
     url = reverse("browse-content", url_args={"query_string": content_sha1})
 
     url_raw = reverse("browse-content-raw", url_args={"query_string": content_sha1})
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     assert_contains(resp, "Content is too large to be displayed")
     assert_contains(resp, url_raw)
 
 
 def test_content_uppercase(client, content):
     url = reverse(
         "browse-content-uppercase-checksum",
         url_args={"query_string": content["sha1"].upper()},
     )
 
     resp = check_html_get_response(client, url, status_code=302)
 
     redirect_url = reverse("browse-content", url_args={"query_string": content["sha1"]})
 
     assert resp["location"] == redirect_url
 
 
 def test_content_utf8_detected_as_binary_display(
     client, archive_data, content_utf8_detected_as_binary
 ):
     url = reverse(
         "browse-content",
         url_args={"query_string": content_utf8_detected_as_binary["sha1"]},
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     content_display = _process_content_for_display(
         archive_data, content_utf8_detected_as_binary
     )
 
     assert_contains(resp, escape(content_display["content_data"]))
 
 
 def test_content_origin_snapshot_branch_browse(
     client, archive_data, origin_with_multiple_visits
 ):
     origin_url = origin_with_multiple_visits["url"]
     visits = archive_data.origin_visit_get(origin_url)
     visit = random.choice(visits)
     snapshot = archive_data.snapshot_get(visit["snapshot"])
     snapshot_sizes = archive_data.snapshot_count_branches(visit["snapshot"])
     branches, releases, _ = process_snapshot_branches(snapshot)
     branch_info = random.choice(branches)
 
     directory = archive_data.revision_get(branch_info["revision"])["directory"]
     directory_content = archive_data.directory_ls(directory)
     directory_file = random.choice(
         [e for e in directory_content if e["type"] == "file"]
     )
 
     url = reverse(
         "browse-content",
         url_args={"query_string": directory_file["checksums"]["sha1"]},
         query_params={
             "origin_url": origin_with_multiple_visits["url"],
             "snapshot": snapshot["id"],
             "branch": branch_info["name"],
             "path": directory_file["name"],
         },
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     _check_origin_snapshot_related_html(
         resp, origin_with_multiple_visits, snapshot, snapshot_sizes, branches, releases
     )
     assert_contains(resp, directory_file["name"])
     assert_contains(resp, f"Branch: <strong>{branch_info['name']}</strong>")
 
     cnt_swhid = gen_swhid(
         ObjectType.CONTENT,
         directory_file["checksums"]["sha1_git"],
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
             "anchor": gen_swhid(ObjectType.REVISION, branch_info["revision"]),
             "path": f"/{directory_file['name']}",
         },
     )
     assert_contains(resp, cnt_swhid)
 
     dir_swhid = gen_swhid(
         ObjectType.DIRECTORY,
         directory,
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
             "anchor": gen_swhid(ObjectType.REVISION, branch_info["revision"]),
         },
     )
     assert_contains(resp, dir_swhid)
 
     rev_swhid = gen_swhid(
         ObjectType.REVISION,
         branch_info["revision"],
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
         },
     )
     assert_contains(resp, rev_swhid)
 
     snp_swhid = gen_swhid(
         ObjectType.SNAPSHOT, snapshot["id"], metadata={"origin": origin_url,},
     )
     assert_contains(resp, snp_swhid)
 
 
 def test_content_origin_snapshot_release_browse(
     client, archive_data, origin_with_multiple_visits
 ):
     origin_url = origin_with_multiple_visits["url"]
     visits = archive_data.origin_visit_get(origin_url)
     visit = random.choice(visits)
     snapshot = archive_data.snapshot_get(visit["snapshot"])
     snapshot_sizes = archive_data.snapshot_count_branches(visit["snapshot"])
     branches, releases, _ = process_snapshot_branches(snapshot)
     release_info = random.choice(releases)
 
     directory_content = archive_data.directory_ls(release_info["directory"])
     directory_file = random.choice(
         [e for e in directory_content if e["type"] == "file"]
     )
 
     url = reverse(
         "browse-content",
         url_args={"query_string": directory_file["checksums"]["sha1"]},
         query_params={
             "origin_url": origin_url,
             "snapshot": snapshot["id"],
             "release": release_info["name"],
             "path": directory_file["name"],
         },
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     _check_origin_snapshot_related_html(
         resp, origin_with_multiple_visits, snapshot, snapshot_sizes, branches, releases
     )
     assert_contains(resp, directory_file["name"])
     assert_contains(resp, f"Release: <strong>{release_info['name']}</strong>")
 
     cnt_swhid = gen_swhid(
         ObjectType.CONTENT,
         directory_file["checksums"]["sha1_git"],
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
             "anchor": gen_swhid(ObjectType.RELEASE, release_info["id"]),
             "path": f"/{directory_file['name']}",
         },
     )
     assert_contains(resp, cnt_swhid)
 
     dir_swhid = gen_swhid(
         ObjectType.DIRECTORY,
         release_info["directory"],
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
             "anchor": gen_swhid(ObjectType.RELEASE, release_info["id"]),
         },
     )
     assert_contains(resp, dir_swhid)
 
     rev_swhid = gen_swhid(
         ObjectType.REVISION,
         release_info["target"],
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
         },
     )
     assert_contains(resp, rev_swhid)
 
     rel_swhid = gen_swhid(
         ObjectType.RELEASE,
         release_info["id"],
         metadata={
             "origin": origin_url,
             "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
         },
     )
     assert_contains(resp, rel_swhid)
 
     snp_swhid = gen_swhid(
         ObjectType.SNAPSHOT, snapshot["id"], metadata={"origin": origin_url,},
     )
     assert_contains(resp, snp_swhid)
 
 
 def _check_origin_snapshot_related_html(
     resp, origin, snapshot, snapshot_sizes, branches, releases
 ):
     browse_origin_url = reverse(
         "browse-origin", query_params={"origin_url": origin["url"]}
     )
     assert_contains(resp, f'href="{browse_origin_url}"')
 
     origin_branches_url = reverse(
         "browse-origin-branches",
         query_params={"origin_url": origin["url"], "snapshot": snapshot["id"]},
     )
 
     assert_contains(resp, f'href="{escape(origin_branches_url)}"')
     assert_contains(resp, f"Branches ({snapshot_sizes['revision']})")
 
     origin_releases_url = reverse(
         "browse-origin-releases",
         query_params={"origin_url": origin["url"], "snapshot": snapshot["id"]},
     )
 
     assert_contains(resp, f'href="{escape(origin_releases_url)}"')
     assert_contains(resp, f"Releases ({snapshot_sizes['release']})")
 
     assert_contains(resp, '<li class="swh-branch">', count=len(branches))
     assert_contains(resp, '<li class="swh-release">', count=len(releases))
 
 
 def _process_content_for_display(archive_data, content):
     content_data = archive_data.content_get_data(content["sha1"])
 
     mime_type, encoding = get_mimetype_and_encoding_for_content(content_data["data"])
 
-    mime_type, encoding, content_data = _re_encode_content(
+    mime_type, encoding, content_data = re_encode_content(
         mime_type, encoding, content_data["data"]
     )
 
     content_display = prepare_content_for_display(
         content_data, mime_type, content["path"]
     )
 
     assert type(content_display["content_data"]) == str
 
     return content_display
 
 
 def test_content_dispaly_empty_query_string_missing_path(client):
     url = reverse("browse-content", query_params={"origin_url": "http://example.com"},)
     resp = check_html_get_response(
         client, url, status_code=400, template_used="error.html"
     )
     assert_contains(resp, "The path query parameter must be provided.", status_code=400)
 
 
 def test_content_dispaly_empty_query_string_and_snapshot_origin(client):
     url = reverse("browse-content", query_params={"path": "test.txt"},)
     resp = check_html_get_response(client, url, status_code=400,)
     assert_contains(
         resp,
         "The origin_url or snapshot query parameters must be provided.",
         status_code=400,
     )
 
 
 def test_content_dispaly_empty_query_string_with_origin(
     client, archive_data, origin_with_multiple_visits
 ):
     origin_url = origin_with_multiple_visits["url"]
     snapshot = archive_data.snapshot_get_latest(origin_url)
     head_rev_id = archive_data.snapshot_get_head(snapshot)
     head_rev = archive_data.revision_get(head_rev_id)
     dir_content = archive_data.directory_ls(head_rev["directory"])
     dir_files = [e for e in dir_content if e["type"] == "file"]
     dir_file = random.choice(dir_files)
 
     url = reverse(
         "browse-content",
         query_params={"origin_url": origin_url, "path": dir_file["name"],},
     )
 
     resp = check_html_get_response(client, url, status_code=302,)
     redict_url = reverse(
         "browse-content",
         url_args={"query_string": f"sha1_git:{dir_file['checksums']['sha1_git']}"},
         query_params={"origin_url": origin_url, "path": dir_file["name"],},
     )
     assert resp.url == redict_url
 
 
 def test_content_dispaly_empty_query_string_with_snapshot(
     client, archive_data, origin_with_multiple_visits
 ):
     origin_url = origin_with_multiple_visits["url"]
     snapshot = archive_data.snapshot_get_latest(origin_url)
     head_rev_id = archive_data.snapshot_get_head(snapshot)
     head_rev = archive_data.revision_get(head_rev_id)
     dir_content = archive_data.directory_ls(head_rev["directory"])
     dir_files = [e for e in dir_content if e["type"] == "file"]
     dir_file = random.choice(dir_files)
     url = reverse(
         "browse-content",
         query_params={"snapshot": snapshot["id"], "path": dir_file["name"],},
     )
 
     resp = check_html_get_response(client, url, status_code=302,)
     redict_url = reverse(
         "browse-content",
         url_args={"query_string": f"sha1_git:{dir_file['checksums']['sha1_git']}"},
         query_params={"snapshot": snapshot["id"], "path": dir_file["name"],},
     )
     assert resp.url == redict_url
 
 
 def test_browse_origin_content_no_visit(client, mocker, origin):
     mock_get_origin_visits = mocker.patch(
         "swh.web.common.origin_visits.get_origin_visits"
     )
     mock_get_origin_visits.return_value = []
     mock_archive = mocker.patch("swh.web.common.origin_visits.archive")
     mock_archive.lookup_origin_visit_latest.return_value = None
     url = reverse(
         "browse-content", query_params={"origin_url": origin["url"], "path": "foo"},
     )
 
     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert_contains(resp, "No valid visit", status_code=404)
     assert not mock_get_origin_visits.called
 
 
 def test_browse_origin_content_unknown_visit(client, mocker, origin):
     mock_get_origin_visits = mocker.patch(
         "swh.web.common.origin_visits.get_origin_visits"
     )
     mock_get_origin_visits.return_value = [{"visit": 1}]
 
     url = reverse(
         "browse-content",
         query_params={"origin_url": origin["url"], "path": "foo", "visit_id": 2},
     )
 
     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert re.search("Resource not found", resp.content.decode("utf-8"))
 
 
 def test_browse_origin_content_not_found(client, origin):
     url = reverse(
         "browse-content",
         query_params={"origin_url": origin["url"], "path": "/invalid/file/path"},
     )
 
     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert re.search("Resource not found", resp.content.decode("utf-8"))
 
 
 def test_browse_content_invalid_origin(client):
     url = reverse(
         "browse-content",
         query_params={
             "origin_url": "http://invalid-origin",
             "path": "/invalid/file/path",
         },
     )
 
     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert re.search("Resource not found", resp.content.decode("utf-8"))
 
 
 def test_origin_content_view(
     client, archive_data, swh_scheduler, origin_with_multiple_visits
 ):
     origin_visits = archive_data.origin_visit_get(origin_with_multiple_visits["url"])
 
     def _get_archive_data(visit_idx):
         snapshot = archive_data.snapshot_get(origin_visits[visit_idx]["snapshot"])
         head_rev_id = archive_data.snapshot_get_head(snapshot)
         head_rev = archive_data.revision_get(head_rev_id)
         dir_content = archive_data.directory_ls(head_rev["directory"])
         dir_files = [e for e in dir_content if e["type"] == "file"]
         dir_file = random.choice(dir_files)
         branches, releases, _ = process_snapshot_branches(snapshot)
         return {
             "branches": branches,
             "releases": releases,
             "root_dir_sha1": head_rev["directory"],
             "content": get_content(dir_file["checksums"]["sha1"]),
             "visit": origin_visits[visit_idx],
             "snapshot_sizes": archive_data.snapshot_count_branches(snapshot["id"]),
         }
 
     tdata = _get_archive_data(-1)
 
     _origin_content_view_test_helper(
         client,
         archive_data,
         origin_with_multiple_visits,
         origin_visits[-1],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
     )
 
     _origin_content_view_test_helper(
         client,
         archive_data,
         origin_with_multiple_visits,
         origin_visits[-1],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         timestamp=tdata["visit"]["date"],
     )
 
     _origin_content_view_test_helper(
         client,
         archive_data,
         origin_with_multiple_visits,
         origin_visits[-1],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         snapshot_id=tdata["visit"]["snapshot"],
     )
 
     tdata = _get_archive_data(0)
 
     _origin_content_view_test_helper(
         client,
         archive_data,
         origin_with_multiple_visits,
         origin_visits[0],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         visit_id=tdata["visit"]["visit"],
     )
 
     _origin_content_view_test_helper(
         client,
         archive_data,
         origin_with_multiple_visits,
         origin_visits[0],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         snapshot_id=tdata["visit"]["snapshot"],
     )
 
 
 def _origin_content_view_test_helper(
     client,
     archive_data,
     origin_info,
     origin_visit,
     snapshot_sizes,
     origin_branches,
     origin_releases,
     root_dir_sha1,
     content,
     visit_id=None,
     timestamp=None,
     snapshot_id=None,
 ):
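     """Check the rendering of the browse-content view in an origin snapshot
     context: displayed content, breadcrumbs, raw content link, branches and
     releases dropdowns and the generated SWHID, for the visit selected
     through visit_id, timestamp or snapshot_id."""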
     content_path = "/".join(content["path"].split("/")[1:])
 
     if not visit_id and not snapshot_id:
         visit_id = origin_visit["visit"]
 
     query_params = {"origin_url": origin_info["url"], "path": content_path}
 
     if timestamp:
         query_params["timestamp"] = timestamp
 
     if visit_id:
         query_params["visit_id"] = visit_id
     elif snapshot_id:
         query_params["snapshot"] = snapshot_id
 
     url = reverse(
         "browse-content",
         url_args={"query_string": f"sha1_git:{content['sha1_git']}"},
         query_params=query_params,
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     assert isinstance(content["data"], str)
 
     assert_contains(resp, '<code class="%s">' % content["hljs_language"])
     assert_contains(resp, escape(content["data"]))
 
     split_path = content_path.split("/")
 
     filename = split_path[-1]
     path = content_path.replace(filename, "")[:-1]
 
     path_info = gen_path_info(path)
 
     del query_params["path"]
 
     if timestamp:
         query_params["timestamp"] = format_utc_iso_date(
             parse_iso8601_date_to_utc(timestamp).isoformat(), "%Y-%m-%dT%H:%M:%SZ"
         )
 
     root_dir_url = reverse(
         "browse-directory",
         url_args={"sha1_git": root_dir_sha1},
         query_params=query_params,
     )
 
     assert_contains(resp, '<li class="swh-path">', count=len(path_info) + 1)
 
     assert_contains(resp, '<a href="%s">%s</a>' % (root_dir_url, root_dir_sha1[:7]))
 
     for p in path_info:
         query_params["path"] = p["path"]
         dir_url = reverse("browse-origin-directory", query_params=query_params)
         assert_contains(resp, '<a href="%s">%s</a>' % (dir_url, p["name"]))
 
     assert_contains(resp, "<li>%s</li>" % filename)
 
     query_string = "sha1_git:" + content["sha1_git"]
 
     url_raw = reverse(
         "browse-content-raw",
         url_args={"query_string": query_string},
         query_params={"filename": filename},
     )
     assert_contains(resp, url_raw)
 
     if "path" in query_params:
         del query_params["path"]
 
     origin_branches_url = reverse("browse-origin-branches", query_params=query_params)
 
     assert_contains(resp, f'href="{escape(origin_branches_url)}"')
     assert_contains(resp, f"Branches ({snapshot_sizes['revision']})")
 
     origin_releases_url = reverse("browse-origin-releases", query_params=query_params)
 
     assert_contains(resp, f'href="{escape(origin_releases_url)}">')
     assert_contains(resp, f"Releases ({snapshot_sizes['release']})")
 
     assert_contains(resp, '<li class="swh-branch">', count=len(origin_branches))
 
     query_params["path"] = content_path
 
     for branch in origin_branches:
         root_dir_branch_url = reverse(
             "browse-origin-content",
             query_params={"branch": branch["name"], **query_params},
         )
 
         assert_contains(resp, '<a href="%s">' % root_dir_branch_url)
 
     assert_contains(resp, '<li class="swh-release">', count=len(origin_releases))
 
     query_params["branch"] = None
     for release in origin_releases:
         root_dir_release_url = reverse(
             "browse-origin-content",
             query_params={"release": release["name"], **query_params},
         )
 
         assert_contains(resp, '<a href="%s">' % root_dir_release_url)
 
     url = reverse(
         "browse-content",
         url_args={"query_string": query_string},
         query_params=query_params,
     )
 
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/content.html"
     )
 
     snapshot = archive_data.snapshot_get(origin_visit["snapshot"])
     head_rev_id = archive_data.snapshot_get_head(snapshot)
 
     swhid_context = {
         "origin": origin_info["url"],
         "visit": gen_swhid(ObjectType.SNAPSHOT, snapshot["id"]),
         "anchor": gen_swhid(ObjectType.REVISION, head_rev_id),
         "path": f"/{content_path}",
     }
 
     swh_cnt_id = gen_swhid(
         ObjectType.CONTENT, content["sha1_git"], metadata=swhid_context
     )
     swh_cnt_id_url = reverse("browse-swhid", url_args={"swhid": swh_cnt_id})
     assert_contains(resp, swh_cnt_id)
     assert_contains(resp, swh_cnt_id_url)
 
     assert_contains(resp, "swh-take-new-snapshot")
 
     _check_origin_link(resp, origin_info["url"])
 
     assert_not_contains(resp, "swh-metadata-popover")
 
 
 def _check_origin_link(resp, origin_url):
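     """Check the response contains a link to the browse view of the origin."""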
     browse_origin_url = reverse(
         "browse-origin", query_params={"origin_url": origin_url}
     )
     assert_contains(resp, f'href="{browse_origin_url}"')
 
 
 @pytest.mark.django_db
 @pytest.mark.parametrize("staff_user_logged_in", [False, True])
 def test_browse_content_snapshot_context_release_directory_target(
     client, staff_user, archive_data, directory_with_files, staff_user_logged_in
 ):
 
     if staff_user_logged_in:
         client.force_login(staff_user)
 
     release_name = "v1.0.0"
     release = Release(
         name=release_name.encode(),
         message=f"release {release_name}".encode(),
         target=hash_to_bytes(directory_with_files),
         target_type=ModelObjectType.DIRECTORY,
         synthetic=True,
     )
     archive_data.release_add([release])
 
     snapshot = Snapshot(
         branches={
             release_name.encode(): SnapshotBranch(
                 target=release.id, target_type=TargetType.RELEASE
             ),
         },
     )
     archive_data.snapshot_add([snapshot])
 
     dir_content = archive_data.directory_ls(directory_with_files)
     file_entry = random.choice(
         [entry for entry in dir_content if entry["type"] == "file"]
     )
 
     sha1_git = file_entry["checksums"]["sha1_git"]
 
     browse_url = reverse(
         "browse-content",
         url_args={"query_string": f"sha1_git:{sha1_git}"},
         query_params={
             "path": file_entry["name"],
             "release": release_name,
             "snapshot": snapshot.id.hex(),
         },
     )
 
     check_html_get_response(
         client, browse_url, status_code=200, template_used="browse/content.html"
     )
diff --git a/swh/web/tests/data.py b/swh/web/tests/data.py
index f9051bd1..c5201a30 100644
--- a/swh/web/tests/data.py
+++ b/swh/web/tests/data.py
@@ -1,544 +1,544 @@
 # Copyright (C) 2018-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from copy import deepcopy
 from datetime import timedelta
 import os
 from pathlib import Path
 import random
 import time
 from typing import Dict, List, Optional, Set
 
 from swh.core.config import merge_configs
 from swh.counters import get_counters
 from swh.indexer.ctags import CtagsIndexer
 from swh.indexer.fossology_license import FossologyLicenseIndexer
 from swh.indexer.mimetype import MimetypeIndexer
 from swh.indexer.storage import get_indexer_storage
 from swh.indexer.storage.model import OriginIntrinsicMetadataRow
 from swh.loader.git.from_disk import GitLoaderFromArchive
 from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_hex
 from swh.model.model import (
     Content,
     Directory,
     Origin,
     OriginVisit,
     OriginVisitStatus,
     Snapshot,
 )
 from swh.model.swhids import CoreSWHID, ObjectType, QualifiedSWHID
 from swh.search import get_search
 from swh.storage import get_storage
 from swh.storage.algos.dir_iterators import dir_iterator
 from swh.storage.algos.snapshot import snapshot_get_latest
 from swh.storage.interface import Sha1
 from swh.storage.utils import now
 from swh.web import config
 from swh.web.browse.utils import (
-    _re_encode_content,
     get_mimetype_and_encoding_for_content,
     prepare_content_for_display,
+    re_encode_content,
 )
 from swh.web.common import archive
 
 # Module used to initialize the data that will be provided as input to tests
 
 # Base content indexer configuration
 _TEST_INDEXER_BASE_CONFIG = {
     "storage": {"cls": "memory"},
     "objstorage": {"cls": "memory", "args": {},},
     "indexer_storage": {"cls": "memory", "args": {},},
 }
 
 
 def random_sha1():
     return hash_to_hex(bytes(random.randint(0, 255) for _ in range(20)))
 
 
 def random_sha256():
     return hash_to_hex(bytes(random.randint(0, 255) for _ in range(32)))
 
 
 def random_blake2s256():
     return hash_to_hex(bytes(random.randint(0, 255) for _ in range(32)))
 
 
 def random_content():
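     """Return a dict of random checksum hex digests, shaped like the
     checksums of an archived content."""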
     return {
         "sha1": random_sha1(),
         "sha1_git": random_sha1(),
         "sha256": random_sha256(),
         "blake2s256": random_blake2s256(),
     }
 
 
 _TEST_MIMETYPE_INDEXER_CONFIG = merge_configs(
     _TEST_INDEXER_BASE_CONFIG,
     {
         "tools": {
             "name": "file",
             "version": "1:5.30-1+deb9u1",
             "configuration": {"type": "library", "debian-package": "python3-magic"},
         }
     },
 )
 
 
 _TEST_LICENSE_INDEXER_CONFIG = merge_configs(
     _TEST_INDEXER_BASE_CONFIG,
     {
         "workdir": "/tmp/swh/indexer.fossology.license",
         "tools": {
             "name": "nomos",
             "version": "3.1.0rc2-31-ga2cbb8c",
             "configuration": {"command_line": "nomossa <filepath>",},
         },
     },
 )
 
 
 _TEST_CTAGS_INDEXER_CONFIG = merge_configs(
     _TEST_INDEXER_BASE_CONFIG,
     {
         "workdir": "/tmp/swh/indexer.ctags",
         "languages": {"c": "c"},
         "tools": {
             "name": "universal-ctags",
             "version": "~git7859817b",
             "configuration": {
                 "command_line": """ctags --fields=+lnz --sort=no --links=no """
                 """--output-format=json <filepath>"""
             },
         },
     },
 )
 
 
 # Lightweight git repositories that will be loaded to generate
 # input data for tests
 _TEST_ORIGINS = [
     {
         "type": "git",
         "url": "https://github.com/memononen/libtess2",
         "archives": ["libtess2.zip"],
         "metadata": {
             "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
             "description": (
                 "Game and tools oriented refactored version of GLU tessellator."
             ),
         },
     },
     {
         "type": "git",
         "url": "https://github.com/wcoder/highlightjs-line-numbers.js",
         "archives": [
             "highlightjs-line-numbers.js.zip",
             "highlightjs-line-numbers.js_visit2.zip",
         ],
         "metadata": {
             "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
             "description": "Line numbering plugin for Highlight.js",
         },
     },
     {
         "type": "git",
         "url": "repo_with_submodules",
         "archives": ["repo_with_submodules.tgz"],
         "metadata": {
             "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
             "description": "This is just a sample repository with submodules",
         },
     },
 ]
 
 _contents = {}
 
 
 def _add_extra_contents(storage, contents):
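     """Add contents whose mimetypes are not covered by the loaded test
     repositories (image/x-portable-bitmap, application/pgp-keys) to the
     archive, recording their sha1 checksums in ``contents``."""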
     pbm_image_data = b"""P1
 # PBM example
 24 7
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0
 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0
 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 1 0
 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0
 0 1 0 0 0 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"""
 
     # add file with mimetype image/x-portable-bitmap in the archive content
     pbm_content = Content.from_data(pbm_image_data)
     storage.content_add([pbm_content])
     contents.add(pbm_content.sha1)
 
     # add file with mimetype application/pgp-keys in the archive content
     gpg_path = os.path.join(
         os.path.dirname(__file__), "resources/contents/other/extensions/public.gpg"
     )
     gpg_content = Content.from_data(Path(gpg_path).read_bytes())
     storage.content_add([gpg_content])
     contents.add(gpg_content.sha1)
 
 
 INDEXER_TOOL = {
     "tool_name": "swh-web tests",
     "tool_version": "1.0",
     "tool_configuration": {},
 }
 
 ORIGIN_METADATA_KEY = "keywords"
 ORIGIN_METADATA_VALUE = "git"
 
 ORIGIN_MASTER_REVISION = {}
 
 
 def _add_origin(
     storage, search, counters, origin_url, visit_type="git", snapshot_branches={}
 ):
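     """Register an origin with a single full visit, whose snapshot is built
     from ``snapshot_branches``, in the given storage, search and counters
     instances."""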
     storage.origin_add([Origin(url=origin_url)])
     search.origin_update(
         [{"url": origin_url, "has_visits": True, "visit_types": [visit_type]}]
     )
     counters.add("origin", [origin_url])
     date = now()
     visit = OriginVisit(origin=origin_url, date=date, type=visit_type)
     visit = storage.origin_visit_add([visit])[0]
     counters.add("origin_visit", [f"{visit.unique_key()}"])
     snapshot = Snapshot.from_dict({"branches": snapshot_branches})
     storage.snapshot_add([snapshot])
     counters.add("snapshot", [snapshot.id])
     visit_status = OriginVisitStatus(
         origin=origin_url,
         visit=visit.visit,
         date=date + timedelta(minutes=1),
         type=visit.type,
         status="full",
         snapshot=snapshot.id,
     )
     storage.origin_visit_status_add([visit_status])
     counters.add("origin_visit_status", [f"{visit_status.unique_key()}"])
 
 
 # Tests data initialization
 def _init_tests_data():
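     """Load the test git repositories and synthetic origins into in-memory
     storage, search, counters and indexer storage instances, and return the
     resulting tests data in a dict."""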
     # To hold reference to the memory storage
     storage = get_storage("memory")
 
     # Create search instance
     search = get_search("memory")
     search.initialize()
     search.origin_update({"url": origin["url"]} for origin in _TEST_ORIGINS)
 
     # create the counters instance
     counters = get_counters("memory")
 
     # Create indexer storage instance that will be shared by indexers
     idx_storage = get_indexer_storage("memory")
 
     # Declare a test tool for origin intrinsic metadata tests
     idx_tool = idx_storage.indexer_configuration_add([INDEXER_TOOL])[0]
     INDEXER_TOOL["id"] = idx_tool["id"]
 
     # Load git repositories from archives
     for origin in _TEST_ORIGINS:
         for i, archive_ in enumerate(origin["archives"]):
             if i > 0:
                 # ensure visit dates will be different when simulating
                 # multiple visits of an origin
                 time.sleep(1)
             origin_repo_archive = os.path.join(
                 os.path.dirname(__file__), "resources/repos/%s" % archive_
             )
             loader = GitLoaderFromArchive(
                 storage, origin["url"], archive_path=origin_repo_archive,
             )
 
             result = loader.load()
             assert result["status"] == "eventful"
 
         ori = storage.origin_get([origin["url"]])[0]
         origin.update(ori.to_dict())  # add an 'id' key if enabled
         search.origin_update(
             [{"url": origin["url"], "has_visits": True, "visit_types": ["git"]}]
         )
 
     # add many origins with a single "tar" visit and an empty snapshot,
     # presumably to exercise origin listing and pagination related tests
     for i in range(250):
         _add_origin(
             storage,
             search,
             counters,
             origin_url=f"https://many.origins/{i+1}",
             visit_type="tar",
         )
 
     sha1s: Set[Sha1] = set()
     directories = set()
     revisions = set()
     releases = set()
     snapshots = set()
     swhids = []
 
     content_path = {}
 
     # Get all objects loaded into the test archive
     common_metadata = {ORIGIN_METADATA_KEY: ORIGIN_METADATA_VALUE}
     for origin in _TEST_ORIGINS:
         origin_revisions = set()
         snp = snapshot_get_latest(storage, origin["url"])
         swhids.append(
             QualifiedSWHID(
                 object_type=ObjectType.SNAPSHOT, object_id=snp.id, origin=origin["url"]
             )
         )
         snapshots.add(hash_to_hex(snp.id))
         for branch_name, branch_data in snp.branches.items():
             target_type = branch_data.target_type.value
             if target_type == "revision":
                 origin_revisions.add(branch_data.target)
                 swhids.append(
                     QualifiedSWHID(
                         object_type=ObjectType.REVISION,
                         object_id=branch_data.target,
                         origin=origin["url"],
                         visit=CoreSWHID(
                             object_type=ObjectType.SNAPSHOT, object_id=snp.id
                         ),
                     )
                 )
                 if b"master" in branch_name:
                     # Add some origin intrinsic metadata for tests
                     metadata = common_metadata
                     metadata.update(origin.get("metadata", {}))
                     origin_metadata = OriginIntrinsicMetadataRow(
                         id=origin["url"],
                         from_revision=branch_data.target,
                         indexer_configuration_id=idx_tool["id"],
                         metadata=metadata,
                         mappings=[],
                     )
                     idx_storage.origin_intrinsic_metadata_add([origin_metadata])
                     search.origin_update(
                         [{"url": origin["url"], "intrinsic_metadata": metadata}]
                     )
 
                     ORIGIN_MASTER_REVISION[origin["url"]] = hash_to_hex(
                         branch_data.target
                     )
             elif target_type == "release":
                 release = storage.release_get([branch_data.target])[0]
                 origin_revisions.add(release.target)
                 releases.add(hash_to_hex(branch_data.target))
                 swhids.append(
                     QualifiedSWHID(
                         object_type=ObjectType.RELEASE,
                         object_id=branch_data.target,
                         origin=origin["url"],
                         visit=CoreSWHID(
                             object_type=ObjectType.SNAPSHOT, object_id=snp.id
                         ),
                     )
                 )
 
         for rev_log in storage.revision_shortlog(origin_revisions):
             rev_id = rev_log[0]
             revisions.add(rev_id)
 
         for rev in storage.revision_get(sorted(origin_revisions)):
             if rev is None:
                 continue
             dir_id = rev.directory
             directories.add(hash_to_hex(dir_id))
             for entry in dir_iterator(storage, dir_id):
                 if entry["type"] == "file":
                     sha1s.add(entry["sha1"])
                     content_path[entry["sha1"]] = "/".join(
                         [hash_to_hex(dir_id), entry["path"].decode("utf-8")]
                     )
                     swhids.append(
                         QualifiedSWHID(
                             object_type=ObjectType.CONTENT,
                             object_id=entry["sha1_git"],
                             origin=origin["url"],
                             visit=CoreSWHID(
                                 object_type=ObjectType.SNAPSHOT, object_id=snp.id
                             ),
                             anchor=CoreSWHID(
                                 object_type=ObjectType.REVISION, object_id=rev.id
                             ),
                             path=b"/" + entry["path"],
                         )
                     )
                 elif entry["type"] == "dir":
                     directories.add(hash_to_hex(entry["target"]))
                     swhids.append(
                         QualifiedSWHID(
                             object_type=ObjectType.DIRECTORY,
                             object_id=entry["target"],
                             origin=origin["url"],
                             visit=CoreSWHID(
                                 object_type=ObjectType.SNAPSHOT, object_id=snp.id
                             ),
                             anchor=CoreSWHID(
                                 object_type=ObjectType.REVISION, object_id=rev.id
                             ),
                             path=b"/" + entry["path"] + b"/",
                         )
                     )
 
     _add_extra_contents(storage, sha1s)
 
     # Get all checksums for each content
     result: List[Optional[Content]] = storage.content_get(list(sha1s))
 
     contents: List[Dict] = []
     for content in result:
         assert content is not None
         sha1 = hash_to_hex(content.sha1)
         content_metadata = {
             algo: hash_to_hex(getattr(content, algo)) for algo in DEFAULT_ALGORITHMS
         }
 
         path = ""
         if content.sha1 in content_path:
             path = content_path[content.sha1]
 
         cnt_data = storage.content_get_data(content.sha1)
         assert cnt_data is not None
         mimetype, encoding = get_mimetype_and_encoding_for_content(cnt_data)
-        _, _, cnt_data = _re_encode_content(mimetype, encoding, cnt_data)
+        _, _, cnt_data = re_encode_content(mimetype, encoding, cnt_data)
 
         content_display_data = prepare_content_for_display(cnt_data, mimetype, path)
 
         content_metadata.update(
             {
                 "path": path,
                 "mimetype": mimetype,
                 "encoding": encoding,
                 "hljs_language": content_display_data["language"],
                 "raw_data": cnt_data,
                 "data": content_display_data["content_data"],
             }
         )
 
         _contents[sha1] = content_metadata
         contents.append(content_metadata)
 
     # Add the empty directory to the test archive
     storage.directory_add([Directory(entries=())])
 
     # Add empty content to the test archive
     storage.content_add([Content.from_data(data=b"")])
 
     # Add fake git origin with pull request branches
     _add_origin(
         storage,
         search,
         counters,
         origin_url="https://git.example.org/project",
         snapshot_branches={
             b"refs/heads/master": {
                 "target_type": "revision",
                 "target": next(iter(revisions)),
             },
             **{
                 f"refs/pull/{i}".encode(): {
                     "target_type": "revision",
                     "target": next(iter(revisions)),
                 }
                 for i in range(300)
             },
         },
     )
 
     counters.add("revision", revisions)
     counters.add("release", releases)
     counters.add("directory", directories)
     counters.add("content", [content["sha1"] for content in contents])
 
     # Return tests data
     return {
         "search": search,
         "storage": storage,
         "idx_storage": idx_storage,
         "counters": counters,
         "origins": _TEST_ORIGINS,
         "contents": list(sorted(contents, key=lambda c: c["sha1"])),
         "directories": list(sorted(directories)),
         "releases": list(sorted(releases)),
         "revisions": list(sorted(map(hash_to_hex, revisions))),
         "snapshots": list(sorted(snapshots)),
         "swhids": swhids,
     }
 
 
 def _init_indexers(tests_data):
     # Instantiate content indexers that will be used in tests
     # and force them to use the memory storages
     indexers = {}
     for idx_name, idx_class, idx_config in (
         ("mimetype_indexer", MimetypeIndexer, _TEST_MIMETYPE_INDEXER_CONFIG),
         ("license_indexer", FossologyLicenseIndexer, _TEST_LICENSE_INDEXER_CONFIG),
         ("ctags_indexer", CtagsIndexer, _TEST_CTAGS_INDEXER_CONFIG),
     ):
         idx = idx_class(config=idx_config)
         idx.storage = tests_data["storage"]
         idx.objstorage = tests_data["storage"].objstorage
         idx.idx_storage = tests_data["idx_storage"]
         idx.register_tools(idx.config["tools"])
         indexers[idx_name] = idx
 
     return indexers
 
 
 def get_content(content_sha1):
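     """Return the metadata registered in tests data for a content, keyed by
     its sha1 checksum (hexadecimal string)."""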
     return _contents.get(content_sha1)
 
 
 _tests_data = None
 _current_tests_data = None
 _indexer_loggers = {}
 
 
 def get_tests_data(reset=False):
     """
     Initialize tests data and return them in a dict.
     """
     global _tests_data, _current_tests_data
     if _tests_data is None:
         _tests_data = _init_tests_data()
         indexers = _init_indexers(_tests_data)
         for (name, idx) in indexers.items():
             # pytest makes the loggers use a temporary file, and deepcopy
             # requires serializability; so we remove the loggers before
             # copying and add them back afterwards.
             _indexer_loggers[name] = idx.log
             del idx.log
         _tests_data.update(indexers)
     if reset or _current_tests_data is None:
         _current_tests_data = deepcopy(_tests_data)
         for (name, logger) in _indexer_loggers.items():
             _current_tests_data[name].log = logger
     return _current_tests_data
 
 
 def override_storages(storage, idx_storage, search, counters):
     """
     Helper function to replace the storages from which archive data
     are fetched.
     """
     swh_config = config.get_config()
     swh_config.update(
         {
             "storage": storage,
             "indexer_storage": idx_storage,
             "search": search,
             "counters": counters,
         }
     )
 
     archive.storage = storage
     archive.idx_storage = idx_storage
     archive.search = search
     archive.counters = counters