diff --git a/mypy.ini b/mypy.ini index e1234e22..b51bd7de 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,52 +1,49 @@ [mypy] namespace_packages = True warn_unused_ignores = True # support for django magic: https://github.com/typeddjango/django-stubs plugins = mypy_django_plugin.main, mypy_drf_plugin.main [mypy.plugins.django-stubs] django_settings_module = swh.web.settings.development # 3rd party libraries without stubs (yet) [mypy-bs4.*] ignore_missing_imports = True [mypy-corsheaders.*] ignore_missing_imports = True [mypy-django_js_reverse.*] ignore_missing_imports = True [mypy-htmlmin.*] ignore_missing_imports = True [mypy-magic.*] ignore_missing_imports = True [mypy-pkg_resources.*] ignore_missing_imports = True [mypy-prometheus_client.*] ignore_missing_imports = True [mypy-pygments.*] ignore_missing_imports = True -[mypy-pypandoc.*] -ignore_missing_imports = True - [mypy-pytest.*] ignore_missing_imports = True [mypy-requests_mock.*] ignore_missing_imports = True [mypy-sphinx.*] ignore_missing_imports = True [mypy-sphinxcontrib.*] ignore_missing_imports = True [mypy-swh.docs.*] ignore_missing_imports = True diff --git a/requirements.txt b/requirements.txt index b5169317..a68201b3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,31 +1,30 @@ # Add here external Python modules dependencies, one per line. Module names # should match https://pypi.python.org/pypi names. For the full spec or # dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html # Runtime dependencies beautifulsoup4 django < 3 django-cors-headers djangorestframework django_webpack_loader django_js_reverse docutils python-magic >= 0.4.0 htmlmin lxml prometheus_client pygments -pypandoc python-dateutil pyyaml requests python-memcached pybadges sentry-sdk typing-extensions # Doc dependencies sphinx sphinxcontrib-httpdomain diff --git a/swh/web/assets/src/bundles/webapp/webapp.css b/swh/web/assets/src/bundles/webapp/webapp.css index fd016b36..a078c7ab 100644 --- a/swh/web/assets/src/bundles/webapp/webapp.css +++ b/swh/web/assets/src/bundles/webapp/webapp.css @@ -1,594 +1,609 @@ /** * Copyright (C) 2018-2019 The Software Heritage developers * See the AUTHORS file at the top-level directory of this distribution * License: GNU Affero General Public License version 3, or any later version * See top-level LICENSE file for more information */ html { height: 100%; overflow-x: hidden; scroll-behavior: auto !important; } body { min-height: 100%; margin: 0; position: relative; padding-bottom: 120px; } a:active, a.active { outline: none; } code { background-color: #f9f2f4; } pre code { background-color: transparent; } footer { background-color: #262626; color: #fff; font-size: 0.8rem; position: absolute; bottom: 0; width: 100%; padding-top: 20px; padding-bottom: 20px; } footer a, footer a:visited, footer a:hover { color: #fecd1b; } footer a:hover { text-decoration: underline; } .link-color { color: #fecd1b; } pre { background-color: #f5f5f5; border: 1px solid #ccc; border-radius: 4px; padding: 9.5px; font-size: 0.8rem; } .btn.active { background-color: #e7e7e7; } .card { margin-bottom: 5px !important; overflow-x: auto; } .navbar-brand { padding: 5px; margin-right: 0; } .table { margin-bottom: 0; } .swh-table thead { background-color: #f2f4f5; border-top: 1px solid rgba(0, 0, 0, 0.2); font-weight: normal; } .swh-table-striped th { border-top: none; } .swh-table-striped tbody tr:nth-child(even) { background-color: #f2f4f5; } .swh-table-striped tbody tr:nth-child(odd) { background-color: #fff; } .swh-web-app-link a { 
text-decoration: none; border: none; } .swh-web-app-link:hover { background-color: #efeff2; } .table > thead > tr > th { border-top: none; border-bottom: 1px solid #e20026; } .table > tbody > tr > td { border-style: none; } .sitename .first-word, .sitename .second-word { color: rgba(0, 0, 0, 0.75); font-weight: normal; font-size: 1.2rem; } .sitename .first-word { font-family: 'Alegreya Sans', sans-serif; } .sitename .second-word { font-family: 'Alegreya', serif; } .swh-counter { font-size: 150%; } @media (max-width: 600px) { .swh-counter-container { margin-top: 1rem; } } .swh-http-error { margin: 0 auto; text-align: center; } .swh-http-error-head { color: #2d353c; font-size: 30px; } .swh-http-error-code { bottom: 60%; color: #2d353c; font-size: 96px; line-height: 80px; margin-bottom: 10px !important; } .swh-http-error-desc { font-size: 12px; color: #647788; text-align: center; } .swh-http-error-desc pre { display: inline-block; text-align: left; max-width: 800px; white-space: pre-wrap; } .popover { max-width: 97%; z-index: 40000; } .modal { text-align: center; padding: 0 !important; z-index: 50000; } .modal::before { content: ''; display: inline-block; height: 100%; vertical-align: middle; margin-right: -4px; } .modal-dialog { display: inline-block; text-align: left; vertical-align: middle; } .dropdown-submenu { position: relative; } .dropdown-submenu .dropdown-menu { top: 0; left: -100%; margin-top: -5px; margin-left: -2px; } .dropdown-item:hover, .dropdown-item:focus { background-color: rgba(0, 0, 0, 0.1); } a.dropdown-left::before { content: "\f0d9"; font-family: 'FontAwesome'; display: block; width: 20px; height: 20px; float: left; margin-left: 0; } #swh-navbar { border-top-style: none; border-left-style: none; border-right-style: none; border-bottom-style: solid; border-bottom-width: 5px; border-image: linear-gradient(to right, rgb(226, 0, 38) 0%, rgb(254, 205, 27) 100%) 1 1 1 1; width: 100%; padding: 5px; margin-bottom: 10px; margin-top: 30px; justify-content: normal; flex-wrap: nowrap; height: 72px; overflow: hidden; } #back-to-top { display: none; position: fixed; bottom: 30px; right: 30px; z-index: 10; } #back-to-top a img { display: block; width: 32px; height: 32px; background-size: 32px 32px; text-indent: -999px; overflow: hidden; } .swh-top-bar { direction: ltr; height: 30px; position: fixed; top: 0; left: 0; width: 100%; z-index: 99999; background-color: #262626; color: #fff; text-align: center; font-size: 14px; } .swh-top-bar ul { margin-top: 4px; padding-left: 0; white-space: nowrap; } .swh-top-bar li { display: inline-block; margin-left: 10px; margin-right: 10px; } .swh-top-bar a, .swh-top-bar a:visited { color: white; } .swh-top-bar a.swh-current-site, .swh-top-bar a.swh-current-site:visited { color: #fecd1b; } .swh-position-right { position: absolute; right: 0; } .swh-donate-link { border: 1px solid #fecd1b; background-color: #e20026; color: white !important; padding: 3px; border-radius: 3px; } .swh-navbar-content h4 { padding-top: 7px; } .swh-navbar-content .bread-crumbs { display: block; margin-left: -40px; } .swh-navbar-content .bread-crumbs li.bc-no-root { padding-top: 7px; } .main-sidebar { margin-top: 30px; } .content-wrapper { background: none; } .brand-image { max-height: 40px; } .brand-link { padding-top: 18.5px; padding-bottom: 18px; padding-left: 4px; border-bottom: 5px solid #e20026 !important; } .navbar-header a, ul.dropdown-menu a, ul.navbar-nav a, ul.nav-sidebar a { border-bottom-style: none; color: #323232; } .swh-sidebar .nav-link.active { color: #323232 
!important; background-color: #e7e7e7 !important; } .swh-image-error { width: 80px; height: auto; } @media (max-width: 600px) { .card { min-width: 80%; } .swh-image-error { width: 40px; height: auto; } .swh-navbar-content h4 { font-size: 1rem; } .swh-donate-link { display: none; } } .form-check-label { padding-top: 4px; } .swh-id-option { display: inline-block; margin-right: 5px; line-height: 1rem; } .nav-pills .nav-link:not(.active):hover { color: rgba(0, 0, 0, 0.55); } .swh-heading-color { color: #e20026 !important; } .sidebar-mini.sidebar-collapse .main-sidebar:hover { width: 4.6rem; } .sidebar-mini.sidebar-collapse .main-sidebar:hover .user-panel > .info, .sidebar-mini.sidebar-collapse .main-sidebar:hover .nav-sidebar .nav-link p, .sidebar-mini.sidebar-collapse .main-sidebar:hover .brand-text { visibility: hidden !important; } .sidebar .nav-link p, .main-sidebar .brand-text, .sidebar .user-panel .info { transition: none; } .sidebar-mini.sidebar-mini.sidebar-collapse .sidebar { padding-right: 0; } .swh-words-logo { position: absolute; top: 0; left: 0; width: 73px; height: 73px; text-align: center; font-size: 10pt; color: rgba(0, 0, 0, 0.75); } .swh-words-logo:hover { text-decoration: none; } .swh-words-logo-swh { line-height: 1; padding-top: 13px; visibility: hidden; } hr.swh-faded-line { border: 0; height: 1px; background-image: linear-gradient(to left, #f0f0f0, #8c8b8b, #f0f0f0); } +/* Ensure that section title with link is colored like standard section title */ +.swh-readme h1 a, +.swh-readme h2 a, +.swh-readme h3 a, +.swh-readme h4 a, +.swh-readme h5 a, +.swh-readme h6 a { + color: #e20026; +} + +/* Make list compact in reStructuredText rendering */ +.swh-rst li p { + margin-bottom: 0; +} + .swh-readme-txt pre { background: none; border: none; } .swh-coverage-col { padding-left: 10px; padding-right: 10px; } .swh-coverage { height: calc(65px + 1em); padding-top: 0.3rem; border: none; } .swh-coverage a { text-decoration: none; } .swh-coverage-logo { display: block; width: 100%; height: 50px; margin-left: auto; margin-right: auto; object-fit: contain; /* polyfill for old browsers, see https://github.com/bfred-it/object-fit-images */ font-family: 'object-fit: contain;'; } .swh-coverage-list { width: 100%; height: 320px; border: none; } tr.swh-tr-hover-highlight:hover td { background: #ededed; } tr.swh-api-doc-route a { text-decoration: none; } .swh-apidoc .col { margin: 10px; } a.toggle-col { text-decoration: none; } a.toggle-col.col-hidden { text-decoration: line-through; } .admonition.warning { background: #fcf8e3; border: 1px solid #faebcc; padding: 15px; border-radius: 4px; } .admonition.warning p { margin-bottom: 0; } .admonition.warning .first { font-size: 1.5rem; } .swh-popover { max-height: 50vh; overflow-y: auto; overflow-x: auto; padding: 0; padding-right: 1.4em; } @media screen and (min-width: 768px) { .swh-popover { max-width: 50vw; } } .swh-metadata-table-row { border-top: 1px solid #ddd !important; } .swh-metadata-table-key { min-width: 200px; max-width: 200px; width: 200px; } .swh-metadata-table-value pre { white-space: pre-wrap; } .d3-wrapper { position: relative; height: 0; width: 100%; padding: 0; /* padding-bottom will be overwritten by JavaScript later */ padding-bottom: 100%; } .d3-wrapper > svg { position: absolute; height: 100%; width: 100%; left: 0; top: 0; } div.d3-tooltip { position: absolute; text-align: center; width: auto; height: auto; padding: 2px; font: 12px sans-serif; background: white; border: 1px solid black; border-radius: 4px; pointer-events: none; 
} .page-link { cursor: pointer; } .wrapper { overflow: hidden; } .swh-badge { padding-bottom: 1rem; cursor: pointer; } .swh-badge-html, .swh-badge-md, .swh-badge-rst { white-space: pre-wrap; } diff --git a/swh/web/browse/utils.py b/swh/web/browse/utils.py index 09d649e0..42ef46cc 100644 --- a/swh/web/browse/utils.py +++ b/swh/web/browse/utils.py @@ -1,1106 +1,1104 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 import magic -import pypandoc import stat import textwrap from collections import defaultdict from threading import Lock from django.core.cache import cache from django.utils.safestring import mark_safe from django.utils.html import escape import sentry_sdk from swh.model.identifiers import persistent_identifier from swh.web.common import highlightjs, service from swh.web.common.exc import NotFoundExc, http_status_code_message from swh.web.common.origin_visits import get_origin_visit from swh.web.common.utils import ( reverse, format_utc_iso_date, get_swh_persistent_id, - swh_object_icons + swh_object_icons, rst_to_html ) from swh.web.config import get_config def get_directory_entries(sha1_git): """Function that retrieves the content of a directory from the archive. The directories entries are first sorted in lexicographical order. Sub-directories and regular files are then extracted. Args: sha1_git: sha1_git identifier of the directory Returns: A tuple whose first member corresponds to the sub-directories list and second member the regular files list Raises: NotFoundExc if the directory is not found """ cache_entry_id = 'directory_entries_%s' % sha1_git cache_entry = cache.get(cache_entry_id) if cache_entry: return cache_entry entries = list(service.lookup_directory(sha1_git)) for e in entries: e['perms'] = stat.filemode(e['perms']) if e['type'] == 'rev': # modify dir entry name to explicitly show it points # to a revision e['name'] = '%s @ %s' % (e['name'], e['target'][:7]) dirs = [e for e in entries if e['type'] in ('dir', 'rev')] files = [e for e in entries if e['type'] == 'file'] dirs = sorted(dirs, key=lambda d: d['name']) files = sorted(files, key=lambda f: f['name']) cache.set(cache_entry_id, (dirs, files)) return dirs, files _lock = Lock() def get_mimetype_and_encoding_for_content(content): """Function that returns the mime type and the encoding associated to a content buffer using the magic module under the hood. Args: content (bytes): a content buffer Returns: A tuple (mimetype, encoding), for instance ('text/plain', 'us-ascii'), associated to the provided content. 
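As a quick illustrative sketch, the helper documented here can be exercised directly; the sample buffer and the exact values returned are assumptions and depend on the installed libmagic build:

from swh.web.browse.utils import get_mimetype_and_encoding_for_content

# hypothetical shell script content
mimetype, encoding = get_mimetype_and_encoding_for_content(b"#!/bin/sh\necho hello\n")
# typically something like ('text/x-shellscript', 'us-ascii'), depending on
# the python-magic / libmagic versions in use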
""" # https://pypi.org/project/python-magic/ # packaged as python3-magic in debian buster if hasattr(magic, 'from_buffer'): m = magic.Magic(mime=True, mime_encoding=True) mime_encoding = m.from_buffer(content) mime_type, encoding = mime_encoding.split(';') encoding = encoding.replace(' charset=', '') # https://pypi.org/project/file-magic/ # packaged as python3-magic in debian stretch else: # TODO: Remove that code when production environment is upgraded # to debian buster # calls to the file-magic API are not thread-safe so they must # be protected with a Lock to guarantee they will succeed _lock.acquire() magic_result = magic.detect_from_content(content) _lock.release() mime_type = magic_result.mime_type encoding = magic_result.encoding return mime_type, encoding # maximum authorized content size in bytes for HTML display # with code highlighting content_display_max_size = get_config()['content_display_max_size'] snapshot_content_max_size = get_config()['snapshot_content_max_size'] def _re_encode_content(mimetype, encoding, content_data): # encode textual content to utf-8 if needed if mimetype.startswith('text/'): # probably a malformed UTF-8 content, re-encode it # by replacing invalid chars with a substitution one if encoding == 'unknown-8bit': content_data = content_data.decode('utf-8', 'replace')\ .encode('utf-8') elif encoding not in ['utf-8', 'binary']: content_data = content_data.decode(encoding, 'replace')\ .encode('utf-8') elif mimetype.startswith('application/octet-stream'): # file may detect a text content as binary # so try to decode it for display encodings = ['us-ascii', 'utf-8'] encodings += ['iso-8859-%s' % i for i in range(1, 17)] for enc in encodings: try: content_data = content_data.decode(enc).encode('utf-8') except Exception as exc: sentry_sdk.capture_exception(exc) else: # ensure display in content view encoding = enc mimetype = 'text/plain' break return mimetype, encoding, content_data def request_content(query_string, max_size=content_display_max_size, raise_if_unavailable=True, re_encode=True): """Function that retrieves a content from the archive. Raw bytes content is first retrieved, then the content mime type. If the mime type is not stored in the archive, it will be computed using Python magic module. 
Args: query_string: a string of the form "[ALGO_HASH:]HASH" where optional ALGO_HASH can be either ``sha1``, ``sha1_git``, ``sha256``, or ``blake2s256`` (default to ``sha1``) and HASH the hexadecimal representation of the hash value max_size: the maximum size for a content to retrieve (default to 1MB, no size limit if None) Returns: A tuple whose first member corresponds to the content raw bytes and second member the content mime type Raises: NotFoundExc if the content is not found """ content_data = service.lookup_content(query_string) filetype = None language = None license = None # requests to the indexer db may fail so properly handle # those cases in order to avoid content display errors try: filetype = service.lookup_content_filetype(query_string) language = service.lookup_content_language(query_string) license = service.lookup_content_license(query_string) except Exception as exc: sentry_sdk.capture_exception(exc) mimetype = 'unknown' encoding = 'unknown' if filetype: mimetype = filetype['mimetype'] encoding = filetype['encoding'] # workaround when encountering corrupted data due to implicit # conversion from bytea to text in the indexer db (see T818) # TODO: Remove that code when all data have been correctly converted if mimetype.startswith('\\'): filetype = None content_data['error_code'] = 200 content_data['error_message'] = '' content_data['error_description'] = '' if not max_size or content_data['length'] < max_size: try: content_raw = service.lookup_content_raw(query_string) except Exception as exc: if raise_if_unavailable: raise exc else: sentry_sdk.capture_exception(exc) content_data['raw_data'] = None content_data['error_code'] = 404 content_data['error_description'] = \ 'The bytes of the content are currently not available in the archive.' # noqa content_data['error_message'] = \ http_status_code_message[content_data['error_code']] else: content_data['raw_data'] = content_raw['data'] if not filetype: mimetype, encoding = \ get_mimetype_and_encoding_for_content(content_data['raw_data']) # noqa if re_encode: mimetype, encoding, raw_data = _re_encode_content( mimetype, encoding, content_data['raw_data']) content_data['raw_data'] = raw_data else: content_data['raw_data'] = None content_data['mimetype'] = mimetype content_data['encoding'] = encoding if language: content_data['language'] = language['lang'] else: content_data['language'] = 'not detected' if license: content_data['licenses'] = ', '.join(license['facts'][0]['licenses']) else: content_data['licenses'] = 'not detected' return content_data _browsers_supported_image_mimes = set(['image/gif', 'image/png', 'image/jpeg', 'image/bmp', 'image/webp', 'image/svg', 'image/svg+xml']) def prepare_content_for_display(content_data, mime_type, path): """Function that prepares a content for HTML display. The function tries to associate a programming language to a content in order to perform syntax highlighting client-side using highlightjs. The language is determined using either the content filename or its mime type. If the mime type corresponds to an image format supported by web browsers, the content will be encoded in base64 for displaying the image. Args: content_data (bytes): raw bytes of the content mime_type (string): mime type of the content path (string): path of the content including filename Returns: A dict containing the content bytes (possibly different from the one provided as parameter if it is an image) under the key 'content_data and the corresponding highlightjs language class under the key 'language'. 
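For illustration only, with made-up sample values, a call and its resulting dict look roughly like:

from swh.web.browse.utils import prepare_content_for_display

prepared = prepare_content_for_display(
    content_data=b"def main():\n    pass\n",
    mime_type="text/x-python",
    path="example/main.py",
)
# roughly {'content_data': 'def main():\n    pass\n',
#          'language': 'python', 'mimetype': 'text/x-python'}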
""" language = highlightjs.get_hljs_language_from_filename(path) if not language: language = highlightjs.get_hljs_language_from_mime_type(mime_type) if not language: language = 'nohighlight' elif mime_type.startswith('application/'): mime_type = mime_type.replace('application/', 'text/') if mime_type.startswith('image/'): if mime_type in _browsers_supported_image_mimes: content_data = base64.b64encode(content_data).decode('ascii') else: content_data = None if mime_type.startswith('image/svg'): mime_type = 'image/svg+xml' if mime_type.startswith('text/'): content_data = content_data.decode('utf-8', errors='replace') return {'content_data': content_data, 'language': language, 'mimetype': mime_type} def process_snapshot_branches(snapshot): """ Process a dictionary describing snapshot branches: extract those targeting revisions and releases, put them in two different lists, then sort those lists in lexicographical order of the branches' names. Args: snapshot_branches (dict): A dict describing the branches of a snapshot as returned for instance by :func:`swh.web.common.service.lookup_snapshot` Returns: tuple: A tuple whose first member is the sorted list of branches targeting revisions and second member the sorted list of branches targeting releases """ snapshot_branches = snapshot['branches'] branches = {} branch_aliases = {} releases = {} revision_to_branch = defaultdict(set) revision_to_release = defaultdict(set) release_to_branch = defaultdict(set) for branch_name, target in snapshot_branches.items(): if not target: # FIXME: display branches with an unknown target anyway continue target_id = target['target'] target_type = target['target_type'] if target_type == 'revision': branches[branch_name] = { 'name': branch_name, 'revision': target_id, } revision_to_branch[target_id].add(branch_name) elif target_type == 'release': release_to_branch[target_id].add(branch_name) elif target_type == 'alias': branch_aliases[branch_name] = target_id # FIXME: handle pointers to other object types def _enrich_release_branch(branch, release): releases[branch] = { 'name': release['name'], 'branch_name': branch, 'date': format_utc_iso_date(release['date']), 'id': release['id'], 'message': release['message'], 'target_type': release['target_type'], 'target': release['target'], } def _enrich_revision_branch(branch, revision): branches[branch].update({ 'revision': revision['id'], 'directory': revision['directory'], 'date': format_utc_iso_date(revision['date']), 'message': revision['message'] }) releases_info = service.lookup_release_multiple( release_to_branch.keys() ) for release in releases_info: branches_to_update = release_to_branch[release['id']] for branch in branches_to_update: _enrich_release_branch(branch, release) if release['target_type'] == 'revision': revision_to_release[release['target']].update( branches_to_update ) revisions = service.lookup_revision_multiple( set(revision_to_branch.keys()) | set(revision_to_release.keys()) ) for revision in revisions: if not revision: continue for branch in revision_to_branch[revision['id']]: _enrich_revision_branch(branch, revision) for release in revision_to_release[revision['id']]: releases[release]['directory'] = revision['directory'] for branch_alias, branch_target in branch_aliases.items(): if branch_target in branches: branches[branch_alias] = dict(branches[branch_target]) else: snp = service.lookup_snapshot(snapshot['id'], branches_from=branch_target, branches_count=1) if snp and branch_target in snp['branches']: if snp['branches'][branch_target] is None: 
continue target_type = snp['branches'][branch_target]['target_type'] target = snp['branches'][branch_target]['target'] if target_type == 'revision': branches[branch_alias] = snp['branches'][branch_target] revision = service.lookup_revision(target) _enrich_revision_branch(branch_alias, revision) elif target_type == 'release': release = service.lookup_release(target) _enrich_release_branch(branch_alias, release) if branch_alias in branches: branches[branch_alias]['name'] = branch_alias ret_branches = list(sorted(branches.values(), key=lambda b: b['name'])) ret_releases = list(sorted(releases.values(), key=lambda b: b['name'])) return ret_branches, ret_releases def get_snapshot_content(snapshot_id): """Returns the lists of branches and releases associated to a swh snapshot. That list is put in cache in order to speedup the navigation in the swh-web/browse ui. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. Args: snapshot_id (str): hexadecimal representation of the snapshot identifier Returns: A tuple with two members. The first one is a list of dict describing the snapshot branches. The second one is a list of dict describing the snapshot releases. Raises: NotFoundExc if the snapshot does not exist """ cache_entry_id = 'swh_snapshot_%s' % snapshot_id cache_entry = cache.get(cache_entry_id) if cache_entry: return cache_entry['branches'], cache_entry['releases'] branches = [] releases = [] if snapshot_id: snapshot = service.lookup_snapshot( snapshot_id, branches_count=snapshot_content_max_size) branches, releases = process_snapshot_branches(snapshot) cache.set(cache_entry_id, { 'branches': branches, 'releases': releases, }) return branches, releases def get_origin_visit_snapshot(origin_info, visit_ts=None, visit_id=None, snapshot_id=None): """Returns the lists of branches and releases associated to a swh origin for a given visit. The visit is expressed by a timestamp. In the latter case, the closest visit from the provided timestamp will be used. If no visit parameter is provided, it returns the list of branches found for the latest visit. That list is put in cache in order to speedup the navigation in the swh-web/browse ui. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. Args: origin_info (dict): a dict filled with origin information (id, url, type) visit_ts (int or str): an ISO date string or Unix timestamp to parse visit_id (int): optional visit id for disambiguation in case several visits have the same timestamp Returns: A tuple with two members. The first one is a list of dict describing the origin branches for the given visit. The second one is a list of dict describing the origin releases for the given visit. Raises: NotFoundExc if the origin or its visit are not found """ visit_info = get_origin_visit(origin_info, visit_ts, visit_id, snapshot_id) return get_snapshot_content(visit_info['snapshot']) def gen_link(url, link_text=None, link_attrs=None): """ Utility function for generating an HTML link to insert in Django templates. Args: url (str): an url link_text (str): optional text for the produced link, if not provided the url will be used link_attrs (dict): optional attributes (e.g. 
class) to add to the link Returns: An HTML link in the form 'link_text' """ attrs = ' ' if link_attrs: for k, v in link_attrs.items(): attrs += '%s="%s" ' % (k, v) if not link_text: link_text = url link = '%s' \ % (attrs, escape(url), escape(link_text)) return mark_safe(link) def _snapshot_context_query_params(snapshot_context): query_params = None if snapshot_context and snapshot_context['origin_info']: origin_info = snapshot_context['origin_info'] query_params = {'origin': origin_info['url']} if 'timestamp' in snapshot_context['url_args']: query_params['timestamp'] = \ snapshot_context['url_args']['timestamp'] if 'visit_id' in snapshot_context['query_params']: query_params['visit_id'] = \ snapshot_context['query_params']['visit_id'] elif snapshot_context: query_params = {'snapshot_id': snapshot_context['snapshot_id']} return query_params def gen_revision_url(revision_id, snapshot_context=None): """ Utility function for generating an url to a revision. Args: revision_id (str): a revision id snapshot_context (dict): if provided, generate snapshot-dependent browsing url Returns: str: The url to browse the revision """ query_params = _snapshot_context_query_params(snapshot_context) return reverse('browse-revision', url_args={'sha1_git': revision_id}, query_params=query_params) def gen_revision_link(revision_id, shorten_id=False, snapshot_context=None, link_text='Browse', link_attrs={'class': 'btn btn-default btn-sm', 'role': 'button'}): """ Utility function for generating a link to a revision HTML view to insert in Django templates. Args: revision_id (str): a revision id shorten_id (boolean): whether to shorten the revision id to 7 characters for the link text snapshot_context (dict): if provided, generate snapshot-dependent browsing link link_text (str): optional text for the generated link (the revision id will be used by default) link_attrs (dict): optional attributes (e.g. class) to add to the link Returns: str: An HTML link in the form 'revision_id' """ if not revision_id: return None revision_url = gen_revision_url(revision_id, snapshot_context) if shorten_id: return gen_link(revision_url, revision_id[:7], link_attrs) else: if not link_text: link_text = revision_id return gen_link(revision_url, link_text, link_attrs) def gen_directory_link(sha1_git, snapshot_context=None, link_text='Browse', link_attrs={'class': 'btn btn-default btn-sm', 'role': 'button'}): """ Utility function for generating a link to a directory HTML view to insert in Django templates. Args: sha1_git (str): directory identifier link_text (str): optional text for the generated link (the directory id will be used by default) link_attrs (dict): optional attributes (e.g. class) to add to the link Returns: An HTML link in the form 'link_text' """ if not sha1_git: return None query_params = _snapshot_context_query_params(snapshot_context) directory_url = reverse('browse-directory', url_args={'sha1_git': sha1_git}, query_params=query_params) if not link_text: link_text = sha1_git return gen_link(directory_url, link_text, link_attrs) def gen_snapshot_link(snapshot_id, snapshot_context=None, link_text='Browse', link_attrs={'class': 'btn btn-default btn-sm', 'role': 'button'}): """ Utility function for generating a link to a snapshot HTML view to insert in Django templates. Args: snapshot_id (str): snapshot identifier link_text (str): optional text for the generated link (the snapshot id will be used by default) link_attrs (dict): optional attributes (e.g. 
class) to add to the link Returns: An HTML link in the form 'link_text' """ query_params = _snapshot_context_query_params(snapshot_context) snapshot_url = reverse('browse-snapshot', url_args={'snapshot_id': snapshot_id}, query_params=query_params) if not link_text: link_text = snapshot_id return gen_link(snapshot_url, link_text, link_attrs) def gen_content_link(sha1_git, snapshot_context=None, link_text='Browse', link_attrs={'class': 'btn btn-default btn-sm', 'role': 'button'}): """ Utility function for generating a link to a content HTML view to insert in Django templates. Args: sha1_git (str): content identifier link_text (str): optional text for the generated link (the content sha1_git will be used by default) link_attrs (dict): optional attributes (e.g. class) to add to the link Returns: An HTML link in the form 'link_text' """ if not sha1_git: return None query_params = _snapshot_context_query_params(snapshot_context) content_url = reverse('browse-content', url_args={'query_string': 'sha1_git:' + sha1_git}, query_params=query_params) if not link_text: link_text = sha1_git return gen_link(content_url, link_text, link_attrs) def get_revision_log_url(revision_id, snapshot_context=None): """ Utility function for getting the URL for a revision log HTML view (possibly in the context of an origin). Args: revision_id (str): revision identifier the history heads to snapshot_context (dict): if provided, generate snapshot-dependent browsing link Returns: The revision log view URL """ query_params = {'revision': revision_id} if snapshot_context and snapshot_context['origin_info']: origin_info = snapshot_context['origin_info'] url_args = {'origin_url': origin_info['url']} if 'timestamp' in snapshot_context['url_args']: url_args['timestamp'] = \ snapshot_context['url_args']['timestamp'] if 'visit_id' in snapshot_context['query_params']: query_params['visit_id'] = \ snapshot_context['query_params']['visit_id'] revision_log_url = reverse('browse-origin-log', url_args=url_args, query_params=query_params) elif snapshot_context: url_args = {'snapshot_id': snapshot_context['snapshot_id']} revision_log_url = reverse('browse-snapshot-log', url_args=url_args, query_params=query_params) else: revision_log_url = reverse('browse-revision-log', url_args={'sha1_git': revision_id}) return revision_log_url def gen_revision_log_link(revision_id, snapshot_context=None, link_text='Browse', link_attrs={'class': 'btn btn-default btn-sm', 'role': 'button'}): """ Utility function for generating a link to a revision log HTML view (possibly in the context of an origin) to insert in Django templates. Args: revision_id (str): revision identifier the history heads to snapshot_context (dict): if provided, generate snapshot-dependent browsing link link_text (str): optional text to use for the generated link (the revision id will be used by default) link_attrs (dict): optional attributes (e.g. class) to add to the link Returns: An HTML link in the form 'link_text' """ if not revision_id: return None revision_log_url = get_revision_log_url(revision_id, snapshot_context) if not link_text: link_text = revision_id return gen_link(revision_log_url, link_text, link_attrs) def gen_person_mail_link(person, link_text=None): """ Utility function for generating a mail link to a person to insert in Django templates. 
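A hedged usage sketch, where the person dict below is made up for illustration:

from swh.web.browse.utils import gen_person_mail_link

person = {
    "name": "Jane Doe",
    "email": "jane@example.org",
    "fullname": "Jane Doe <jane@example.org>",
}
link = gen_person_mail_link(person)
# yields a mailto link for jane@example.org with "Jane Doe" as link text,
# marked safe for templates; without an email the bare name is returned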
Args: person (dict): dictionary containing person data (*name*, *email*, *fullname*) link_text (str): optional text to use for the generated mail link (the person name will be used by default) Returns: str: A mail link to the person or the person name if no email is present in person data """ person_name = person['name'] or person['fullname'] or 'None' if link_text is None: link_text = person_name person_email = person['email'] if person['email'] else None if person_email is None and '@' in person_name and ' ' not in person_name: person_email = person_name if person_email: return gen_link(url='mailto:%s' % person_email, link_text=link_text) else: return person_name def gen_release_link(sha1_git, snapshot_context=None, link_text='Browse', link_attrs={'class': 'btn btn-default btn-sm', 'role': 'button'}): """ Utility function for generating a link to a release HTML view to insert in Django templates. Args: sha1_git (str): release identifier link_text (str): optional text for the generated link (the release id will be used by default) link_attrs (dict): optional attributes (e.g. class) to add to the link Returns: An HTML link in the form 'link_text' """ query_params = _snapshot_context_query_params(snapshot_context) release_url = reverse('browse-release', url_args={'sha1_git': sha1_git}, query_params=query_params) if not link_text: link_text = sha1_git return gen_link(release_url, link_text, link_attrs) def format_log_entries(revision_log, per_page, snapshot_context=None): """ Utility functions that process raw revision log data for HTML display. Its purpose is to: * add links to relevant browse views * format date in human readable format * truncate the message log Args: revision_log (list): raw revision log as returned by the swh-web api per_page (int): number of log entries per page snapshot_context (dict): if provided, generate snapshot-dependent browsing link """ revision_log_data = [] for i, rev in enumerate(revision_log): if i == per_page: break author_name = 'None' author_fullname = 'None' committer_fullname = 'None' if rev['author']: author_name = gen_person_mail_link(rev['author']) author_fullname = rev['author']['fullname'] if rev['committer']: committer_fullname = rev['committer']['fullname'] author_date = format_utc_iso_date(rev['date']) committer_date = format_utc_iso_date(rev['committer_date']) tooltip = 'revision %s\n' % rev['id'] tooltip += 'author: %s\n' % author_fullname tooltip += 'author date: %s\n' % author_date tooltip += 'committer: %s\n' % committer_fullname tooltip += 'committer date: %s\n\n' % committer_date if rev['message']: tooltip += textwrap.indent(rev['message'], ' '*4) revision_log_data.append({ 'author': author_name, 'id': rev['id'][:7], 'message': rev['message'], 'date': author_date, 'commit_date': committer_date, 'url': gen_revision_url(rev['id'], snapshot_context), 'tooltip': tooltip }) return revision_log_data def get_snapshot_context(snapshot_id=None, origin_url=None, timestamp=None, visit_id=None): """ Utility function to compute relevant information when navigating the archive in a snapshot context. The snapshot is either referenced by its id or it will be retrieved from an origin visit. Args: snapshot_id (str): hexadecimal representation of a snapshot identifier, all other parameters will be ignored if it is provided origin_url (str): the origin_url (e.g. 
https://github.com/(user)/(repo)/) timestamp (str): a datetime string for retrieving the closest visit of the origin visit_id (int): optional visit id for disambiguation in case of several visits with the same timestamp Returns: A dict with the following entries: * origin_info: dict containing origin information * visit_info: dict containing visit information * branches: the list of branches for the origin found during the visit * releases: the list of releases for the origin found during the visit * origin_browse_url: the url to browse the origin * origin_branches_url: the url to browse the origin branches * origin_releases_url': the url to browse the origin releases * origin_visit_url: the url to browse the snapshot of the origin found during the visit * url_args: dict containing url arguments to use when browsing in the context of the origin and its visit Raises: NotFoundExc: if no snapshot is found for the visit of an origin. """ origin_info = None visit_info = None url_args = None query_params = {} branches = [] releases = [] browse_url = None visit_url = None branches_url = None releases_url = None swh_type = 'snapshot' if origin_url: swh_type = 'origin' origin_info = service.lookup_origin({'url': origin_url}) visit_info = get_origin_visit(origin_info, timestamp, visit_id, snapshot_id) fmt_date = format_utc_iso_date(visit_info['date']) visit_info['fmt_date'] = fmt_date snapshot_id = visit_info['snapshot'] if not snapshot_id: raise NotFoundExc('No snapshot associated to the visit of origin ' '%s on %s' % (escape(origin_url), fmt_date)) # provided timestamp is not necessarily equals to the one # of the retrieved visit, so get the exact one in order # use it in the urls generated below if timestamp: timestamp = visit_info['date'] branches, releases = \ get_origin_visit_snapshot(origin_info, timestamp, visit_id, snapshot_id) url_args = {'origin_url': origin_info['url']} query_params = {'visit_id': visit_id} browse_url = reverse('browse-origin-visits', url_args=url_args) if timestamp: url_args['timestamp'] = format_utc_iso_date(timestamp, '%Y-%m-%dT%H:%M:%S') visit_url = reverse('browse-origin-directory', url_args=url_args, query_params=query_params) visit_info['url'] = visit_url branches_url = reverse('browse-origin-branches', url_args=url_args, query_params=query_params) releases_url = reverse('browse-origin-releases', url_args=url_args, query_params=query_params) elif snapshot_id: branches, releases = get_snapshot_content(snapshot_id) url_args = {'snapshot_id': snapshot_id} browse_url = reverse('browse-snapshot', url_args=url_args) branches_url = reverse('browse-snapshot-branches', url_args=url_args) releases_url = reverse('browse-snapshot-releases', url_args=url_args) releases = list(reversed(releases)) snapshot_sizes = service.lookup_snapshot_sizes(snapshot_id) is_empty = sum(snapshot_sizes.values()) == 0 swh_snp_id = persistent_identifier('snapshot', snapshot_id) return { 'swh_type': swh_type, 'swh_object_id': swh_snp_id, 'snapshot_id': snapshot_id, 'snapshot_sizes': snapshot_sizes, 'is_empty': is_empty, 'origin_info': origin_info, 'visit_info': visit_info, 'branches': branches, 'releases': releases, 'branch': None, 'release': None, 'browse_url': browse_url, 'branches_url': branches_url, 'releases_url': releases_url, 'url_args': url_args, 'query_params': query_params } # list of common readme names ordered by preference # (lower indices have higher priority) _common_readme_names = [ "readme.markdown", "readme.md", "readme.rst", "readme.txt", "readme" ] def 
get_readme_to_display(readmes): """ Process a list of readme files found in a directory in order to find the adequate one to display. Args: readmes: a list of dict where keys are readme file names and values are readme sha1s Returns: A tuple (readme_name, readme_sha1) """ readme_name = None readme_url = None readme_sha1 = None readme_html = None lc_readmes = {k.lower(): {'orig_name': k, 'sha1': v} for k, v in readmes.items()} # look for readme names according to the preference order # defined by the _common_readme_names list for common_readme_name in _common_readme_names: if common_readme_name in lc_readmes: readme_name = lc_readmes[common_readme_name]['orig_name'] readme_sha1 = lc_readmes[common_readme_name]['sha1'] readme_url = reverse('browse-content-raw', url_args={'query_string': readme_sha1}, query_params={'re_encode': 'true'}) break # otherwise pick the first readme like file if any if not readme_name and len(readmes.items()) > 0: readme_name = next(iter(readmes)) readme_sha1 = readmes[readme_name] readme_url = reverse('browse-content-raw', url_args={'query_string': readme_sha1}, query_params={'re_encode': 'true'}) # convert rst README to html server side as there is # no viable solution to perform that task client side if readme_name and readme_name.endswith('.rst'): cache_entry_id = 'readme_%s' % readme_sha1 cache_entry = cache.get(cache_entry_id) if cache_entry: readme_html = cache_entry else: try: rst_doc = request_content(readme_sha1) - readme_html = pypandoc.convert_text(rst_doc['raw_data'], - 'html', format='rst') + readme_html = rst_to_html(rst_doc['raw_data']) cache.set(cache_entry_id, readme_html) except Exception as exc: sentry_sdk.capture_exception(exc) readme_html = 'Readme bytes are not available' return readme_name, readme_url, readme_html def get_swh_persistent_ids(swh_objects, snapshot_context=None): """ Returns a list of dict containing info related to persistent identifiers of swh objects. 
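The readme hunk above swaps pypandoc for the new docutils-based helper; condensed, the new rendering path reads as follows (the identifier below is a placeholder):

from django.core.cache import cache

from swh.web.browse.utils import request_content
from swh.web.common.utils import rst_to_html

readme_sha1 = "..."  # placeholder sha1 of a README.rst content object
rst_doc = request_content(readme_sha1)
readme_html = rst_to_html(rst_doc["raw_data"])
cache.set("readme_%s" % readme_sha1, readme_html)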
Args: swh_objects (list): a list of dict with the following keys: * type: swh object type (content/directory/release/revision/snapshot) * id: swh object id snapshot_context (dict): optional parameter describing the snapshot in which the object has been found Returns: list: a list of dict with the following keys: * object_type: the swh object type (content/directory/release/revision/snapshot) * object_icon: the swh object icon to use in HTML views * swh_id: the computed swh object persistent identifier * swh_id_url: the url resolving the persistent identifier * show_options: boolean indicating if the persistent id options must be displayed in persistent ids HTML view """ swh_ids = [] for swh_object in swh_objects: if not swh_object['id']: continue swh_id = get_swh_persistent_id(swh_object['type'], swh_object['id']) show_options = swh_object['type'] == 'content' or \ (snapshot_context and snapshot_context['origin_info'] is not None) object_icon = swh_object_icons[swh_object['type']] swh_ids.append({ 'object_type': swh_object['type'], 'object_id': swh_object['id'], 'object_icon': object_icon, 'swh_id': swh_id, 'swh_id_url': reverse('browse-swh-id', url_args={'swh_id': swh_id}), 'show_options': show_options }) return swh_ids diff --git a/swh/web/common/swh_templatetags.py b/swh/web/common/swh_templatetags.py index 81fdb4e6..18e20c69 100644 --- a/swh/web/common/swh_templatetags.py +++ b/swh/web/common/swh_templatetags.py @@ -1,183 +1,160 @@ # Copyright (C) 2017-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information -from inspect import cleandoc import json import re +from inspect import cleandoc + from django import template from django.core.serializers.json import DjangoJSONEncoder from django.utils.safestring import mark_safe -from docutils.core import publish_parts -from docutils.writers.html4css1 import Writer, HTMLTranslator - import sentry_sdk from swh.web.common.origin_save import get_savable_visit_types +from swh.web.common.utils import rst_to_html register = template.Library() -class NoHeaderHTMLTranslator(HTMLTranslator): - """ - Docutils translator subclass to customize the generation of HTML - from reST-formatted docstrings - """ - def __init__(self, document): - super().__init__(document) - self.body_prefix = [] - self.body_suffix = [] - - def visit_bullet_list(self, node): - self.context.append((self.compact_simple, self.compact_p)) - self.compact_p = None - self.compact_simple = self.is_compactable(node) - self.body.append(self.starttag(node, 'ul', CLASS='docstring')) - - -DOCSTRING_WRITER = Writer() -DOCSTRING_WRITER.translator_class = NoHeaderHTMLTranslator - - @register.filter def safe_docstring_display(docstring): """ Utility function to htmlize reST-formatted documentation in browsable api. """ - docstring = cleandoc(docstring) - return publish_parts(docstring, writer=DOCSTRING_WRITER)['html_body'] + return rst_to_html(cleandoc(docstring)) @register.filter def urlize_links_and_mails(text): """Utility function for decorating api links in browsable api. Args: text: whose content matching links should be transformed into contextual API or Browse html links. Returns The text transformed if any link is found. The text as is otherwise. 
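As an illustrative aside, the simple arithmetic and string filters defined further down in this module are plain functions and can be checked directly:

from swh.web.common.swh_templatetags import key_value, mul, split, sub

assert sub(10, 3) == 7
assert mul(6, 7) == 42
assert key_value({"origin": "https://example.org"}, "origin") == "https://example.org"
assert split("a/b/c", "/") == ["a", "b", "c"]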
""" try: if 'href="' not in text: text = re.sub(r'(http.*)', r'\1', text) return re.sub(r'([^ <>"]+@[^ <>"]+)', r'\1', text) except Exception as exc: sentry_sdk.capture_exception(exc) return text @register.filter def urlize_header_links(text): """Utility function for decorating headers links in browsable api. Args text: Text whose content contains Link header value Returns: The text transformed with html link if any link is found. The text as is otherwise. """ links = text.split(',') ret = '' for i, link in enumerate(links): ret += re.sub(r'<(http.*)>', r'<\1>', link) # add one link per line and align them if i != len(links) - 1: ret += '\n ' return ret @register.filter def jsonify(obj): """Utility function for converting a django template variable to JSON in order to use it in script tags. Args obj: Any django template context variable Returns: JSON representation of the variable. """ return mark_safe(json.dumps(obj, cls=DjangoJSONEncoder)) @register.filter def sub(value, arg): """Django template filter for subtracting two numbers Args: value (int/float): the value to subtract from arg (int/float): the value to subtract to Returns: int/float: The subtraction result """ return value - arg @register.filter def mul(value, arg): """Django template filter for multiplying two numbers Args: value (int/float): the value to multiply from arg (int/float): the value to multiply with Returns: int/float: The multiplication result """ return value * arg @register.filter def key_value(dict, key): """Django template filter to get a value in a dictionary. Args: dict (dict): a dictionary key (str): the key to lookup value Returns: The requested value in the dictionary """ return dict[key] @register.filter def visit_type_savable(visit_type): """Django template filter to check if a save request can be created for a given visit type. Args: visit_type (str): the type of visit Returns: If the visit type is saveable or not """ return visit_type in get_savable_visit_types() @register.filter def split(value, arg): """Django template filter to split a string. 
Args: value (str): the string to split arg (str): the split separator Returns: list: the split string parts """ return value.split(arg) diff --git a/swh/web/common/utils.py b/swh/web/common/utils.py index bf0cc8c6..36781821 100644 --- a/swh/web/common/utils.py +++ b/swh/web/common/utils.py @@ -1,439 +1,479 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information -import docutils.parsers.rst -import docutils.utils import re from datetime import datetime, timezone from dateutil import parser as date_parser from dateutil import tz from typing import Optional, Dict, Any +import docutils.parsers.rst +import docutils.utils + +from docutils.core import publish_parts +from docutils.writers.html5_polyglot import Writer, HTMLTranslator + from django.urls import reverse as django_reverse from django.http import QueryDict, HttpRequest from prometheus_client.registry import CollectorRegistry from rest_framework.authentication import SessionAuthentication from swh.model.exceptions import ValidationError from swh.model.hashutil import hash_to_bytes from swh.model.identifiers import ( persistent_identifier, parse_persistent_identifier, CONTENT, DIRECTORY, ORIGIN, RELEASE, REVISION, SNAPSHOT ) from swh.web.common.exc import BadInputExc from swh.web.config import get_config SWH_WEB_METRICS_REGISTRY = CollectorRegistry(auto_describe=True) swh_object_icons = { 'branch': 'fa fa-code-fork', 'branches': 'fa fa-code-fork', 'content': 'fa fa-file-text', 'directory': 'fa fa-folder', 'person': 'fa fa-user', 'revisions history': 'fa fa-history', 'release': 'fa fa-tag', 'releases': 'fa fa-tag', 'revision': 'octicon-git-commit', 'snapshot': 'fa fa-camera', 'visits': 'fa fa-calendar', } def reverse(viewname: str, url_args: Optional[Dict[str, Any]] = None, query_params: Optional[Dict[str, Any]] = None, current_app: Optional[str] = None, urlconf: Optional[str] = None, request: Optional[HttpRequest] = None) -> str: """An override of django reverse function supporting query parameters. Args: viewname: the name of the django view from which to compute a url url_args: dictionary of url arguments indexed by their names query_params: dictionary of query parameters to append to the reversed url current_app: the name of the django app tighten to the view urlconf: url configuration module request: build an absolute URI if provided Returns: str: the url of the requested view with processed arguments and query parameters """ if url_args: url_args = {k: v for k, v in url_args.items() if v is not None} url = django_reverse(viewname, urlconf=urlconf, kwargs=url_args, current_app=current_app) if query_params: query_params = {k: v for k, v in query_params.items() if v} if query_params and len(query_params) > 0: query_dict = QueryDict('', mutable=True) for k in sorted(query_params.keys()): query_dict[k] = query_params[k] url += ('?' 
+ query_dict.urlencode(safe='/;:')) if request is not None: url = request.build_absolute_uri(url) return url def datetime_to_utc(date): """Returns datetime in UTC without timezone info Args: date (datetime.datetime): input datetime with timezone info Returns: datetime.datetime: datetime in UTC without timezone info """ if date.tzinfo: return date.astimezone(tz.gettz('UTC')).replace(tzinfo=timezone.utc) else: return date def parse_timestamp(timestamp): """Given a time or timestamp (as string), parse the result as UTC datetime. Returns: datetime.datetime: a timezone-aware datetime representing the parsed value or None if the parsing fails. Samples: - 2016-01-12 - 2016-01-12T09:19:12+0100 - Today is January 1, 2047 at 8:21:00AM - 1452591542 """ if not timestamp: return None try: date = date_parser.parse(timestamp, ignoretz=False, fuzzy=True) return datetime_to_utc(date) except Exception: try: return datetime.utcfromtimestamp(float(timestamp)).replace( tzinfo=timezone.utc) except (ValueError, OverflowError) as e: raise BadInputExc(e) def shorten_path(path): """Shorten the given path: for each hash present, only return the first 8 characters followed by an ellipsis""" sha256_re = r'([0-9a-f]{8})[0-9a-z]{56}' sha1_re = r'([0-9a-f]{8})[0-9a-f]{32}' ret = re.sub(sha256_re, r'\1...', path) return re.sub(sha1_re, r'\1...', ret) def format_utc_iso_date(iso_date, fmt='%d %B %Y, %H:%M UTC'): """Turns a string representation of an ISO 8601 date string to UTC and format it into a more human readable one. For instance, from the following input string: '2017-05-04T13:27:13+02:00' the following one is returned: '04 May 2017, 11:27 UTC'. Custom format string may also be provided as parameter Args: iso_date (str): a string representation of an ISO 8601 date fmt (str): optional date formatting string Returns: str: a formatted string representation of the input iso date """ if not iso_date: return iso_date date = parse_timestamp(iso_date) return date.strftime(fmt) def gen_path_info(path): """Function to generate path data navigation for use with a breadcrumb in the swh web ui. For instance, from a path /folder1/folder2/folder3, it returns the following list:: [{'name': 'folder1', 'path': 'folder1'}, {'name': 'folder2', 'path': 'folder1/folder2'}, {'name': 'folder3', 'path': 'folder1/folder2/folder3'}] Args: path: a filesystem path Returns: list: a list of path data for navigation as illustrated above. """ path_info = [] if path: sub_paths = path.strip('/').split('/') path_from_root = '' for p in sub_paths: path_from_root += '/' + p path_info.append({'name': p, 'path': path_from_root.strip('/')}) return path_info def get_swh_persistent_id(object_type, object_id, scheme_version=1): """ Returns the persistent identifier for a swh object based on: * the object type * the object id * the swh identifiers scheme version Args: object_type (str): the swh object type (content/directory/release/revision/snapshot) object_id (str): the swh object id (hexadecimal representation of its hash value) scheme_version (int): the scheme version of the swh persistent identifiers Returns: str: the swh object persistent identifier Raises: BadInputExc: if the provided parameters do not enable to generate a valid identifier """ try: swh_id = persistent_identifier(object_type, object_id, scheme_version) except ValidationError as e: raise BadInputExc('Invalid object (%s) for swh persistent id. 
%s' % (object_id, e)) else: return swh_id def resolve_swh_persistent_id(swh_id, query_params=None): """ Try to resolve a Software Heritage persistent id into an url for browsing the pointed object. Args: swh_id (str): a Software Heritage persistent identifier query_params (django.http.QueryDict): optional dict filled with query parameters to append to the browse url Returns: dict: a dict with the following keys: * **swh_id_parsed (swh.model.identifiers.PersistentId)**: the parsed identifier * **browse_url (str)**: the url for browsing the pointed object """ swh_id_parsed = get_persistent_identifier(swh_id) object_type = swh_id_parsed.object_type object_id = swh_id_parsed.object_id browse_url = None query_dict = QueryDict('', mutable=True) if query_params and len(query_params) > 0: for k in sorted(query_params.keys()): query_dict[k] = query_params[k] if 'origin' in swh_id_parsed.metadata: query_dict['origin'] = swh_id_parsed.metadata['origin'] if object_type == CONTENT: query_string = 'sha1_git:' + object_id fragment = '' if 'lines' in swh_id_parsed.metadata: lines = swh_id_parsed.metadata['lines'].split('-') fragment += '#L' + lines[0] if len(lines) > 1: fragment += '-L' + lines[1] browse_url = reverse('browse-content', url_args={'query_string': query_string}, query_params=query_dict) + fragment elif object_type == DIRECTORY: browse_url = reverse('browse-directory', url_args={'sha1_git': object_id}, query_params=query_dict) elif object_type == RELEASE: browse_url = reverse('browse-release', url_args={'sha1_git': object_id}, query_params=query_dict) elif object_type == REVISION: browse_url = reverse('browse-revision', url_args={'sha1_git': object_id}, query_params=query_dict) elif object_type == SNAPSHOT: browse_url = reverse('browse-snapshot', url_args={'snapshot_id': object_id}, query_params=query_dict) elif object_type == ORIGIN: raise BadInputExc(('Origin PIDs (Persistent Identifiers) are not ' 'publicly resolvable because they are for ' 'internal usage only')) return {'swh_id_parsed': swh_id_parsed, 'browse_url': browse_url} def parse_rst(text, report_level=2): """ Parse a reStructuredText string with docutils. Args: text (str): string with reStructuredText markups in it report_level (int): level of docutils report messages to print (1 info 2 warning 3 error 4 severe 5 none) Returns: docutils.nodes.document: a parsed docutils document """ parser = docutils.parsers.rst.Parser() components = (docutils.parsers.rst.Parser,) settings = docutils.frontend.OptionParser( components=components).get_default_values() settings.report_level = report_level document = docutils.utils.new_document('rst-doc', settings=settings) parser.parse(text, document) return document def get_client_ip(request): """ Return the client IP address from an incoming HTTP request. Args: request (django.http.HttpRequest): the incoming HTTP request Returns: str: The client IP address """ x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') if x_forwarded_for: ip = x_forwarded_for.split(',')[0] else: ip = request.META.get('REMOTE_ADDR') return ip def context_processor(request): """ Django context processor used to inject variables in all swh-web templates. """ return { 'swh_object_icons': swh_object_icons, 'available_languages': None, 'swh_client_config': get_config()['client_config'], } class EnforceCSRFAuthentication(SessionAuthentication): """ Helper class to enforce CSRF validation on a DRF view when a user is not authenticated. 
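As an illustrative sketch (the input string is an assumption), the parse_rst helper defined earlier in this module returns a docutils document tree:

from swh.web.common.utils import parse_rst

document = parse_rst("Title\n=====\n\nSome *emphasized* text.\n", report_level=5)
# 'document' is a docutils.nodes.document; report_level=5 silences docutils
# messages, following the level mapping given in parse_rst's docstring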
""" + def authenticate(self, request): user = getattr(request._request, 'user', None) self.enforce_csrf(request) return (user, None) def resolve_branch_alias(snapshot: Dict[str, Any], branch: Optional[Dict[str, Any]] ) -> Optional[Dict[str, Any]]: """ Resolve branch alias in snapshot content. Args: snapshot: a full snapshot content branch: a branch alias contained in the snapshot Returns: The real snapshot branch that got aliased. """ while branch and branch['target_type'] == 'alias': if branch['target'] in snapshot['branches']: branch = snapshot['branches'][branch['target']] else: from swh.web.common import service snp = service.lookup_snapshot( snapshot['id'], branches_from=branch['target'], branches_count=1) if snp and branch['target'] in snp['branches']: branch = snp['branches'][branch['target']] else: branch = None return branch def get_persistent_identifier(persistent_id): """Check if a persistent identifier is valid. Args: persistent_id: A string representing a Software Heritage persistent identifier. Raises: BadInputExc: if the provided persistent identifier can not be parsed. Return: A persistent identifier object. """ try: pid_object = parse_persistent_identifier(persistent_id) except ValidationError as ve: raise BadInputExc('Error when parsing identifier: %s' % ' '.join(ve.messages)) else: return pid_object def group_swh_persistent_identifiers(persistent_ids): """ Groups many Software Heritage persistent identifiers into a dictionary depending on their type. Args: persistent_ids (list): a list of Software Heritage persistent identifier objects Returns: A dictionary with: keys: persistent identifier types values: list(bytes) persistent identifiers id Raises: BadInputExc: if one of the provided persistent identifier can not be parsed. """ pids_by_type = { - CONTENT: [], - DIRECTORY: [], - REVISION: [], - RELEASE: [], - SNAPSHOT: [] - } + CONTENT: [], + DIRECTORY: [], + REVISION: [], + RELEASE: [], + SNAPSHOT: [] + } for pid in persistent_ids: obj_id = pid.object_id obj_type = pid.object_type pids_by_type[obj_type].append(hash_to_bytes(obj_id)) return pids_by_type + + +class _NoHeaderHTMLTranslator(HTMLTranslator): + """ + Docutils translator subclass to customize the generation of HTML + from reST-formatted docstrings + """ + + def __init__(self, document): + super().__init__(document) + self.body_prefix = [] + self.body_suffix = [] + + +_HTML_WRITER = Writer() +_HTML_WRITER.translator_class = _NoHeaderHTMLTranslator + + +def rst_to_html(rst: str) -> str: + """ + Convert reStructuredText document into HTML. + + Args: + rst: A string containing a reStructuredText document + + Returns: + Body content of the produced HTML conversion. + + """ + settings = { + 'initial_header_level': 2, + } + pp = publish_parts(rst, writer=_HTML_WRITER, + settings_overrides=settings) + return f'
{pp["html_body"]}
' diff --git a/swh/web/templates/includes/readme-display.html b/swh/web/templates/includes/readme-display.html index 4fe8e2dd..1f922790 100644 --- a/swh/web/templates/includes/readme-display.html +++ b/swh/web/templates/includes/readme-display.html @@ -1,38 +1,38 @@ {% comment %} Copyright (C) 2017-2019 The Software Heritage developers See the AUTHORS file at the top-level directory of this distribution License: GNU Affero General Public License version 3, or any later version See top-level LICENSE file for more information {% endcomment %} {% load swh_templatetags %} {% if readme_name %}

{{ readme_name }}

-
+
{% if readme_html %} {% elif readme_name.lower == 'readme' or readme_name.lower == 'readme.txt' %} {% elif readme_name.lower == 'readme.org' %} {% else %} {% endif %} {% endif %} diff --git a/swh/web/tests/common/test_templatetags.py b/swh/web/tests/common/test_templatetags.py index e039dd9f..d4076eb9 100644 --- a/swh/web/tests/common/test_templatetags.py +++ b/swh/web/tests/common/test_templatetags.py @@ -1,64 +1,61 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information -import pytest - from swh.web.common.swh_templatetags import ( urlize_links_and_mails, urlize_header_links, safe_docstring_display ) def test_urlize_http_link(): link = 'https://example.com/api/1/abc/' expected_content = f'{link}' assert urlize_links_and_mails(link) == expected_content def test_urlize_email(): email = 'someone@example.com' expected_content = f'{email}' assert urlize_links_and_mails(email) == expected_content def test_urlize_header_links(): next_link = 'https://example.com/api/1/abc/' prev_link = 'https://example.com/api/1/def/' content = f'<{next_link}>; rel="next"\n<{prev_link}>; rel="prev"' expected_content = ( f'<{next_link}>; rel="next"\n' f'<{prev_link}>; rel="prev"') assert urlize_header_links(content) == expected_content -# remove deprecation warnings related to docutils -@pytest.mark.filterwarnings( - 'ignore:.*U.*mode is deprecated:DeprecationWarning') def test_safe_docstring_display(): # update api link with html links content with links docstring = ( 'This is my list header:\n\n' ' - Here is item 1, with a continuation\n' ' line right here\n' ' - Here is item 2\n\n' ' Here is something that is not part of the list' ) expected_docstring = ( + '
' '

This is my list header:

\n' - '
    \n' - '
  • Here is item 1, with a continuation\n' - 'line right here
  • \n' - '
  • Here is item 2
  • \n' + '
      \n' + '
    • Here is item 1, with a continuation\n' + 'line right here

    • \n' + '
    • Here is item 2

    • \n' '
    \n' '

    Here is something that is not part of the list

    \n' + '
' ) assert safe_docstring_display(docstring) == expected_docstring diff --git a/swh/web/tests/common/test_utils.py b/swh/web/tests/common/test_utils.py index b1ca03f1..3fae5065 100644 --- a/swh/web/tests/common/test_utils.py +++ b/swh/web/tests/common/test_utils.py @@ -1,114 +1,155 @@ # Copyright (C) 2017-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import pytest from swh.web.common import utils from swh.web.common.exc import BadInputExc def test_shorten_path_noop(): noops = [ '/api/', '/browse/', '/content/symbol/foobar/' ] for noop in noops: assert utils.shorten_path(noop) == noop def test_shorten_path_sha1(): sha1 = 'aafb16d69fd30ff58afdd69036a26047f3aebdc6' short_sha1 = sha1[:8] + '...' templates = [ '/api/1/content/sha1:%s/', '/api/1/content/sha1_git:%s/', '/api/1/directory/%s/', '/api/1/content/sha1:%s/ctags/', ] for template in templates: assert utils.shorten_path(template % sha1) == template % short_sha1 def test_shorten_path_sha256(): sha256 = ('aafb16d69fd30ff58afdd69036a26047' '213add102934013a014dfca031c41aef') short_sha256 = sha256[:8] + '...' templates = [ '/api/1/content/sha256:%s/', '/api/1/directory/%s/', '/api/1/content/sha256:%s/filetype/', ] for template in templates: assert utils.shorten_path(template % sha256) == template % short_sha256 def test_parse_timestamp(): input_timestamps = [ None, '2016-01-12', '2016-01-12T09:19:12+0100', 'Today is January 1, 2047 at 8:21:00AM', '1452591542', ] output_dates = [ None, datetime.datetime(2016, 1, 12, 0, 0), datetime.datetime(2016, 1, 12, 8, 19, 12, tzinfo=datetime.timezone.utc), datetime.datetime(2047, 1, 1, 8, 21), datetime.datetime(2016, 1, 12, 9, 39, 2, tzinfo=datetime.timezone.utc), ] for ts, exp_date in zip(input_timestamps, output_dates): assert utils.parse_timestamp(ts) == exp_date def test_format_utc_iso_date(): assert (utils.format_utc_iso_date('2017-05-04T13:27:13+02:00') == '04 May 2017, 11:27 UTC') def test_gen_path_info(): input_path = '/home/user/swh-environment/swh-web/' expected_result = [ {'name': 'home', 'path': 'home'}, {'name': 'user', 'path': 'home/user'}, {'name': 'swh-environment', 'path': 'home/user/swh-environment'}, {'name': 'swh-web', 'path': 'home/user/swh-environment/swh-web'} ] path_info = utils.gen_path_info(input_path) assert path_info == expected_result input_path = 'home/user/swh-environment/swh-web' path_info = utils.gen_path_info(input_path) assert path_info == expected_result def test_get_swh_persistent_id(): swh_object_type = 'content' sha1_git = 'aafb16d69fd30ff58afdd69036a26047f3aebdc6' expected_swh_id = 'swh:1:cnt:' + sha1_git assert (utils.get_swh_persistent_id(swh_object_type, sha1_git) == expected_swh_id) with pytest.raises(BadInputExc) as e: utils.get_swh_persistent_id('foo', sha1_git) assert e.match('Invalid object') with pytest.raises(BadInputExc) as e: utils.get_swh_persistent_id(swh_object_type, 'not a valid id') assert e.match('Invalid object') + + +def test_rst_to_html(): + rst = ( + 'Section\n' + '=======\n\n' + '**Some strong text**\n\n' + 'Subsection\n' + '----------\n\n' + '* This is a bulleted list.\n' + '* It has two items, the second\n' + ' item uses two lines.\n' + '\n' + '1. This is a numbered list.\n' + '2. It has two items too.\n' + '\n' + '#. This is a numbered list.\n' + '#. It has two items too.\n' + ) + + expected_html = ( + '

Section

\n' + '

Some strong text

\n' + '
\n' + '

Subsection

\n' + '
    \n' + '
  • This is a bulleted list.

  • \n' + '
  • It has two items, the second\n' + 'item uses two lines.

  • \n' + '
\n' + '
    \n' + '
  1. This is a numbered list.

  2. \n' + '
  3. It has two items too.

  4. \n' + '
  5. This is a numbered list.

  6. \n' + '
  7. It has two items too.

  8. \n' + '
\n' + '
\n' + '
' + ) + + assert utils.rst_to_html(rst) == expected_html
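The new rst_to_html() helper can also be sanity checked outside of pytest with
a short snippet like the following (a sketch, assuming docutils and a
configured swh.web environment; the expected fragment mirrors the test above):

    from swh.web.common.utils import rst_to_html

    html = rst_to_html('**Some strong text**\n')
    # _NoHeaderHTMLTranslator drops the <html>/<head> boilerplate, so only
    # the body of the conversion is returned, wrapped in a single <div>.
    assert '<strong>Some strong text</strong>' in html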