diff --git a/swh/web/add_forge_now/api_views.py b/swh/web/add_forge_now/api_views.py index 99fd8a93..46778707 100644 --- a/swh/web/add_forge_now/api_views.py +++ b/swh/web/add_forge_now/api_views.py @@ -1,391 +1,391 @@ # Copyright (C) 2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import json from typing import Any, Dict, Union from django.core.exceptions import ObjectDoesNotExist from django.core.paginator import Paginator from django.db import transaction from django.forms import CharField, ModelForm from django.http import HttpResponseBadRequest from django.http.request import HttpRequest from django.http.response import HttpResponse, HttpResponseForbidden from rest_framework import serializers from rest_framework.request import Request from rest_framework.response import Response from swh.web.add_forge_now.models import Request as AddForgeRequest from swh.web.add_forge_now.models import RequestActorRole as AddForgeNowRequestActorRole from swh.web.add_forge_now.models import RequestHistory as AddForgeNowRequestHistory from swh.web.add_forge_now.models import RequestStatus as AddForgeNowRequestStatus from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.auth.utils import is_add_forge_now_moderator from swh.web.utils import reverse from swh.web.utils.exc import BadInputExc def _block_while_testing(): """Replaced by tests to check concurrency behavior""" pass class AddForgeNowRequestForm(ModelForm): forge_contact_comment = CharField( required=False, ) class Meta: model = AddForgeRequest fields = ( "forge_type", "forge_url", "forge_contact_email", "forge_contact_name", "forge_contact_comment", "submitter_forward_username", ) class AddForgeNowRequestHistoryForm(ModelForm): new_status = CharField( max_length=200, required=False, ) class Meta: model = AddForgeNowRequestHistory fields = ("text", "new_status") class AddForgeNowRequestSerializer(serializers.ModelSerializer): inbound_email_address = serializers.CharField() forge_domain = serializers.CharField() class Meta: model = AddForgeRequest fields = "__all__" class AddForgeNowRequestPublicSerializer(serializers.ModelSerializer): """Serializes AddForgeRequest without private fields.""" class Meta: model = AddForgeRequest fields = ("id", "forge_url", "forge_type", "status", "submission_date") class AddForgeNowRequestHistorySerializer(serializers.ModelSerializer): message_source_url = serializers.SerializerMethodField() class Meta: model = AddForgeNowRequestHistory exclude = ("request", "message_source") def get_message_source_url(self, request_history): if request_history.message_source is None: return None return reverse( "forge-add-message-source", url_args={"id": request_history.pk}, request=self.context["request"], ) class AddForgeNowRequestHistoryPublicSerializer(serializers.ModelSerializer): class Meta: model = AddForgeNowRequestHistory fields = ("id", "date", "new_status", "actor_role") @api_route( r"/add-forge/request/create/", "api-1-add-forge-request-create", methods=["POST"], ) -@api_doc("/add-forge/request/create") +@api_doc("/add-forge/request/create", category="Request archival") @format_docstring() @transaction.atomic def api_add_forge_request_create(request: Union[HttpRequest, Request]) -> HttpResponse: """ .. 
http:post:: /api/1/add-forge/request/create/ Create a new request to add a forge to the list of those crawled regularly by Software Heritage. .. warning:: That endpoint is not publicly available and requires authentication in order to be able to request it. {common_headers} :[0-9]+)/update/", "api-1-add-forge-request-update", methods=["POST"], ) -@api_doc("/add-forge/request/update", tags=["hidden"]) +@api_doc("/add-forge/request/update", category="Request archival", tags=["hidden"]) @format_docstring() @transaction.atomic def api_add_forge_request_update( request: Union[HttpRequest, Request], id: int ) -> HttpResponse: """ .. http:post:: /api/1/add-forge/request/update/ Update a request to add a forge to the list of those crawled regularly by Software Heritage. .. warning:: That endpoint is not publicly available and requires authentication in order to be able to request it. {common_headers} :[0-9]+)/get/", "api-1-add-forge-request-get", methods=["GET"], ) -@api_doc("/add-forge/request/get") +@api_doc("/add-forge/request/get", category="Request archival") @format_docstring() def api_add_forge_request_get(request: Request, id: int): """ .. http:get:: /api/1/add-forge/request/get/ Return all details about an add-forge request. {common_headers} :param int id: add-forge request identifier :statuscode 200: request details successfully returned :statuscode 400: request identifier does not exist """ try: add_forge_request = AddForgeRequest.objects.get(id=id) except ObjectDoesNotExist: raise BadInputExc("Request id does not exist") request_history = AddForgeNowRequestHistory.objects.filter( request=add_forge_request ).order_by("id") if is_add_forge_now_moderator(request.user): data = AddForgeNowRequestSerializer(add_forge_request).data history = AddForgeNowRequestHistorySerializer( request_history, many=True, context={"request": request} ).data else: data = AddForgeNowRequestPublicSerializer(add_forge_request).data history = AddForgeNowRequestHistoryPublicSerializer( request_history, many=True ).data return {"request": data, "history": history} diff --git a/swh/web/api/apidoc.py b/swh/web/api/apidoc.py index fefef39e..be5c1b6d 100644 --- a/swh/web/api/apidoc.py +++ b/swh/web/api/apidoc.py @@ -1,472 +1,475 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import functools from functools import wraps import os import re import textwrap from typing import List import docutils.nodes import docutils.parsers.rst import docutils.utils from rest_framework.decorators import api_view from swh.web.api.apiresponse import make_api_response -from swh.web.api.apiurls import APIUrls +from swh.web.api.apiurls import APIUrls, CategoryId from swh.web.utils import parse_rst class _HTTPDomainDocVisitor(docutils.nodes.NodeVisitor): """ docutils visitor for walking on a parsed docutils document containing sphinx httpdomain roles. 
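A possible client-side view of the ``/api/1/add-forge/request/create/`` endpoint registered above, to make the expected payload concrete. This is only a sketch: the bearer token is a placeholder, the forge values are invented, and the payload simply mirrors the fields declared on ``AddForgeNowRequestForm``::

    import requests

    # Placeholder token; the endpoint requires an authenticated user.
    TOKEN = "<api-token>"

    response = requests.post(
        "https://archive.softwareheritage.org/api/1/add-forge/request/create/",
        headers={"Authorization": f"Bearer {TOKEN}"},
        json={
            # Same field names as AddForgeNowRequestForm.Meta.fields.
            "forge_type": "gitlab",
            "forge_url": "https://gitlab.example.org",
            "forge_contact_email": "admin@example.org",
            "forge_contact_name": "Forge Admin",
            "forge_contact_comment": "Please archive our forge.",
            "submitter_forward_username": True,
        },
    )
    response.raise_for_status()
    print(response.json())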
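``_HTTPDomainDocVisitor`` below walks a docutils tree built from an endpoint docstring and splits each field name into a role, a type and an argument name. A standalone sketch of that parsing step, using only docutils (the sample docstring is invented)::

    import docutils.core
    import docutils.nodes

    SAMPLE = """Get information about a content object.

    :param string hash_type: hashing algorithm used to compute the checksum
    :statuscode 200: no error
    """

    # publish_doctree parses reStructuredText into a node tree; each
    # ":param string hash_type: ..." line becomes a field node whose first
    # child is the field name and whose second child is the field body.
    document = docutils.core.publish_doctree(
        SAMPLE, settings_overrides={"report_level": 5}
    )
    for field in document.traverse(docutils.nodes.field):
        name_parts = field[0].astext().split(" ")  # e.g. ['param', 'string', 'hash_type']
        body = field[1].astext().strip()
        print(name_parts, "->", body)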
Its purpose is to extract relevant info regarding swh api endpoints (for instance url arguments) from their docstring written using sphinx httpdomain; and produce the main description back into a ReST string """ # httpdomain roles we want to parse (based on sphinxcontrib.httpdomain 1.6) parameter_roles = ("param", "parameter", "arg", "argument") request_json_object_roles = ("reqjsonobj", "reqjson", "jsonobj", ">json") response_json_array_roles = ("resjsonarr", ">jsonarr") query_parameter_roles = ("queryparameter", "queryparam", "qparam", "query") request_header_roles = ("header", "resheader", "responseheader") status_code_roles = ("statuscode", "status", "code") def __init__(self, document, data): super().__init__(document) self.data = data self.args_set = set() self.params_set = set() self.inputs_set = set() self.returns_set = set() self.status_codes_set = set() self.reqheaders_set = set() self.resheaders_set = set() self.current_json_obj = None self.current_field_name = None def _default_visit(self, node: docutils.nodes.Element) -> str: """Simply visits a text node, drops its start and end tags, visits the children, and concatenates their results.""" return "".join(map(self.dispatch_visit, node.children)) def visit_emphasis(self, node: docutils.nodes.emphasis) -> str: return f"*{self._default_visit(node)}*" def visit_strong(self, node: docutils.nodes.emphasis) -> str: return f"**{self._default_visit(node)}**" def visit_reference(self, node: docutils.nodes.reference) -> str: text = self._default_visit(node) refuri = node.attributes.get("refuri") if refuri is not None: return f"`{text} <{refuri}>`__" else: return f"`{text}`_" def visit_target(self, node: docutils.nodes.reference) -> str: parts = ["\n"] parts.extend( f".. _{name}: {node.attributes['refuri']}" for name in node.attributes["names"] ) return "\n".join(parts) def visit_literal(self, node: docutils.nodes.literal) -> str: return f"``{self._default_visit(node)}``" def visit_field_name(self, node: docutils.nodes.field_name) -> str: self.current_field_name = node.astext() return "" def visit_field_body(self, node: docutils.nodes.field_body) -> str: text = self._default_visit(node).strip() assert text, str(node) field_data = self.current_field_name.split(" ") # Parameters if field_data[0] in self.parameter_roles: if field_data[2] not in self.args_set: self.data["args"].append( {"name": field_data[2], "type": field_data[1], "doc": text} ) self.args_set.add(field_data[2]) # Query Parameters if field_data[0] in self.query_parameter_roles: if field_data[2] not in self.params_set: self.data["params"].append( {"name": field_data[2], "type": field_data[1], "doc": text} ) self.params_set.add(field_data[2]) # Request data type if ( field_data[0] in self.request_json_array_roles or field_data[0] in self.request_json_object_roles ): # array if field_data[0] in self.request_json_array_roles: self.data["input_type"] = "array" # object else: self.data["input_type"] = "object" # input object field if field_data[2] not in self.inputs_set: self.data["inputs"].append( {"name": field_data[2], "type": field_data[1], "doc": text} ) self.inputs_set.add(field_data[2]) self.current_json_obj = self.data["inputs"][-1] # Response type if ( field_data[0] in self.response_json_array_roles or field_data[0] in self.response_json_object_roles ): # array if field_data[0] in self.response_json_array_roles: self.data["return_type"] = "array" # object else: self.data["return_type"] = "object" # returned object field if field_data[2] not in self.returns_set: 
self.data["returns"].append( {"name": field_data[2], "type": field_data[1], "doc": text} ) self.returns_set.add(field_data[2]) self.current_json_obj = self.data["returns"][-1] # Status Codes if field_data[0] in self.status_code_roles: if field_data[1] not in self.status_codes_set: self.data["status_codes"].append({"code": field_data[1], "doc": text}) self.status_codes_set.add(field_data[1]) # Request Headers if field_data[0] in self.request_header_roles: if field_data[1] not in self.reqheaders_set: self.data["reqheaders"].append({"name": field_data[1], "doc": text}) self.reqheaders_set.add(field_data[1]) # Response Headers if field_data[0] in self.response_header_roles: if field_data[1] not in self.resheaders_set: resheader = {"name": field_data[1], "doc": text} self.data["resheaders"].append(resheader) self.resheaders_set.add(field_data[1]) if ( resheader["name"] == "Content-Type" and resheader["doc"] == "application/octet-stream" ): self.data["return_type"] = "octet stream" # Don't return anything in the description; these nodes only add text # to other fields return "" # We ignore these nodes and handle their subtrees directly in # visit_field_name and visit_field_body visit_field = visit_field_list = _default_visit def visit_paragraph(self, node: docutils.nodes.paragraph) -> str: """ Visit relevant paragraphs to parse """ # only parsed top level paragraphs text = self._default_visit(node) return "\n\n" + text def visit_literal_block(self, node: docutils.nodes.literal_block) -> str: """ Visit literal blocks """ text = node.astext() return f"\n\n::\n\n{textwrap.indent(text, ' ')}\n" def visit_bullet_list(self, node: docutils.nodes.bullet_list) -> str: parts = ["\n\n"] for child in node.traverse(): # process list item if isinstance(child, docutils.nodes.paragraph): line_text = self.dispatch_visit(child) parts.append("\t* %s\n" % textwrap.indent(line_text, "\t ").strip()) return "".join(parts) # visit_bullet_list collects and handles this with a more global view: visit_list_item = _default_visit def visit_warning(self, node: docutils.nodes.warning) -> str: text = self._default_visit(node) return "\n\n.. warning::\n%s\n" % textwrap.indent(text, "\t") def visit_Text(self, node: docutils.nodes.Text) -> str: """Leaf node""" return str(node).replace("\n", " ") # Prettier in generated HTML def visit_problematic(self, node: docutils.nodes.problematic) -> str: # api urls cleanup to generate valid links afterwards text = self._default_visit(node) subs_made = 1 while subs_made: (text, subs_made) = re.subn(r"(:http:.*)(\(\w+\))", r"\1", text) subs_made = 1 while subs_made: (text, subs_made) = re.subn(r"(:http:.*)(\[.*\])", r"\1", text) text = re.sub(r"([^:])//", r"\1/", text) # transform references to api endpoints doc into valid rst links text = re.sub(":http:get:`([^,`]*)`", r"`\1 <\1doc/>`_", text) # transform references to some elements into bold text text = re.sub(":http:header:`(.*)`", r"**\1**", text) text = re.sub(":func:`(.*)`", r"**\1**", text) text = re.sub(":mod:`(.*)`", r"**\1**", text) # extract example urls if ":swh_web_api:" in text: # Extract examples to their own section examples_str = re.sub(":swh_web_api:`(.+)`.*", r"/api/1/\1", text) self.data["examples"] += examples_str.split("\n") return text def visit_block_quote(self, node: docutils.nodes.block_quote) -> str: return self._default_visit(node) return ( f".. 
code-block::\n" f"{textwrap.indent(self._default_visit(node), ' ')}\n" ) def visit_title_reference(self, node: docutils.nodes.title_reference) -> str: text = self._default_visit(node) raise Exception( f"Unexpected title reference. " f"Possible cause: you used `{text}` instead of ``{text}``" ) def visit_document(self, node: docutils.nodes.document) -> None: text = self._default_visit(node) # Strip examples; they are displayed separately text = re.split("\n\\*\\*Examples?:\\*\\*\n", text)[0] self.data["description"] = text.strip() def visit_system_message(self, node): return "" def unknown_visit(self, node) -> str: raise NotImplementedError( f"Unknown node type: {node.__class__.__name__}. Value: {node}" ) def unknown_departure(self, node): pass def _parse_httpdomain_doc(doc, data): doc_lines = doc.split("\n") doc_lines_filtered = [] urls = defaultdict(list) default_http_methods = ["HEAD", "OPTIONS"] # httpdomain is a sphinx extension that is unknown to docutils but # fortunately we can still parse its directives' content, # so remove lines with httpdomain directives before executing the # rst parser from docutils for doc_line in doc_lines: if ".. http" not in doc_line: doc_lines_filtered.append(doc_line) else: url = doc_line[doc_line.find("/") :] # emphasize url arguments for html rendering url = re.sub(r"\((\w+)\)", r" **\(\1\)** ", url) method = re.search(r"http:(\w+)::", doc_line).group(1) urls[url].append(method.upper()) for url, methods in urls.items(): data["urls"].append({"rule": url, "methods": methods + default_http_methods}) # parse the rst docstring and do not print system messages about # unknown httpdomain roles document = parse_rst("\n".join(doc_lines_filtered), report_level=5) # remove the system_message nodes from the parsed document for node in document.traverse(docutils.nodes.system_message): node.parent.remove(node) # visit the document nodes to extract relevant endpoint info visitor = _HTTPDomainDocVisitor(document, data) document.walkabout(visitor) class APIDocException(Exception): """ Custom exception to signal errors in the use of the APIDoc decorators """ def api_doc( route: str, + *, + category: CategoryId, noargs: bool = False, tags: List[str] = [], api_version: str = "1", ): """ Decorator for an API endpoint implementation used to generate a dedicated view displaying its HTML documentation. The documentation will be generated from the endpoint docstring based on sphinxcontrib-httpdomain format. Args: route: documentation page's route noargs: set to True if the route has no arguments, and its result should be displayed anytime its documentation is requested. Default to False tags: Further information on api endpoints. 
Two values are possibly expected: * hidden: remove the entry points from the listing * upcoming: display the entry point but it is not followable * deprecated: display the entry point as deprecated in the index api_version: api version string """ tags_set = set(tags) # @api_doc() Decorator call def decorator(f): # if the route is not hidden, add it to the index if "hidden" not in tags_set: doc_data = get_doc_data(f, route, noargs) doc_desc = doc_data["description"] APIUrls.add_doc_route( route, + category, re.split(r"\.\s", doc_desc)[0], noargs=noargs, api_version=api_version, tags=tags_set, ) # create a dedicated view to display endpoint HTML doc @api_view(["GET", "HEAD"]) @wraps(f) def doc_view(request): doc_data = get_doc_data(f, route, noargs) return make_api_response(request, None, doc_data) route_name = "%s-doc" % route[1:-1].replace("/", "-") urlpattern = f"^{api_version}{route}doc/$" view_name = "api-%s-%s" % (api_version, route_name) APIUrls.add_url_pattern(urlpattern, doc_view, view_name) @wraps(f) def documented_view(request, **kwargs): doc_data = get_doc_data(f, route, noargs) try: return {"data": f(request, **kwargs), "doc_data": doc_data} except Exception as exc: exc.doc_data = doc_data raise exc return documented_view return decorator @functools.lru_cache(maxsize=32) def get_doc_data(f, route, noargs): """ Build documentation data for the decorated api endpoint function """ data = { "description": "", "response_data": None, "urls": [], "args": [], "params": [], "input_type": "", "inputs": [], "resheaders": [], "reqheaders": [], "return_type": "", "returns": [], "status_codes": [], "examples": [], "route": route, "noargs": noargs, } if not f.__doc__: raise APIDocException( "apidoc: expected a docstring" " for function %s" % (f.__name__,) ) # use raw docstring as endpoint documentation if sphinx # httpdomain is not used if ".. 
http" not in f.__doc__: data["description"] = f.__doc__ # else parse the sphinx httpdomain docstring with docutils # (except when building the swh-web documentation through autodoc # sphinx extension, not needed and raise errors with sphinx >= 1.7) elif "SWH_DOC_BUILD" not in os.environ: _parse_httpdomain_doc(f.__doc__, data) # process input/returned object info for nicer html display inputs_list = "" returns_list = "" for inp in data["inputs"]: # special case for array of non object type, for instance # :jsonarr string -: an array of string if ret["name"] != "-": returns_list += "\t* **%s (%s)**: %s\n" % ( ret["name"], ret["type"], textwrap.indent(ret["doc"], "\t "), ) data["inputs_list"] = inputs_list data["returns_list"] = returns_list return data DOC_COMMON_HEADERS = """ :reqheader Accept: the requested response content type, either ``application/json`` (default) or ``application/yaml`` :resheader Content-Type: this depends on :http:header:`Accept` header of request""" DOC_RESHEADER_LINK = """ :resheader Link: indicates that a subsequent result page is available and contains the url pointing to it """ DEFAULT_SUBSTITUTIONS = { "common_headers": DOC_COMMON_HEADERS, "resheader_link": DOC_RESHEADER_LINK, } def format_docstring(**substitutions): def decorator(f): f.__doc__ = f.__doc__.format(**{**DEFAULT_SUBSTITUTIONS, **substitutions}) return f return decorator diff --git a/swh/web/api/apiurls.py b/swh/web/api/apiurls.py index eac41e28..3d389204 100644 --- a/swh/web/api/apiurls.py +++ b/swh/web/api/apiurls.py @@ -1,125 +1,133 @@ # Copyright (C) 2017-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import functools from typing import Dict, List, Optional +from typing_extensions import Literal + from django.http.response import HttpResponseBase from rest_framework.decorators import api_view from swh.web.api import throttling from swh.web.api.apiresponse import make_api_response from swh.web.utils.urlsindex import UrlsIndex +CategoryId = Literal[ + "Archive", "Batch download", "Metadata", "Request archival", "Miscellaneous", "test" +] + class APIUrls(UrlsIndex): """ Class to manage API documentation URLs. - Indexes all routes documented using apidoc's decorators. 
- Tracks endpoint/request processing method relationships for use in generating related urls in API documentation """ _apidoc_routes: Dict[str, Dict[str, str]] = {} scope = "api" @classmethod def get_app_endpoints(cls) -> Dict[str, Dict[str, str]]: return cls._apidoc_routes @classmethod def add_doc_route( cls, route: str, + category: CategoryId, docstring: str, noargs: bool = False, api_version: str = "1", **kwargs, ) -> None: """ Add a route to the self-documenting API reference """ route_name = route[1:-1].replace("/", "-") if not noargs: route_name = "%s-doc" % route_name route_view_name = "api-%s-%s" % (api_version, route_name) if route not in cls._apidoc_routes: d = { + "category": category, "docstring": docstring, "route": "/api/%s%s" % (api_version, route), "route_view_name": route_view_name, } for k, v in kwargs.items(): d[k] = v cls._apidoc_routes[route] = d def api_route( url_pattern: str, - view_name: Optional[str] = None, + view_name: str, methods: List[str] = ["GET", "HEAD", "OPTIONS"], throttle_scope: str = "swh_api", api_version: str = "1", checksum_args: Optional[List[str]] = None, never_cache: bool = False, ): """ Decorator to ease the registration of an API endpoint using the Django REST Framework. Args: url_pattern: the url pattern used by DRF to identify the API route view_name: the name of the API view associated to the route used to reverse the url methods: array of HTTP methods supported by the API route throttle_scope: Named scope for rate limiting api_version: web API version checksum_args: list of view argument names holding checksum values never_cache: define if api response must be cached """ url_pattern = "^api/" + api_version + url_pattern + "$" def decorator(f): # create a DRF view from the wrapped function @api_view(methods) @throttling.throttle_scope(throttle_scope) @functools.wraps(f) def api_view_f(request, **kwargs): # never_cache will be handled in apiresponse module request.never_cache = never_cache response = f(request, **kwargs) doc_data = None # check if response has been forwarded by api_doc decorator if isinstance(response, dict) and "doc_data" in response: doc_data = response["doc_data"] response = response["data"] # check if HTTP response needs to be created if not isinstance(response, HttpResponseBase): api_response = make_api_response( request, data=response, doc_data=doc_data ) else: api_response = response return api_response # small hacks for correctly generating API endpoints index doc api_view_f.__name__ = f.__name__ api_view_f.http_method_names = methods # register the route and its view in the endpoints index APIUrls.add_url_pattern(url_pattern, api_view_f, view_name) if checksum_args: APIUrls.add_redirect_for_checksum_args( view_name, [url_pattern], checksum_args ) return f return decorator diff --git a/swh/web/api/templates/api-endpoints.html b/swh/web/api/templates/api-endpoints.html index b46cfe42..7928949b 100644 --- a/swh/web/api/templates/api-endpoints.html +++ b/swh/web/api/templates/api-endpoints.html @@ -1,83 +1,78 @@ {% extends "layout.html" %} {% comment %} Copyright (C) 2015-2019 The Software Heritage developers See the AUTHORS file at the top-level directory of this distribution License: GNU Affero General Public License version 3, or any later version See top-level LICENSE file for more information {% endcomment %} {% load swh_templatetags %} {% block title %} Endpoints – Software Heritage API {% endblock %} {% block navbar-content %} {% endblock %} {% block content %}
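The new ``category`` argument is keyword-only (note the bare ``*`` in ``api_doc``) and typed with a ``Literal``, so a static type checker can reject a category name that is not listed in ``CategoryId`` even though nothing is enforced at runtime. A minimal sketch of the same pattern in isolation (``register_doc_route`` is a stand-in name, not the real function)::

    from typing_extensions import Literal

    CategoryId = Literal[
        "Archive", "Batch download", "Metadata", "Request archival", "Miscellaneous", "test"
    ]


    def register_doc_route(route: str, *, category: CategoryId) -> None:
        # The bare "*" forces call sites to spell the category out, e.g.
        # register_doc_route("/content/", category="Archive").
        print(f"{route} -> {category}")


    register_doc_route("/content/", category="Archive")
    # register_doc_route("/content/", category="Achive")
    # ^ runs fine at runtime, but mypy/pyright flag the misspelled literal value.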

Below you can find a list of the available endpoints for version 1 of the Software Heritage API. For a more general introduction, please refer to the API overview.

Endpoints marked "available" are considered stable for the current version of the API; endpoints marked "upcoming" are work in progress that will be stabilized in the near future.

-
+{% for category, routes in doc_routes %} +

{{ category }}

+ - {% for route, doc in doc_routes %} + {% for doc in routes %} {% if "upcoming" in doc.tags %} - {% else %} - {% endif %} {% endfor %}
Endpoint Description
+ {% url doc.route_view_name %} + {{ doc.route }} {% if "deprecated" in doc.tags %}(deprecated){% endif %} {{ doc.doc_intro | docstring_display | safe }}
-
+{% endfor %} {% endblock %} diff --git a/swh/web/api/views/content.py b/swh/web/api/views/content.py index a537ac11..6936b23e 100644 --- a/swh/web/api/views/content.py +++ b/swh/web/api/views/content.py @@ -1,342 +1,342 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import io from typing import Optional from django.http import FileResponse from rest_framework.request import Request from swh.web.api import utils from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.views.utils import api_lookup from swh.web.utils import archive from swh.web.utils.exc import NotFoundExc @api_route( r"/content/(?P[0-9a-z_:]*[0-9a-f]+)/filetype/", "api-1-content-filetype", checksum_args=["q"], ) -@api_doc("/content/filetype/") +@api_doc("/content/filetype/", category="Metadata") @format_docstring() def api_content_filetype(request: Request, q: str): """ .. http:get:: /api/1/content/[(hash_type):](hash)/filetype/ Get information about the detected MIME type of a content object. :param string hash_type: optional parameter specifying which hashing algorithm has been used to compute the content checksum. It can be either ``sha1``, ``sha1_git``, ``sha256`` or ``blake2s256``. If that parameter is not provided, it is assumed that the hashing algorithm used is ``sha1``. :param string hash: hexadecimal representation of the checksum value computed with the specified hashing algorithm. :>json object content_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/` for getting information about the content :>json string encoding: the detected content encoding :>json string id: the **sha1** identifier of the content :>json string mimetype: the detected MIME type of the content :>json object tool: information about the tool used to detect the content filetype {common_headers} :statuscode 200: no error :statuscode 400: an invalid **hash_type** or **hash** has been provided :statuscode 404: requested content can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`content/sha1:dc2830a9e72f23c1dfebef4413003221baa5fb62/filetype/` """ return api_lookup( archive.lookup_content_filetype, q, notfound_msg="No filetype information found for content {}.".format(q), enrich_fn=utils.enrich_metadata_endpoint, request=request, ) @api_route( r"/content/(?P[0-9a-z_:]*[0-9a-f]+)/language/", "api-1-content-language", checksum_args=["q"], ) -@api_doc("/content/language/") +@api_doc("/content/language/", category="Metadata") @format_docstring() def api_content_language(request: Request, q: str): """ .. http:get:: /api/1/content/[(hash_type):](hash)/language/ Get information about the programming language used in a content object. Note: this endpoint currently returns no data. :param string hash_type: optional parameter specifying which hashing algorithm has been used to compute the content checksum. It can be either ``sha1``, ``sha1_git``, ``sha256`` or ``blake2s256``. If that parameter is not provided, it is assumed that the hashing algorithm used is ``sha1``. :param string hash: hexadecimal representation of the checksum value computed with the specified hashing algorithm. 
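The reworked ``api-endpoints.html`` template now loops over ``(category, routes)`` pairs instead of a flat ``(route, doc)`` list. The view that feeds ``doc_routes`` is not part of this diff, so the helper below is only an assumed sketch of how the registered endpoints could be bucketed by their new ``category`` key::

    from collections import defaultdict
    from typing import Dict, List, Tuple


    def group_routes_by_category(
        apidoc_routes: Dict[str, Dict[str, str]],
    ) -> List[Tuple[str, List[Dict[str, str]]]]:
        """Turn APIUrls.get_app_endpoints() output into (category, routes) pairs."""
        by_category: Dict[str, List[Dict[str, str]]] = defaultdict(list)
        for route_info in sorted(apidoc_routes.values(), key=lambda r: r["route"]):
            by_category[route_info["category"]].append(route_info)
        return sorted(by_category.items())


    # Tiny example with two registered doc routes:
    routes = {
        "/content/": {"category": "Archive", "route": "/api/1/content/"},
        "/content/filetype/": {"category": "Metadata", "route": "/api/1/content/filetype/"},
    }
    for category, entries in group_routes_by_category(routes):
        print(category, [entry["route"] for entry in entries])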
:>json object content_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/` for getting information about the content :>json string id: the **sha1** identifier of the content :>json string lang: the detected programming language if any :>json object tool: information about the tool used to detect the programming language {common_headers} :statuscode 200: no error :statuscode 400: an invalid **hash_type** or **hash** has been provided :statuscode 404: requested content can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`content/sha1:dc2830a9e72f23c1dfebef4413003221baa5fb62/language/` """ return api_lookup( archive.lookup_content_language, q, notfound_msg="No language information found for content {}.".format(q), enrich_fn=utils.enrich_metadata_endpoint, request=request, ) @api_route( r"/content/(?P[0-9a-z_:]*[0-9a-f]+)/license/", "api-1-content-license", checksum_args=["q"], ) -@api_doc("/content/license/") +@api_doc("/content/license/", category="Metadata") @format_docstring() def api_content_license(request: Request, q: str): """ .. http:get:: /api/1/content/[(hash_type):](hash)/license/ Get information about the license of a content object. :param string hash_type: optional parameter specifying which hashing algorithm has been used to compute the content checksum. It can be either ``sha1``, ``sha1_git``, ``sha256`` or ``blake2s256``. If that parameter is not provided, it is assumed that the hashing algorithm used is ``sha1``. :param string hash: hexadecimal representation of the checksum value computed with the specified hashing algorithm. :>json object content_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/` for getting information about the content :>json string id: the **sha1** identifier of the content :>json array licenses: array of strings containing the detected license names :>json object tool: information about the tool used to detect the license {common_headers} :statuscode 200: no error :statuscode 400: an invalid **hash_type** or **hash** has been provided :statuscode 404: requested content can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`content/sha1:dc2830a9e72f23c1dfebef4413003221baa5fb62/license/` """ return api_lookup( archive.lookup_content_license, q, notfound_msg="No license information found for content {}.".format(q), enrich_fn=utils.enrich_metadata_endpoint, request=request, ) @api_route( r"/content/(?P[0-9a-z_:]*[0-9a-f]+)/raw/", "api-1-content-raw", checksum_args=["q"], ) -@api_doc("/content/raw/") +@api_doc("/content/raw/", category="Archive") def api_content_raw(request: Request, q: str): """ .. http:get:: /api/1/content/[(hash_type):](hash)/raw/ Get the raw content of a content object (aka a "blob"), as a byte sequence. :param string hash_type: optional parameter specifying which hashing algorithm has been used to compute the content checksum. It can be either ``sha1``, ``sha1_git``, ``sha256`` or ``blake2s256``. If that parameter is not provided, it is assumed that the hashing algorithm used is ``sha1``. :param string hash: hexadecimal representation of the checksum value computed with the specified hashing algorithm. :query string filename: if provided, the downloaded content will get that filename :resheader Content-Type: application/octet-stream :statuscode 200: no error :statuscode 400: an invalid **hash_type** or **hash** has been provided :statuscode 404: requested content can not be found in the archive **Example:** .. 
parsed-literal:: :swh_web_api:`content/sha1:dc2830a9e72f23c1dfebef4413003221baa5fb62/raw/` """ content_raw = archive.lookup_content_raw(q) if not content_raw: raise NotFoundExc("Content %s is not found." % q) filename = request.query_params.get("filename") if not filename: filename = "content_%s_raw" % q.replace(":", "_") return FileResponse( io.BytesIO(content_raw["data"]), # not copied, as this is never modified filename=filename, content_type="application/octet-stream", as_attachment=True, ) @api_route(r"/content/known/search/", "api-1-content-known", methods=["POST"]) @api_route(r"/content/known/(?P(?!search).+)/", "api-1-content-known") -@api_doc("/content/known/", tags=["hidden"]) +@api_doc("/content/known/", category="Archive", tags=["hidden"]) @format_docstring() def api_check_content_known(request: Request, q: Optional[str] = None): """ .. http:get:: /api/1/content/known/(sha1)[,(sha1), ...,(sha1)]/ Check whether some content(s) (aka "blob(s)") is present in the archive based on its **sha1** checksum. :param string sha1: hexadecimal representation of the **sha1** checksum value for the content to check existence. Multiple values can be provided separated by ','. {common_headers} :>json array search_res: array holding the search result for each provided **sha1** :>json object search_stats: some statistics regarding the number of **sha1** provided and the percentage of those found in the archive :statuscode 200: no error :statuscode 400: an invalid **sha1** has been provided **Example:** .. parsed-literal:: :swh_web_api:`content/known/dc2830a9e72f23c1dfebef4413003221baa5fb62,0c3f19cb47ebfbe643fb19fa94c874d18fa62d12/` """ search_stats = {"nbfiles": 0, "pct": 0} search_res = None queries = [] # GET: Many hash separated values request if q: hashes = q.split(",") for v in hashes: queries.append({"filename": None, "sha1": v}) # POST: Many hash requests in post form submission elif request.method == "POST": data = request.data # Remove potential inputs with no associated value for k, v in data.items(): if v is not None: if k == "q" and len(v) > 0: queries.append({"filename": None, "sha1": v}) elif v != "": queries.append({"filename": k, "sha1": v}) if queries: lookup = archive.lookup_multiple_hashes(queries) result = [] nb_queries = len(queries) for el in lookup: res_d = {"sha1": el["sha1"], "found": el["found"]} if "filename" in el and el["filename"]: res_d["filename"] = el["filename"] result.append(res_d) search_res = result nbfound = len([x for x in lookup if x["found"]]) search_stats["nbfiles"] = nb_queries search_stats["pct"] = int((nbfound / nb_queries) * 100) return {"search_res": search_res, "search_stats": search_stats} @api_route( r"/content/(?P[0-9a-z_:]*[0-9a-f]+)/", "api-1-content", checksum_args=["q"] ) -@api_doc("/content/") +@api_doc("/content/", category="Archive") @format_docstring() def api_content_metadata(request: Request, q: str): """ .. http:get:: /api/1/content/[(hash_type):](hash)/ Get information about a content (aka a "blob") object. In the archive, a content object is identified based on checksum values computed using various hashing algorithms. :param string hash_type: optional parameter specifying which hashing algorithm has been used to compute the content checksum. It can be either ``sha1``, ``sha1_git``, ``sha256`` or ``blake2s256``. If that parameter is not provided, it is assumed that the hashing algorithm used is ``sha1``. :param string hash: hexadecimal representation of the checksum value computed with the specified hashing algorithm. 
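The content endpoints above accept the same blob under several checksum types. For reference, ``sha1`` and ``sha256`` are plain hashes of the raw bytes, while ``sha1_git`` is the Git blob identifier (a ``blob <size>\0`` header is hashed before the data). A plain-``hashlib`` sketch with an invented payload::

    import hashlib

    data = b"hello world\n"

    checksums = {
        # Hashes of the raw bytes.
        "sha1": hashlib.sha1(data).hexdigest(),
        "sha256": hashlib.sha256(data).hexdigest(),
        # Git-style blob identifier: the bytes are prefixed with "blob <size>\0".
        "sha1_git": hashlib.sha1(b"blob %d\x00" % len(data) + data).hexdigest(),
    }

    for hash_type, value in checksums.items():
        # Any of these forms can be used in the endpoints above, for instance
        # /api/1/content/sha1_git:<value>/filetype/
        print(f"/api/1/content/{hash_type}:{value}/")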
{common_headers} :>json object checksums: object holding the computed checksum values for the requested content :>json string data_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/raw/` for downloading the content raw bytes :>json string filetype_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/filetype/` for getting information about the content MIME type :>json string language_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/language/` for getting information about the programming language used in the content :>json number length: length of the content in bytes :>json string license_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/license/` for getting information about the license of the content :statuscode 200: no error :statuscode 400: an invalid **hash_type** or **hash** has been provided :statuscode 404: requested content can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`content/sha1_git:fe95a46679d128ff167b7c55df5d02356c5a1ae1/` """ return api_lookup( archive.lookup_content, q, notfound_msg="Content with {} not found.".format(q), enrich_fn=functools.partial(utils.enrich_content, query_string=q), request=request, ) diff --git a/swh/web/api/views/directory.py b/swh/web/api/views/directory.py index 3bafcf95..93145664 100644 --- a/swh/web/api/views/directory.py +++ b/swh/web/api/views/directory.py @@ -1,97 +1,97 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Optional from rest_framework.request import Request from swh.web.api import utils from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.views.utils import api_lookup from swh.web.utils import archive @api_route( r"/directory/(?P[0-9a-f]+)/", "api-1-directory", checksum_args=["sha1_git"], ) @api_route( r"/directory/(?P[0-9a-f]+)/(?P.+)/", "api-1-directory", checksum_args=["sha1_git"], ) -@api_doc("/directory/") +@api_doc("/directory/", category="Archive") @format_docstring() def api_directory(request: Request, sha1_git: str, path: Optional[str] = None): """ .. http:get:: /api/1/directory/(sha1_git)/[(path)/] Get information about directory objects. Directories are identified by **sha1** checksums, compatible with Git directory identifiers. See :func:`swh.model.git_objects.directory_git_object` in our data model module for details about how they are computed. When given only a directory identifier, this endpoint returns information about the directory itself, returning its content (usually a list of directory entries). When given a directory identifier and a path, this endpoint returns information about the directory entry pointed by the relative path, starting path resolution from the given directory. 
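A usage sketch for the directory endpoint just described, showing both URL forms (root listing, and a single entry resolved by path). The directory id is the one used in this endpoint's docstring example; the ``README.md`` path is only an illustration and may not exist under that root::

    import requests

    API = "https://archive.softwareheritage.org/api/1"
    dir_id = "977fc4b98c0e85816348cebd3b12026407c368b6"

    # Full listing of the directory itself:
    entries = requests.get(f"{API}/directory/{dir_id}/").json()
    print(len(entries), "entries")

    # A single entry, resolved relative to that directory (404 if the path
    # does not exist under this root):
    resp = requests.get(f"{API}/directory/{dir_id}/README.md/")
    if resp.ok:
        entry = resp.json()
        print(entry["type"], entry["target"])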
:param string sha1_git: hexadecimal representation of the directory **sha1_git** identifier :param string path: optional parameter to get information about the directory entry pointed by that relative path {common_headers} :>jsonarr object checksums: object holding the computed checksum values for a directory entry (only for file entries) :>jsonarr string dir_id: **sha1_git** identifier of the requested directory :>jsonarr number length: length of a directory entry in bytes (only for file entries) for getting information about the content MIME type :>jsonarr string name: the directory entry name :>jsonarr number perms: permissions for the directory entry :>jsonarr string target: **sha1_git** identifier of the directory entry :>jsonarr string target_url: link to :http:get:`/api/1/content/[(hash_type):](hash)/` or :http:get:`/api/1/directory/(sha1_git)/[(path)/]` depending on the directory entry type :>jsonarr string type: the type of the directory entry, can be either ``dir``, ``file`` or ``rev`` :statuscode 200: no error :statuscode 400: an invalid **hash_type** or **hash** has been provided :statuscode 404: requested directory can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`directory/977fc4b98c0e85816348cebd3b12026407c368b6/` """ if path: error_msg_path = ( "Entry with path %s relative to directory " "with sha1_git %s not found." ) % (path, sha1_git) return api_lookup( archive.lookup_directory_with_path, sha1_git, path, notfound_msg=error_msg_path, enrich_fn=utils.enrich_directory_entry, request=request, ) else: error_msg_nopath = "Directory with sha1_git %s not found." % sha1_git return api_lookup( archive.lookup_directory, sha1_git, notfound_msg=error_msg_nopath, enrich_fn=utils.enrich_directory_entry, request=request, ) diff --git a/swh/web/api/views/graph.py b/swh/web/api/views/graph.py index 216542f8..4989a7b9 100644 --- a/swh/web/api/views/graph.py +++ b/swh/web/api/views/graph.py @@ -1,193 +1,193 @@ # Copyright (C) 2020-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from distutils.util import strtobool import json from typing import Dict, Iterator, Union from urllib.parse import unquote, urlparse, urlunparse import requests from django.http import QueryDict from django.http.response import StreamingHttpResponse from rest_framework.decorators import renderer_classes from rest_framework.renderers import JSONRenderer from rest_framework.request import Request from rest_framework.response import Response from swh.model.hashutil import hash_to_hex from swh.model.model import Sha1Git from swh.model.swhids import ExtendedObjectType, ExtendedSWHID from swh.web.api.apidoc import api_doc from swh.web.api.apiurls import api_route from swh.web.api.renderers import PlainTextRenderer from swh.web.config import SWH_WEB_INTERNAL_SERVER_NAME, get_config from swh.web.utils import archive API_GRAPH_PERM = "swh.web.api.graph" def _resolve_origin_swhid(swhid: str, origin_urls: Dict[Sha1Git, str]) -> str: """ Resolve origin url from its swhid sha1 representation. 
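``_resolve_origin_swhid`` has to turn ``swh:1:ori:...`` identifiers back into origin URLs because an origin SWHID only carries a hash of the URL. A sketch of the forward direction with ``swh.model`` (the same ``Origin(...).swhid()`` call appears later in this diff, in ``metadata.py``); the final check assumes the object id is the sha1 of the URL, which is consistent with the proxy looking origins up by sha1::

    import hashlib

    from swh.model.model import Origin

    url = "https://github.com/python/cpython"

    swhid = Origin(url=url).swhid()
    print(swhid)  # swh:1:ori:<40 hex digits>

    # The graph proxy receives identifiers like the one above and resolves the
    # URL back through archive.lookup_origins_by_sha1s(); this should print True
    # if the object id is indeed the sha1 of the origin URL.
    print(str(swhid).endswith(hashlib.sha1(url.encode()).hexdigest()))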
""" parsed_swhid = ExtendedSWHID.from_string(swhid) if parsed_swhid.object_type == ExtendedObjectType.ORIGIN: if parsed_swhid.object_id in origin_urls: return origin_urls[parsed_swhid.object_id] else: origin_info = list( archive.lookup_origins_by_sha1s([hash_to_hex(parsed_swhid.object_id)]) )[0] assert origin_info is not None origin_urls[parsed_swhid.object_id] = origin_info["url"] return origin_info["url"] else: return swhid def _resolve_origin_swhids_in_graph_response( response: requests.Response, ) -> Iterator[bytes]: """ Resolve origin urls from their swhid sha1 representations in graph service responses. """ content_type = response.headers["Content-Type"] origin_urls: Dict[Sha1Git, str] = {} if content_type == "application/x-ndjson": for line in response.iter_lines(): swhids = json.loads(line.decode("utf-8")) processed_line = [] for swhid in swhids: processed_line.append(_resolve_origin_swhid(swhid, origin_urls)) yield (json.dumps(processed_line) + "\n").encode() elif content_type == "text/plain": for line in response.iter_lines(): processed_line = [] swhids = line.decode("utf-8").split(" ") for swhid in swhids: processed_line.append(_resolve_origin_swhid(swhid, origin_urls)) yield (" ".join(processed_line) + "\n").encode() else: for line in response.iter_lines(): yield line + b"\n" @api_route(r"/graph/", "api-1-graph-doc") -@api_doc("/graph/") +@api_doc("/graph/", category="Miscellaneous") def api_graph(request: Request) -> None: """ .. http:get:: /api/1/graph/(graph_query)/ Provide fast access to the graph representation of the Software Heritage archive. That endpoint acts as a proxy for the `Software Heritage Graph service `_. It provides fast access to the `graph representation `_ of the Software Heritage archive. For more details please refer to the `Graph RPC API documentation `_. .. warning:: That endpoint is not publicly available and requires authentication and special user permission in order to be able to request it. :param string graph_query: query to forward to the Software Heritage Graph archive (see its `documentation `_) :query boolean resolve_origins: extra parameter defined by that proxy enabling to resolve origin urls from their sha1 representations :statuscode 200: no error :statuscode 400: an invalid graph query has been provided :statuscode 404: provided graph node cannot be found **Examples:** .. 
parsed-literal:: :swh_web_api:`graph/leaves/swh:1:dir:432d1b21c1256f7408a07c577b6974bbdbcc1323/` :swh_web_api:`graph/neighbors/swh:1:rev:f39d7d78b70e0f39facb1e4fab77ad3df5c52a35/` :swh_web_api:`graph/randomwalk/swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2/ori?direction=backward` :swh_web_api:`graph/randomwalk/swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2/ori?direction=backward&limit=-2` :swh_web_api:`graph/visit/nodes/swh:1:snp:40f9f177b8ab0b7b3d70ee14bbc8b214e2b2dcfc?direction=backward&resolve_origins=true` :swh_web_api:`graph/visit/edges/swh:1:snp:40f9f177b8ab0b7b3d70ee14bbc8b214e2b2dcfc?direction=backward&resolve_origins=true` :swh_web_api:`graph/visit/paths/swh:1:dir:644dd466d8ad527ea3a609bfd588a3244e6dafcb?direction=backward&resolve_origins=true` """ return None @api_route(r"/graph/(?P.+)/", "api-1-graph") @renderer_classes([JSONRenderer, PlainTextRenderer]) def api_graph_proxy( request: Request, graph_query: str ) -> Union[Response, StreamingHttpResponse]: if request.get_host() != SWH_WEB_INTERNAL_SERVER_NAME: if not bool(request.user and request.user.is_authenticated): return Response("Authentication credentials were not provided.", status=401) if not request.user.has_perm(API_GRAPH_PERM): return Response( "You do not have permission to perform this action.", status=403 ) graph_config = get_config()["graph"] graph_query = unquote(graph_query) graph_query_url = graph_config["server_url"] graph_query_url += graph_query parsed_url = urlparse(graph_query_url) query_dict = QueryDict(parsed_url.query, mutable=True) query_dict.update(request.GET) # clamp max_edges query parameter according to authentication if request.user.is_staff: max_edges = graph_config["max_edges"]["staff"] elif request.user.is_authenticated: max_edges = graph_config["max_edges"]["user"] else: max_edges = graph_config["max_edges"]["anonymous"] query_dict["max_edges"] = min( max_edges, int(query_dict.get("max_edges", max_edges + 1)) ) if query_dict: graph_query_url = urlunparse( parsed_url._replace(query=query_dict.urlencode(safe="/;:")) ) response = requests.get(graph_query_url, stream=True) if response.status_code != 200: return Response( response.content, status=response.status_code, content_type=response.headers["Content-Type"], ) # graph stats and counter endpoint responses are not streamed if response.headers.get("Transfer-Encoding") != "chunked": return Response( response.json(), status=response.status_code, content_type=response.headers["Content-Type"], ) # other endpoint responses are streamed else: resolve_origins = strtobool(request.GET.get("resolve_origins", "false")) if response.status_code == 200 and resolve_origins: response_stream = _resolve_origin_swhids_in_graph_response(response) else: response_stream = map(lambda line: line + b"\n", response.iter_lines()) return StreamingHttpResponse( response_stream, status=response.status_code, content_type=response.headers["Content-Type"], ) diff --git a/swh/web/api/views/identifiers.py b/swh/web/api/views/identifiers.py index 021d1614..2612a034 100644 --- a/swh/web/api/views/identifiers.py +++ b/swh/web/api/views/identifiers.py @@ -1,126 +1,126 @@ # Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict, Set from rest_framework.request import Request from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.model.swhids 
import ObjectType from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.utils import archive from swh.web.utils.exc import LargePayloadExc from swh.web.utils.identifiers import get_swhid, group_swhids, resolve_swhid @api_route(r"/resolve/(?P.+)/", "api-1-resolve-swhid") -@api_doc("/resolve/") +@api_doc("/resolve/", category="Archive") @format_docstring() def api_resolve_swhid(request: Request, swhid: str): """ .. http:get:: /api/1/resolve/(swhid)/ Resolve a SoftWare Heritage persistent IDentifier (SWHID) Try to resolve a provided `SoftWare Heritage persistent IDentifier `_ into an url for browsing the pointed archive object. If the provided identifier is valid, the existence of the object in the archive will also be checked. :param string swhid: a SoftWare Heritage persistent IDentifier :>json string browse_url: the url for browsing the pointed object :>json object metadata: object holding optional parts of the SWHID :>json string namespace: the SWHID namespace :>json string object_id: the hash identifier of the pointed object :>json string object_type: the type of the pointed object :>json number scheme_version: the scheme version of the SWHID {common_headers} :statuscode 200: no error :statuscode 400: an invalid SWHID has been provided :statuscode 404: the pointed object does not exist in the archive **Example:** .. parsed-literal:: :swh_web_api:`resolve/swh:1:rev:96db9023b881d7cd9f379b0c154650d6c108e9a3;origin=https://github.com/openssl/openssl/` """ # try to resolve the provided swhid swhid_resolved = resolve_swhid(swhid) # id is well-formed, now check that the pointed # object is present in the archive, NotFoundExc # will be raised otherwise swhid_parsed = swhid_resolved["swhid_parsed"] object_type = swhid_parsed.object_type object_id = hash_to_hex(swhid_parsed.object_id) archive.lookup_object(swhid_parsed.object_type, object_id) # id is well-formed and the pointed object exists return { "namespace": swhid_parsed.namespace, "scheme_version": swhid_parsed.scheme_version, "object_type": object_type.name.lower(), "object_id": object_id, "metadata": swhid_parsed.qualifiers(), "browse_url": request.build_absolute_uri(swhid_resolved["browse_url"]), } @api_route(r"/known/", "api-1-known", methods=["POST"]) -@api_doc("/known/") +@api_doc("/known/", category="Archive") @format_docstring() def api_swhid_known(request: Request): """ .. http:post:: /api/1/known/ Check if a list of objects are present in the Software Heritage archive. The objects to check existence must be provided using `SoftWare Heritage persistent IDentifiers `_. 
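The ``/known/`` endpoint described here takes the SWHIDs as a JSON array in the POST body (the view caps a single call at 1000 identifiers) and returns one ``known`` flag per SWHID, as built in the response dictionary below. A usage sketch, reusing identifiers that appear elsewhere in this diff::

    import requests

    swhids = [
        "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
        "swh:1:dir:977fc4b98c0e85816348cebd3b12026407c368b6",
    ]

    response = requests.post(
        "https://archive.softwareheritage.org/api/1/known/",
        json=swhids,
    )
    response.raise_for_status()
    for swhid, result in response.json().items():
        print(swhid, "present" if result["known"] else "absent")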
:json object : an object whose keys are input SWHIDs and values objects with the following keys: * **known (bool)**: whether the object was found {common_headers} :statuscode 200: no error :statuscode 400: an invalid SWHID was provided :statuscode 413: the input array of SWHIDs is too large """ limit = 1000 if len(request.data) > limit: raise LargePayloadExc( "The maximum number of SWHIDs this endpoint can receive is %s" % limit ) swhids = [get_swhid(swhid) for swhid in request.data] response = {str(swhid): {"known": False} for swhid in swhids} # group swhids by their type swhids_by_type = group_swhids(swhids) # search for hashes not present in the storage missing_hashes: Dict[ObjectType, Set[bytes]] = { k: set(map(hash_to_bytes, archive.lookup_missing_hashes({k: v}))) for k, v in swhids_by_type.items() } for swhid in swhids: if swhid.object_id not in missing_hashes[swhid.object_type]: response[str(swhid)]["known"] = True return response diff --git a/swh/web/api/views/metadata.py b/swh/web/api/views/metadata.py index edabb6a2..df491ab8 100644 --- a/swh/web/api/views/metadata.py +++ b/swh/web/api/views/metadata.py @@ -1,324 +1,324 @@ # Copyright (C) 2021-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 import re from typing import Dict, Optional import iso8601 from django.http import HttpResponse from django.shortcuts import redirect from rest_framework.request import Request from swh.model import hashutil, swhids from swh.model.model import MetadataAuthority, MetadataAuthorityType, Origin from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.utils import SWHID_RE, archive, converters, reverse from swh.web.utils.exc import BadInputExc, NotFoundExc @api_route( f"/raw-extrinsic-metadata/swhid/(?P{SWHID_RE})/", "api-1-raw-extrinsic-metadata-swhid", ) -@api_doc("/raw-extrinsic-metadata/swhid/") +@api_doc("/raw-extrinsic-metadata/swhid/", category="Metadata") @format_docstring() def api_raw_extrinsic_metadata_swhid(request: Request, target: str): """ .. http:get:: /api/1/raw-extrinsic-metadata/swhid/(target) Returns raw `extrinsic metadata `__ collected on a given object. :param string target: The core SWHID of the object whose metadata should be returned :query string authority: A metadata authority identifier, formatted as `` ``. Required. :query string after: ISO8601 representation of the minimum timestamp of metadata to fetch. Defaults to allowing all metadata. :query int limit: Maximum number of metadata objects to return. {common_headers} :>jsonarr string target: SWHID of the object described by this metadata (absent when ``target`` is not a core SWHID (ie. it does not have type ``cnt``/``dir``/``rev``/``rel``/``snp``) :>jsonarr string discovery_date: ISO8601/RFC3339 timestamp of the moment this metadata was collected. 
:>jsonarr object authority: authority this metadata is coming from :>jsonarr object fetcher: tool used to fetch the metadata :>jsonarr string format: short identifier of the format of the metadata :>jsonarr string metadata_url: link to download the metadata "blob" itself :>jsonarr string origin: URL of the origin in which context's the metadata is valid, if any :>jsonarr int visit: identifier of the visit in which context's the metadata is valid, if any :>jsonarr string snapshot: SWHID of the snapshot in which context's the metadata is valid, if any :>jsonarr string release: SWHID of the release in which context's the metadata is valid, if any :>jsonarr string revision: SWHID of the revision in which context's the metadata is valid, if any :>jsonarr string path: SWHID of the path in which context's is valid if any, relative to a release or revision as anchor :>jsonarr string directory: SWHID of the directory in which context's the metadata is valid, if any :statuscode 200: no error **Example:** .. parsed-literal:: :swh_web_api:`raw-extrinsic-metadata/swhid/swh:1:dir:a2faa28028657859c16ff506924212b33f0e1307/?authority=forge%20https://pypi.org/` """ # noqa authority_str: Optional[str] = request.query_params.get("authority") after_str: Optional[str] = request.query_params.get("after") limit_str: str = request.query_params.get("limit", "100") page_token_str: Optional[str] = request.query_params.get("page_token") if authority_str is None: raise BadInputExc("The 'authority' query parameter is required.") if " " not in authority_str.strip(): raise BadInputExc("The 'authority' query parameter should contain a space.") (authority_type_str, authority_url) = authority_str.split(" ", 1) try: authority_type = MetadataAuthorityType(authority_type_str) except ValueError: raise BadInputExc( f"Invalid 'authority' type, should be one of: " f"{', '.join(member.value for member in MetadataAuthorityType)}" ) authority = MetadataAuthority(authority_type, authority_url) if after_str: try: after = iso8601.parse_date(after_str) except iso8601.ParseError: raise BadInputExc("Invalid format for 'after' parameter.") from None else: after = None try: limit = int(limit_str) except ValueError: raise BadInputExc("'limit' parameter must be an integer.") from None limit = min(limit, 10000) try: parsed_target = swhids.ExtendedSWHID.from_string(target) except swhids.ValidationError as e: raise BadInputExc(f"Invalid target SWHID: {e}") from None try: swhids.CoreSWHID.from_string(target) except swhids.ValidationError: # Can be parsed as an extended SWHID, but not as a core SWHID extended_swhid = True else: extended_swhid = False if page_token_str is not None: page_token = base64.urlsafe_b64decode(page_token_str) else: page_token = None result_page = archive.storage.raw_extrinsic_metadata_get( target=parsed_target, authority=authority, after=after, page_token=page_token, limit=limit, ) filename = None if parsed_target.object_type == swhids.ExtendedObjectType.ORIGIN: origin_sha1 = hashutil.hash_to_hex(parsed_target.object_id) (origin_info,) = list(archive.lookup_origins_by_sha1s([origin_sha1])) if origin_info is not None: filename = re.sub("[:/_.]+", "_", origin_info["url"]) + "_metadata" if filename is None: filename = f"{target}_metadata" results = [] for metadata in result_page.results: result = converters.from_raw_extrinsic_metadata(metadata) if extended_swhid: # Keep extended SWHIDs away from the public API as much as possible. 
# (It is part of the URL, but not documented, and only accessed via # the link in the response of api-1-origin) del result["target"] # We can't reliably send metadata directly, because it is a bytestring, # and we have to return JSON documents. result["metadata_url"] = reverse( "api-1-raw-extrinsic-metadata-get", url_args={"id": hashutil.hash_to_hex(metadata.id)}, query_params={"filename": filename}, request=request, ) results.append(result) headers: Dict[str, str] = {} if result_page.next_page_token is not None: headers["link-next"] = reverse( "api-1-raw-extrinsic-metadata-swhid", url_args={"target": target}, query_params=dict( authority=authority_str, after=after_str, limit=limit_str, page_token=base64.urlsafe_b64encode( result_page.next_page_token.encode() ).decode(), ), request=request, ) return { "results": results, "headers": headers, } @api_route( "/raw-extrinsic-metadata/get/(?P[0-9a-z]+)/", "api-1-raw-extrinsic-metadata-get", ) def api_raw_extrinsic_metadata_get(request: Request, id: str): # This is an internal endpoint that should only be accessed via URLs given # by /raw-extrinsic-metadata/swhid/; so it is not documented. metadata = archive.storage.raw_extrinsic_metadata_get_by_ids( [hashutil.hash_to_bytes(id)] ) if not metadata: raise NotFoundExc( "Metadata not found. Use /raw-extrinsic-metadata/swhid/ to access metadata." ) response = HttpResponse( metadata[0].metadata, content_type="application/octet-stream" ) filename = request.query_params.get("filename") if filename and re.match("[a-zA-Z0-9:._-]+", filename): response["Content-disposition"] = f'attachment; filename="{filename}"' else: # It should always be not-None and match the regexp if the URL was created by # /raw-extrinsic-metadata/swhid/, but we're better safe than sorry. response["Content-disposition"] = "attachment" return response @api_route( f"/raw-extrinsic-metadata/swhid/(?P{SWHID_RE})/authorities/", "api-1-raw-extrinsic-metadata-swhid-authorities", ) -@api_doc("/raw-extrinsic-metadata/swhid/authorities/") +@api_doc("/raw-extrinsic-metadata/swhid/authorities/", category="Metadata") @format_docstring() def api_raw_extrinsic_metadata_swhid_authorities(request: Request, target: str): """ .. http:get:: /api/1/raw-extrinsic-metadata/swhid/(target)/authorities/ Returns a list of metadata authorities that provided metadata on the given target. They can then be used to get the raw `extrinsic metadata `__ collected on that object from each of the authorities. This endpoint should only be used directly to retrieve metadata from core SWHIDs (with type ``cnt``, ``dir``, ``rev``, ``rel``, and ``snp``). For "extended" SWHIDs such as origins, :http:get:`/api/1/raw-extrinsic-metadata/origin/(origin_url)/authorities/` should be used instead of building this URL directly. :param string target: The core SWHID of the object whose metadata-providing authorities should be returned {common_headers} :>jsonarr string type: Type of authority (deposit_client, forge, registry) :>jsonarr string url: Unique IRI identifying the authority :>jsonarr object metadata_list_url: URL to get the list of metadata objects on the given object from this authority :statuscode 200: no error **Example:** .. 
parsed-literal:: :swh_web_api:`raw-extrinsic-metadata/swhid/swh:1:dir:a2faa28028657859c16ff506924212b33f0e1307/authorities/` """ # noqa try: parsed_target = swhids.ExtendedSWHID.from_string(target) except swhids.ValidationError as e: raise BadInputExc(f"Invalid target SWHID: {e}") from None authorities = archive.storage.raw_extrinsic_metadata_get_authorities( target=parsed_target ) results = [ { **authority.to_dict(), "metadata_list_url": reverse( "api-1-raw-extrinsic-metadata-swhid", url_args={"target": target}, query_params={"authority": f"{authority.type.value} {authority.url}"}, request=request, ), } for authority in authorities ] return { "results": results, "headers": {}, } @api_route( "/raw-extrinsic-metadata/origin/(?P.*)/authorities/", "api-1-raw-extrinsic-metadata-origin-authorities", ) -@api_doc("/raw-extrinsic-metadata/origin/authorities/") +@api_doc("/raw-extrinsic-metadata/origin/authorities/", category="Metadata") @format_docstring() def api_raw_extrinsic_metadata_origin_authorities(request: Request, origin_url: str): """ .. http:get:: /api/1/raw-extrinsic-metadata/origin/(origin_url)/authorities/ Similar to :http:get:`/api/1/raw-extrinsic-metadata/swhid/(target)/authorities/` but to get metadata on origins instead of objects :param string origin_url: The URL of the origin whose metadata-providing authorities should be returned {common_headers} :>jsonarr string type: Type of authority (deposit_client, forge, registry) :>jsonarr string url: Unique IRI identifying the authority :>jsonarr object metadata_list_url: URL to get the list of metadata objects on the given object from this authority :statuscode 200: no error **Example:** .. parsed-literal:: :swh_web_api:`raw-extrinsic-metadata/origin/https://github.com/rdicosmo/parmap/authorities/` """ # noqa url = reverse( "api-1-raw-extrinsic-metadata-swhid-authorities", url_args={"target": Origin(url=origin_url).swhid()}, ) return redirect(url) diff --git a/swh/web/api/views/origin.py b/swh/web/api/views/origin.py index 6cbf6088..0435e7d7 100644 --- a/swh/web/api/views/origin.py +++ b/swh/web/api/views/origin.py @@ -1,509 +1,509 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from distutils.util import strtobool from functools import partial from typing import Dict from rest_framework.request import Request from swh.search.exc import SearchQuerySyntaxError from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.utils import ( enrich_origin, enrich_origin_search_result, enrich_origin_visit, ) from swh.web.api.views.utils import api_lookup from swh.web.utils import archive, origin_visit_types, reverse from swh.web.utils.exc import BadInputExc from swh.web.utils.origin_visits import get_origin_visits from swh.web.utils.typing import OriginInfo DOC_RETURN_ORIGIN = """ :>json string origin_visits_url: link to in order to get information about the visits for that origin :>json string url: the origin canonical url """ DOC_RETURN_ORIGIN_ARRAY = DOC_RETURN_ORIGIN.replace(":>json", ":>jsonarr") DOC_RETURN_ORIGIN_VISIT = """ :>json string date: ISO8601/RFC3339 representation of the visit date (in UTC) :>json str origin: the origin canonical url :>json string origin_url: link to get information about the origin :>jsonarr string snapshot: the snapshot identifier of the visit 
(may be null if status is not **full**). :>jsonarr string snapshot_url: link to :http:get:`/api/1/snapshot/(snapshot_id)/` in order to get information about the snapshot of the visit (may be null if status is not **full**). :>json string status: status of the visit (either **full**, **partial** or **ongoing**) :>json number visit: the unique identifier of the visit :>json string metadata_authorities_url: link to :http:get:`/api/1/raw-extrinsic-metadata/swhid/(target)/authorities/` to get the list of metadata authorities providing extrinsic metadata on this origin (and, indirectly, to the origin's extrinsic metadata itself) """ DOC_RETURN_ORIGIN_VISIT_ARRAY = DOC_RETURN_ORIGIN_VISIT.replace(":>json", ":>jsonarr") DOC_RETURN_ORIGIN_VISIT_ARRAY += """ :>jsonarr number id: the unique identifier of the origin :>jsonarr string origin_visit_url: link to :http:get:`/api/1/origin/(origin_url)/visit/(visit_id)/` in order to get information about the visit """ @api_route(r"/origins/", "api-1-origins") -@api_doc("/origins/", noargs=True) +@api_doc("/origins/", category="Archive", noargs=True) @format_docstring(return_origin_array=DOC_RETURN_ORIGIN_ARRAY) def api_origins(request: Request): """ .. http:get:: /api/1/origins/ Get list of archived software origins. .. warning:: This endpoint used to provide an ``origin_from`` query parameter, and guarantee an order on results. This is no longer true, and only the Link header should be used for paginating through results. :query int origin_count: The maximum number of origins to return (default to 100, can not exceed 10000) {return_origin_array} {common_headers} {resheader_link} :statuscode 200: no error **Example:** .. parsed-literal:: :swh_web_api:`origins?origin_count=500` """ old_param_origin_from = request.query_params.get("origin_from") if old_param_origin_from: raise BadInputExc("Please use the Link header to browse through result") page_token = request.query_params.get("page_token", None) limit = min(int(request.query_params.get("origin_count", "100")), 10000) page_result = archive.lookup_origins(page_token, limit) origins = [enrich_origin(o, request=request) for o in page_result.results] next_page_token = page_result.next_page_token headers: Dict[str, str] = {} if next_page_token is not None: headers["link-next"] = reverse( "api-1-origins", query_params={"page_token": next_page_token, "origin_count": str(limit)}, request=request, ) return {"results": origins, "headers": headers} @api_route(r"/origin/(?P.+)/get/", "api-1-origin") -@api_doc("/origin/") +@api_doc("/origin/", category="Archive") @format_docstring(return_origin=DOC_RETURN_ORIGIN) def api_origin(request: Request, origin_url: str): """ .. http:get:: /api/1/origin/(origin_url)/get/ Get information about a software origin. :param string origin_url: the origin url {return_origin} {common_headers} :statuscode 200: no error :statuscode 404: requested origin can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`origin/https://github.com/python/cpython/get/` """ ori_dict = {"url": origin_url} error_msg = "Origin with url %s not found." 
% ori_dict["url"] return api_lookup( archive.lookup_origin, ori_dict, lookup_similar_urls=False, notfound_msg=error_msg, enrich_fn=enrich_origin, request=request, ) def _visit_types() -> str: docstring = "" # available visit types are queried using swh-search so we do it in a try # block in case of failure (for instance in docker environment when # elasticsearch service is not available) try: visit_types = [f"**{visit_type}**" for visit_type in origin_visit_types()] docstring = ", ".join(visit_types[:-1]) + f", and {visit_types[-1]}" except Exception: docstring = "???" pass return docstring @api_route( r"/origin/search/(?P.*)/", "api-1-origin-search", throttle_scope="swh_api_origin_search", ) -@api_doc("/origin/search/") +@api_doc("/origin/search/", category="Archive") @format_docstring( return_origin_array=DOC_RETURN_ORIGIN_ARRAY, visit_types=_visit_types() ) def api_origin_search(request: Request, url_pattern: str): """ .. http:get:: /api/1/origin/search/(url_pattern)/ Search for software origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. .. warning:: This endpoint used to provide an ``offset`` query parameter, and guarantee an order on results. This is no longer true, and only the Link header should be used for paginating through results. :param string url_pattern: a string pattern :query boolean use_ql: whether to use swh search query language or not :query int limit: the maximum number of found origins to return (bounded to 1000) :query boolean with_visit: if true, only return origins with at least one visit by Software heritage :query string visit_type: if provided, only return origins with that specific visit type (currently the supported types are {visit_types}) {return_origin_array} {common_headers} {resheader_link} :statuscode 200: no error **Example:** .. parsed-literal:: :swh_web_api:`origin/search/python/?limit=2` """ result = {} limit = min(int(request.query_params.get("limit", "70")), 1000) page_token = request.query_params.get("page_token") use_ql = request.query_params.get("use_ql", "false") with_visit = request.query_params.get("with_visit", "false") visit_type = request.query_params.get("visit_type") try: (results, page_token) = api_lookup( archive.search_origin, url_pattern, bool(strtobool(use_ql)), limit, bool(strtobool(with_visit)), [visit_type] if visit_type else None, page_token, enrich_fn=enrich_origin_search_result, request=request, ) except SearchQuerySyntaxError as e: raise BadInputExc(f"Syntax error in search query: {e.args[0]}") if page_token is not None: query_params = {k: v for (k, v) in request.GET.dict().items()} query_params["page_token"] = page_token result["headers"] = { "link-next": reverse( "api-1-origin-search", url_args={"url_pattern": url_pattern}, query_params=query_params, request=request, ) } result.update({"results": results}) return result @api_route(r"/origin/metadata-search/", "api-1-origin-metadata-search") -@api_doc("/origin/metadata-search/", noargs=True) +@api_doc("/origin/metadata-search/", category="Metadata", noargs=True) @format_docstring(return_origin_array=DOC_RETURN_ORIGIN_ARRAY) def api_origin_metadata_search(request: Request): """ .. http:get:: /api/1/origin/metadata-search/ Search for software origins whose metadata (expressed as a JSON-LD/CodeMeta dictionary) match the provided criteria. For now, only full-text search on this dictionary is supported. 
:query str fulltext: a string that will be matched against origin metadata; results are ranked and ordered starting with the best ones. :query int limit: the maximum number of found origins to return (bounded to 100) {return_origin_array} {common_headers} :statuscode 200: no error **Example:** .. parsed-literal:: :swh_web_api:`origin/metadata-search/?limit=2&fulltext=Jane%20Doe` """ fulltext = request.query_params.get("fulltext", None) limit = min(int(request.query_params.get("limit", "70")), 100) if not fulltext: content = '"fulltext" must be provided and non-empty.' raise BadInputExc(content) results = api_lookup( archive.search_origin_metadata, fulltext, limit, request=request ) return { "results": results, } @api_route(r"/origin/(?P.+)/visits/", "api-1-origin-visits") -@api_doc("/origin/visits/") +@api_doc("/origin/visits/", category="Archive") @format_docstring(return_origin_visit_array=DOC_RETURN_ORIGIN_VISIT_ARRAY) def api_origin_visits(request: Request, origin_url: str): """ .. http:get:: /api/1/origin/(origin_url)/visits/ Get information about all visits of a software origin. Visits are returned sorted in descending order according to their date. :param str origin_url: a software origin URL :query int per_page: specify the number of visits to list, for pagination purposes :query int last_visit: visit to start listing from, for pagination purposes {common_headers} {resheader_link} {return_origin_visit_array} :statuscode 200: no error :statuscode 404: requested origin can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`origin/https://github.com/hylang/hy/visits/` """ result = {} origin_query = OriginInfo(url=origin_url) notfound_msg = "No origin {} found".format(origin_url) url_args_next = {"origin_url": origin_url} per_page = int(request.query_params.get("per_page", "10")) last_visit_str = request.query_params.get("last_visit") last_visit = int(last_visit_str) if last_visit_str else None def _lookup_origin_visits(origin_query, last_visit=last_visit, per_page=per_page): all_visits = get_origin_visits(origin_query, lookup_similar_urls=False) all_visits.reverse() visits = [] if not last_visit: visits = all_visits[:per_page] else: for i, v in enumerate(all_visits): if v["visit"] == last_visit: visits = all_visits[i + 1 : i + 1 + per_page] break for v in visits: yield v results = api_lookup( _lookup_origin_visits, origin_query, notfound_msg=notfound_msg, enrich_fn=partial( enrich_origin_visit, with_origin_link=False, with_origin_visit_link=True ), request=request, ) if results: nb_results = len(results) if nb_results == per_page: new_last_visit = results[-1]["visit"] query_params = {} query_params["last_visit"] = new_last_visit if request.query_params.get("per_page"): query_params["per_page"] = per_page result["headers"] = { "link-next": reverse( "api-1-origin-visits", url_args=url_args_next, query_params=query_params, request=request, ) } result.update({"results": results}) return result @api_route( r"/origin/(?P.+)/visit/latest/", "api-1-origin-visit-latest", throttle_scope="swh_api_origin_visit_latest", ) -@api_doc("/origin/visit/latest/") +@api_doc("/origin/visit/latest/", category="Archive") @format_docstring(return_origin_visit=DOC_RETURN_ORIGIN_VISIT) def api_origin_visit_latest(request: Request, origin_url: str): """ .. http:get:: /api/1/origin/(origin_url)/visit/latest/ Get information about the latest visit of a software origin. 
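As an illustration, the latest visit holding a snapshot can be fetched from Python (a minimal sketch assuming the public API instance and the third-party ``requests`` library; the origin URL mirrors the example below)::

    import requests

    visit = requests.get(
        "https://archive.softwareheritage.org/api/1/origin/"
        "https://github.com/hylang/hy/visit/latest/",
        params={"require_snapshot": "true"},
    ).json()
    print(visit["date"], visit["status"], visit["snapshot"])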
:param str origin_url: a software origin URL :query boolean require_snapshot: if true, only return a visit with a snapshot {common_headers} {return_origin_visit} :statuscode 200: no error :statuscode 404: requested origin or visit can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`origin/https://github.com/hylang/hy/visit/latest/` """ require_snapshot = request.query_params.get("require_snapshot", "false") return api_lookup( archive.lookup_origin_visit_latest, origin_url, bool(strtobool(require_snapshot)), lookup_similar_urls=False, notfound_msg=("No visit for origin {} found".format(origin_url)), enrich_fn=partial( enrich_origin_visit, with_origin_link=True, with_origin_visit_link=False ), request=request, ) @api_route( r"/origin/(?P.+)/visit/(?P[0-9]+)/", "api-1-origin-visit" ) -@api_doc("/origin/visit/") +@api_doc("/origin/visit/", category="Archive") @format_docstring(return_origin_visit=DOC_RETURN_ORIGIN_VISIT) def api_origin_visit(request: Request, visit_id: str, origin_url: str): """ .. http:get:: /api/1/origin/(origin_url)/visit/(visit_id)/ Get information about a specific visit of a software origin. :param str origin_url: a software origin URL :param int visit_id: a visit identifier {common_headers} {return_origin_visit} :statuscode 200: no error :statuscode 404: requested origin or visit can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`origin/https://github.com/hylang/hy/visit/1/` """ return api_lookup( archive.lookup_origin_visit, origin_url, int(visit_id), lookup_similar_urls=False, notfound_msg=("No visit {} for origin {} found".format(visit_id, origin_url)), enrich_fn=partial( enrich_origin_visit, with_origin_link=True, with_origin_visit_link=False ), request=request, ) @api_route( r"/origin/(?P.+)/intrinsic-metadata/", "api-origin-intrinsic-metadata" ) -@api_doc("/origin/intrinsic-metadata/") +@api_doc("/origin/intrinsic-metadata/", category="Metadata") @format_docstring() def api_origin_intrinsic_metadata(request: Request, origin_url: str): """ .. http:get:: /api/1/origin/(origin_url)/intrinsic-metadata Get intrinsic metadata of a software origin (as a JSON-LD/CodeMeta dictionary). :param string origin_url: the origin url :>json string ???: intrinsic metadata field of the origin {common_headers} :statuscode 200: no error :statuscode 404: requested origin can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`origin/https://github.com/python/cpython/intrinsic-metadata` """ return api_lookup( archive.lookup_origin_intrinsic_metadata, origin_url, notfound_msg=f"Origin with url {origin_url} not found", enrich_fn=enrich_origin, request=request, ) diff --git a/swh/web/api/views/ping.py b/swh/web/api/views/ping.py index b82472fa..7e297f00 100644 --- a/swh/web/api/views/ping.py +++ b/swh/web/api/views/ping.py @@ -1,23 +1,23 @@ # Copyright (C) 2020-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from rest_framework.request import Request from swh.web.api.apidoc import api_doc from swh.web.api.apiurls import api_route @api_route(r"/ping/", "api-1-ping") -@api_doc("/ping/", noargs=True) +@api_doc("/ping/", category="Miscellaneous", noargs=True) def ping(request: Request): """ .. http:get:: /api/1/ping/ A simple endpoint used to check if the API is working. 
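For instance, a quick availability check from Python (a minimal sketch assuming the public API instance and the third-party ``requests`` library)::

    import requests

    response = requests.get("https://archive.softwareheritage.org/api/1/ping/")
    response.raise_for_status()
    assert response.json() == "pong"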
:statuscode 200: no error """ return "pong" diff --git a/swh/web/api/views/raw.py b/swh/web/api/views/raw.py index e3f72019..424b9ec2 100644 --- a/swh/web/api/views/raw.py +++ b/swh/web/api/views/raw.py @@ -1,118 +1,118 @@ # Copyright (C) 2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from django.http import HttpResponse from rest_framework.exceptions import PermissionDenied from rest_framework.request import Request from swh.model import model from swh.model.git_objects import ( content_git_object, directory_git_object, release_git_object, revision_git_object, snapshot_git_object, ) from swh.model.hashutil import hash_to_hex from swh.model.swhids import CoreSWHID, ObjectType from swh.storage.algos.directory import directory_get from swh.storage.algos.snapshot import snapshot_get_all_branches from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.auth.utils import API_RAW_OBJECT_PERMISSION from swh.web.utils import SWHID_RE, archive from swh.web.utils.exc import NotFoundExc @api_route( f"/raw/(?P{SWHID_RE})/", "api-1-raw-object", throttle_scope="swh_raw_object", ) -@api_doc("/raw/") +@api_doc("/raw/", category="Archive") @format_docstring() def api_raw_object(request: Request, swhid: str): """ .. http:get:: /api/1/raw/(swhid)/ Get the object corresponding to the SWHID in raw form. This endpoint exposes the internal representation (see the ``*_git_object`` functions in :mod:`swh.model.git_objects`), and so can be used to fetch a binary blob which hashes to the same identifier. .. warning:: That endpoint is not publicly available and requires authentication and special user permission in order to be able to request it. :param string swhid: the object's SWHID :resheader Content-Type: application/octet-stream :statuscode 200: no error :statuscode 404: the requested object can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`raw/swh:1:snp:6a3a2cf0b2b90ce7ae1cf0a221ed68035b686f5a` """ if not (request.user.is_staff or request.user.has_perm(API_RAW_OBJECT_PERMISSION)): raise PermissionDenied() parsed_swhid = CoreSWHID.from_string(swhid) object_id = parsed_swhid.object_id object_type = parsed_swhid.object_type def not_found(): return NotFoundExc(f"Object with id {swhid} not found.") if object_type == ObjectType.CONTENT: results = archive.storage.content_find({"sha1_git": object_id}) if len(results) == 0: raise not_found() cnt = results[0] # `cnt.with_data()` unfortunately doesn't seem to work. 
if cnt.data is None: d = cnt.to_dict() d["data"] = archive.storage.content_get_data(cnt.sha1) cnt = model.Content.from_dict(d) assert cnt.data, f"Content {hash_to_hex(cnt.sha1)} ceased to exist" result = content_git_object(cnt) elif object_type == ObjectType.DIRECTORY: dir_ = directory_get(archive.storage, object_id) if dir_ is None: raise not_found() result = directory_git_object(dir_) elif object_type == ObjectType.REVISION: rev = archive.storage.revision_get([object_id], ignore_displayname=True)[0] if rev is None: raise not_found() result = revision_git_object(rev) elif object_type == ObjectType.RELEASE: rel = archive.storage.release_get([object_id], ignore_displayname=True)[0] if rel is None: raise not_found() result = release_git_object(rel) elif object_type == ObjectType.SNAPSHOT: snp = snapshot_get_all_branches(archive.storage, object_id) if snp is None: raise not_found() result = snapshot_git_object(snp) else: raise ValueError(f"Unexpected object type variant: {object_type}") response = HttpResponse(result, content_type="application/octet-stream") filename = swhid.replace(":", "_") + "_raw" response["Content-disposition"] = f"attachment; filename={filename}" return response diff --git a/swh/web/api/views/release.py b/swh/web/api/views/release.py index a276a7b4..edc6a5f5 100644 --- a/swh/web/api/views/release.py +++ b/swh/web/api/views/release.py @@ -1,62 +1,62 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from rest_framework.request import Request from swh.web.api import utils from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.views.utils import api_lookup from swh.web.utils import archive @api_route( r"/release/(?P[0-9a-f]+)/", "api-1-release", checksum_args=["sha1_git"] ) -@api_doc("/release/") +@api_doc("/release/", category="Archive") @format_docstring() def api_release(request: Request, sha1_git: str): """ .. http:get:: /api/1/release/(sha1_git)/ Get information about a release in the archive. Releases are identified by **sha1** checksums, compatible with Git tag identifiers. See :func:`swh.model.git_objects.release_git_object` in our data model module for details about how they are computed. :param string sha1_git: hexadecimal representation of the release **sha1_git** identifier {common_headers} :>json object author: information about the author of the release :>json string date: RFC3339 representation of the release date :>json string id: the release unique identifier :>json string message: the message associated to the release :>json string name: the name of the release :>json string target: the target identifier of the release :>json string target_type: the type of the target, can be either **release**, **revision**, **content**, **directory** :>json string target_url: a link to the adequate api url based on the target type :statuscode 200: no error :statuscode 400: an invalid **sha1_git** value has been provided :statuscode 404: requested release can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`release/208f61cc7a5dbc9879ae6e5c2f95891e270f09ef/` """ error_msg = "Release with sha1_git %s not found." 
% sha1_git return api_lookup( archive.lookup_release, sha1_git, notfound_msg=error_msg, enrich_fn=utils.enrich_release, request=request, ) diff --git a/swh/web/api/views/revision.py b/swh/web/api/views/revision.py index 09a74ac6..8d4ec8b0 100644 --- a/swh/web/api/views/revision.py +++ b/swh/web/api/views/revision.py @@ -1,218 +1,218 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Optional from django.http import HttpResponse from rest_framework.request import Request from swh.web.api import utils from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.views.utils import api_lookup from swh.web.utils import archive DOC_RETURN_REVISION = """ :>json object author: information about the author of the revision :>json object committer: information about the committer of the revision :>json string committer_date: RFC3339 representation of the commit date :>json string date: RFC3339 representation of the revision date :>json string directory: the unique identifier that revision points to :>json string directory_url: link to :http:get:`/api/1/directory/(sha1_git)/[(path)/]` to get information about the directory associated to the revision :>json string id: the revision unique identifier :>json boolean merge: whether or not the revision corresponds to a merge commit :>json string message: the message associated to the revision :>json array parents: the parents of the revision, i.e. the previous revisions that head directly to it, each entry of that array contains an unique parent revision identifier but also a link to :http:get:`/api/1/revision/(sha1_git)/` to get more information about it :>json string type: the type of the revision """ DOC_RETURN_REVISION_ARRAY = DOC_RETURN_REVISION.replace(":>json", ":>jsonarr") @api_route( r"/revision/(?P[0-9a-f]+)/", "api-1-revision", checksum_args=["sha1_git"] ) -@api_doc("/revision/") +@api_doc("/revision/", category="Archive") @format_docstring(return_revision=DOC_RETURN_REVISION) def api_revision(request: Request, sha1_git: str): """ .. http:get:: /api/1/revision/(sha1_git)/ Get information about a revision in the archive. Revisions are identified by **sha1** checksums, compatible with Git commit identifiers. See :func:`swh.model.git_objects.revision_git_object` in our data model module for details about how they are computed. :param string sha1_git: hexadecimal representation of the revision **sha1_git** identifier {common_headers} {return_revision} :statuscode 200: no error :statuscode 400: an invalid **sha1_git** value has been provided :statuscode 404: requested revision can not be found in the archive **Example:** .. 
parsed-literal:: :swh_web_api:`revision/aafb16d69fd30ff58afdd69036a26047f3aebdc6/` """ return api_lookup( archive.lookup_revision, sha1_git, notfound_msg="Revision with sha1_git {} not found.".format(sha1_git), enrich_fn=utils.enrich_revision, request=request, ) @api_route( r"/revision/(?P[0-9a-f]+)/raw/", "api-1-revision-raw-message", checksum_args=["sha1_git"], ) -@api_doc("/revision/raw/", tags=["hidden"]) +@api_doc("/revision/raw/", category="Archive", tags=["hidden"]) def api_revision_raw_message(request: Request, sha1_git: str): """Return the raw data of the message of revision identified by sha1_git""" raw = archive.lookup_revision_message(sha1_git) response = HttpResponse(raw["message"], content_type="application/octet-stream") response["Content-disposition"] = "attachment;filename=rev_%s_raw" % sha1_git return response @api_route( r"/revision/(?P[0-9a-f]+)/directory/", "api-1-revision-directory", checksum_args=["sha1_git"], ) @api_route( r"/revision/(?P[0-9a-f]+)/directory/(?P.+)/", "api-1-revision-directory", checksum_args=["sha1_git"], ) -@api_doc("/revision/directory/") +@api_doc("/revision/directory/", category="Archive") @format_docstring() def api_revision_directory( request: Request, sha1_git: str, dir_path: Optional[str] = None ): """ .. http:get:: /api/1/revision/(sha1_git)/directory/[(path)/] Get information about directory (entry) objects associated to revisions. Each revision is associated to a single "root" directory. This endpoint behaves like :http:get:`/api/1/directory/(sha1_git)/[(path)/]`, but operates on the root directory associated to a given revision. :param string sha1_git: hexadecimal representation of the revision **sha1_git** identifier :param string path: optional parameter to get information about the directory entry pointed by that relative path {common_headers} :>json array content: directory entries as returned by :http:get:`/api/1/directory/(sha1_git)/[(path)/]` :>json string path: path of directory from the revision root one :>json string revision: the unique revision identifier :>json string type: the type of the directory :statuscode 200: no error :statuscode 400: an invalid **sha1_git** value has been provided :statuscode 404: requested revision can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`revision/f1b94134a4b879bc55c3dacdb496690c8ebdc03f/directory/` """ rev_id, result = archive.lookup_directory_through_revision( {"sha1_git": sha1_git}, dir_path ) content = result["content"] if result["type"] == "dir": # dir_entries result["content"] = [ utils.enrich_directory_entry(entry, request=request) for entry in content ] elif result["type"] == "file": # content result["content"] = utils.enrich_content(content, request=request) elif result["type"] == "rev": # revision result["content"] = utils.enrich_revision(content, request=request) return result @api_route( r"/revision/(?P[0-9a-f]+)/log/", "api-1-revision-log", checksum_args=["sha1_git"], ) -@api_doc("/revision/log/") +@api_doc("/revision/log/", category="Archive") @format_docstring(return_revision_array=DOC_RETURN_REVISION_ARRAY) def api_revision_log(request: Request, sha1_git: str): """ .. http:get:: /api/1/revision/(sha1_git)/log/ Get a list of all revisions heading to a given one, in other words show the commit log. The revisions are returned in the breadth-first search order while visiting the revision graph. The number of revisions to return is also bounded by the **limit** query parameter. .. 
warning:: To get the full BFS traversal of the revision graph when the total number of revisions is greater than 1000, it is up to the client to keep track of the multiple branches of history when there's merge revisions in the returned objects. In other words, identify all the continuation points that need to be followed to get the full history through recursion. :param string sha1_git: hexadecimal representation of the revision **sha1_git** identifier :query int limit: maximum number of revisions to return when performing BFS traversal on the revision graph (default to 10, can not exceed 1000) {common_headers} {return_revision_array} :statuscode 200: no error :statuscode 400: an invalid **sha1_git** value has been provided :statuscode 404: head revision can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`revision/e1a315fa3fa734e2a6154ed7b5b9ae0eb8987aad/log/` """ limit = int(request.query_params.get("limit", "10")) limit = min(limit, 1000) error_msg = "Revision with sha1_git %s not found." % sha1_git revisions = api_lookup( archive.lookup_revision_log, sha1_git, limit, notfound_msg=error_msg, enrich_fn=utils.enrich_revision, request=request, ) return {"results": revisions} diff --git a/swh/web/api/views/snapshot.py b/swh/web/api/views/snapshot.py index 85e4771c..e64227fa 100644 --- a/swh/web/api/views/snapshot.py +++ b/swh/web/api/views/snapshot.py @@ -1,105 +1,105 @@ # Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from rest_framework.request import Request from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.utils import enrich_snapshot from swh.web.api.views.utils import api_lookup from swh.web.config import get_config from swh.web.utils import archive, reverse @api_route( r"/snapshot/(?P[0-9a-f]+)/", "api-1-snapshot", checksum_args=["snapshot_id"], ) -@api_doc("/snapshot/") +@api_doc("/snapshot/", category="Archive") @format_docstring() def api_snapshot(request: Request, snapshot_id: str): """ .. http:get:: /api/1/snapshot/(snapshot_id)/ Get information about a snapshot in the archive. A snapshot is a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. It represents a full picture of an origin at a given time. As well as pointing to other objects in the Software Heritage DAG, branches can also be aliases, in which case their target is the name of another branch in the same snapshot, or dangling, in which case the target is unknown. A snapshot identifier is a salted sha1. See :func:`swh.model.git_objects.snapshot_git_object` in our data model module for details about how they are computed. 
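As an illustration, all branches of a large snapshot can be collected by following the ``Link`` header returned by this endpoint (a minimal sketch assuming the public API instance and the third-party ``requests`` library; the snapshot identifier mirrors the example below)::

    import requests

    url = (
        "https://archive.softwareheritage.org/api/1/snapshot/"
        "6a3a2cf0b2b90ce7ae1cf0a221ed68035b686f5a/"
    )
    branches = {}
    while url:
        response = requests.get(url)
        response.raise_for_status()
        branches.update(response.json()["branches"])
        url = response.links.get("next", {}).get("url")
    print(f"{len(branches)} branches retrieved")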
:param sha1 snapshot_id: a snapshot identifier :query str branches_from: optional parameter used to skip branches whose name is lesser than it before returning them :query int branches_count: optional parameter used to restrain the amount of returned branches (default to 1000) :query str target_types: optional comma separated list parameter used to filter the target types of branch to return (possible values that can be contained in that list are ``content``, ``directory``, ``revision``, ``release``, ``snapshot`` or ``alias``) {common_headers} {resheader_link} :>json object branches: object containing all branches associated to the snapshot,for each of them the associated target type and id are given but also a link to get information about that target :>json string id: the unique identifier of the snapshot :statuscode 200: no error :statuscode 400: an invalid snapshot identifier has been provided :statuscode 404: requested snapshot can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`snapshot/6a3a2cf0b2b90ce7ae1cf0a221ed68035b686f5a/` """ snapshot_content_max_size = get_config()["snapshot_content_max_size"] branches_from = request.GET.get("branches_from", "") branches_count = int(request.GET.get("branches_count", snapshot_content_max_size)) target_types_str = request.GET.get("target_types", None) target_types = target_types_str.split(",") if target_types_str else None results = api_lookup( archive.lookup_snapshot, snapshot_id, branches_from, branches_count, target_types, branch_name_exclude_prefix=None, notfound_msg="Snapshot with id {} not found.".format(snapshot_id), enrich_fn=enrich_snapshot, request=request, ) response = {"results": results, "headers": {}} if results["next_branch"] is not None: response["headers"]["link-next"] = reverse( "api-1-snapshot", url_args={"snapshot_id": snapshot_id}, query_params={ "branches_from": results["next_branch"], "branches_count": str(branches_count), "target_types": ",".join(target_types) if target_types else None, }, request=request, ) return response diff --git a/swh/web/api/views/stat.py b/swh/web/api/views/stat.py index 70bff321..2508df3a 100644 --- a/swh/web/api/views/stat.py +++ b/swh/web/api/views/stat.py @@ -1,52 +1,52 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from rest_framework.request import Request from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.utils import archive @api_route(r"/stat/counters/", "api-1-stat-counters") -@api_doc("/stat/counters/", noargs=True) +@api_doc("/stat/counters/", category="Miscellaneous", noargs=True) @format_docstring() def api_stats(request: Request): """ .. http:get:: /api/1/stat/counters/ Get statistics about the content of the archive. :>json number content: current number of content objects (aka files) in the archive :>json number directory: current number of directory objects in the archive :>json number origin: current number of software origins (an origin is a "place" where code source can be found, e.g. a git repository, a tarball, ...) 
in the archive :>json number origin_visit: current number of visits on software origins to fill the archive :>json number person: current number of persons (code source authors or committers) in the archive :>json number release: current number of releases objects in the archive :>json number revision: current number of revision objects (aka commits) in the archive :>json number skipped_content: current number of content objects (aka files) which where not inserted in the archive :>json number snapshot: current number of snapshot objects (aka set of named branches) in the archive {common_headers} :statuscode 200: no error **Example:** .. parsed-literal:: :swh_web_api:`stat/counters/` """ return archive.stat_counters() diff --git a/swh/web/api/views/utils.py b/swh/web/api/views/utils.py index 8f0147d2..f3b4346c 100644 --- a/swh/web/api/views/utils.py +++ b/swh/web/api/views/utils.py @@ -1,91 +1,100 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from types import GeneratorType from typing import Any, Callable, Dict, List, Optional, Tuple, Union from django.http import HttpRequest from rest_framework.decorators import api_view from rest_framework.request import Request from rest_framework.response import Response from swh.web.api.apiurls import APIUrls, api_route from swh.web.utils.exc import NotFoundExc EnrichFunction = Callable[[Dict[str, str], Optional[HttpRequest]], Dict[str, str]] EnrichFunctionSearchResult = Callable[ [Tuple[List[Dict[str, Any]], Optional[str]], Optional[HttpRequest]], Tuple[List[Dict[str, Any]], Optional[str]], ] def api_lookup( lookup_fn: Callable[..., Any], *args: Any, notfound_msg: Optional[str] = "Object not found", enrich_fn: Optional[Union[EnrichFunction, EnrichFunctionSearchResult]] = None, request: Optional[HttpRequest] = None, **kwargs: Any, ): r""" Capture a redundant behavior of: - looking up the backend with a criteria (be it an identifier or checksum) passed to the function lookup_fn - if nothing is found, raise an NotFoundExc exception with error message notfound_msg. - Otherwise if something is returned: - either as list, map or generator, map the enrich_fn function to it and return the resulting data structure as list. - either as dict and pass to enrich_fn and return the dict enriched. Args: - lookup_fn: function expects one criteria and optional supplementary \*args. - \*args: supplementary arguments to pass to lookup_fn. - notfound_msg: if nothing matching the criteria is found, raise NotFoundExc with this error message. - enrich_fn: Function to use to enrich the result returned by lookup_fn. Default to the identity function if not provided. - request: Input HTTP request that will be provided as parameter to enrich_fn. Raises: NotFoundExp or whatever `lookup_fn` raises. 
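Example (a sketch mirroring how the origin lookup endpoint calls this helper; ``archive.lookup_origin`` and ``enrich_origin`` come from that view and are not imported in this module)::

    return api_lookup(
        archive.lookup_origin,
        {"url": origin_url},
        lookup_similar_urls=False,
        notfound_msg=f"Origin with url {origin_url} not found.",
        enrich_fn=enrich_origin,
        request=request,
    )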
""" def _enrich_fn_noop(x, request): return x if enrich_fn is None: enrich_fn = _enrich_fn_noop res = lookup_fn(*args, **kwargs) if res is None: raise NotFoundExc(notfound_msg) if isinstance(res, (list, GeneratorType)) or type(res) == map: return [enrich_fn(x, request) for x in res] return enrich_fn(res, request) @api_view(["GET", "HEAD"]) def api_home(request: Request): return Response({}, template_name="api.html") APIUrls.add_url_pattern(r"^api/$", api_home, view_name="api-1-homepage") @api_route(r"/", "api-1-endpoints") def api_endpoints(request): """Display the list of opened api endpoints.""" - routes = APIUrls.get_app_endpoints().copy() - for route, doc in routes.items(): + routes_by_category = {} + for route, doc in APIUrls.get_app_endpoints().items(): doc["doc_intro"] = doc["docstring"].split("\n\n")[0] - # Return a list of routes with consistent ordering - env = {"doc_routes": sorted(routes.items())} + routes_by_category.setdefault(doc["category"], []).append(doc) + + for routes in routes_by_category.values(): + routes.sort(key=lambda route: route["route"]) + + # sort routes by alphabetical category name, with 'miscellaneous' at the end + misc_routes = routes_by_category.pop("Miscellaneous") + sorted_routes = sorted(routes_by_category.items()) + sorted_routes.append(("Miscellaneous", misc_routes)) + + env = {"doc_routes": sorted_routes} return Response(env, template_name="api-endpoints.html") diff --git a/swh/web/api/views/vault.py b/swh/web/api/views/vault.py index 78e5abe4..df07c9b6 100644 --- a/swh/web/api/views/vault.py +++ b/swh/web/api/views/vault.py @@ -1,512 +1,518 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Any, Dict from django.http import HttpResponse from django.shortcuts import redirect from rest_framework.request import Request from swh.model.hashutil import hash_to_hex from swh.model.swhids import CoreSWHID, ObjectType from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.api.views.utils import api_lookup from swh.web.utils import SWHID_RE, archive, query, reverse from swh.web.utils.exc import BadInputExc ###################################################### # Common # XXX: a bit spaghetti. Would be better with class-based views. 
def _dispatch_cook_progress(request, bundle_type: str, swhid: CoreSWHID): if request.method == "GET": return api_lookup( archive.vault_progress, bundle_type, swhid, notfound_msg=f"Cooking of {swhid} was never requested.", request=request, ) elif request.method == "POST": email = request.POST.get("email", request.GET.get("email", None)) return api_lookup( archive.vault_cook, bundle_type, swhid, email, notfound_msg=f"{swhid} not found.", request=request, ) def _vault_response( vault_response: Dict[str, Any], add_legacy_items: bool ) -> Dict[str, Any]: d = { "fetch_url": vault_response["fetch_url"], "progress_message": vault_response["progress_msg"], "id": vault_response["task_id"], "status": vault_response["task_status"], "swhid": str(vault_response["swhid"]), } if add_legacy_items: d["obj_type"] = vault_response["swhid"].object_type.name.lower() d["obj_id"] = hash_to_hex(vault_response["swhid"].object_id) return d ###################################################### # Flat bundles @api_route( f"/vault/flat/(?P{SWHID_RE})/", "api-1-vault-cook-flat", methods=["GET", "POST"], throttle_scope="swh_vault_cooking", never_cache=True, ) -@api_doc("/vault/flat/") +@api_doc("/vault/flat/", category="Batch download") @format_docstring() def api_vault_cook_flat(request: Request, swhid: str): """ .. http:get:: /api/1/vault/flat/(swhid)/ .. http:post:: /api/1/vault/flat/(swhid)/ Request the cooking of a simple archive, typically for a directory. That endpoint enables to create a vault cooking task for a directory through a POST request or check the status of a previously created one through a GET request. Once the cooking task has been executed, the resulting archive can be downloaded using the dedicated endpoint :http:get:`/api/1/vault/flat/(swhid)/raw/`. Then to extract the cooked directory in the current one, use:: $ tar xvf path/to/swh_1_*.tar.gz :param string swhid: the object's SWHID :query string email: e-mail to notify when the archive is ready {common_headers} :>json string fetch_url: the url from which to download the archive once it has been cooked (see :http:get:`/api/1/vault/flat/(swhid)/raw/`) :>json string progress_message: message describing the cooking task progress :>json number id: the cooking task id :>json string status: the cooking task status (either **new**, **pending**, **done** or **failed**) :>json string swhid: the identifier of the object to cook :statuscode 200: no error :statuscode 400: an invalid directory identifier has been provided :statuscode 404: requested directory did not receive any cooking request yet (in case of GET) or can not be found in the archive (in case of POST) """ parsed_swhid = CoreSWHID.from_string(swhid) if parsed_swhid.object_type == ObjectType.DIRECTORY: res = _dispatch_cook_progress(request, "flat", parsed_swhid) res["fetch_url"] = reverse( "api-1-vault-fetch-flat", url_args={"swhid": swhid}, request=request, ) return _vault_response(res, add_legacy_items=False) elif parsed_swhid.object_type == ObjectType.CONTENT: raise BadInputExc( "Content objects do not need to be cooked, " "use `/api/1/content/raw/` instead." ) elif parsed_swhid.object_type == ObjectType.REVISION: # TODO: support revisions too? (the vault allows it) raise BadInputExc( "Only directories can be cooked as 'flat' bundles. " "Use `/api/1/vault/gitfast/` to cook revisions, as gitfast bundles." 
) else: raise BadInputExc("Only directories can be cooked as 'flat' bundles.") @api_route( r"/vault/directory/(?P[0-9a-f]+)/", "api-1-vault-cook-directory", methods=["GET", "POST"], checksum_args=["dir_id"], throttle_scope="swh_vault_cooking", never_cache=True, ) -@api_doc("/vault/directory/", tags=["deprecated"]) +@api_doc("/vault/directory/", category="Batch download", tags=["deprecated"]) @format_docstring() def api_vault_cook_directory(request: Request, dir_id: str): """ .. http:get:: /api/1/vault/directory/(dir_id)/ This endpoint was replaced by :http:get:`/api/1/vault/flat/(swhid)/` """ _, obj_id = query.parse_hash_with_algorithms_or_throws( dir_id, ["sha1"], "Only sha1_git is supported." ) swhid = f"swh:1:dir:{obj_id.hex()}" res = _dispatch_cook_progress(request, "flat", CoreSWHID.from_string(swhid)) res["fetch_url"] = reverse( "api-1-vault-fetch-flat", url_args={"swhid": swhid}, request=request, ) return _vault_response(res, add_legacy_items=True) @api_route( f"/vault/flat/(?P{SWHID_RE})/raw/", "api-1-vault-fetch-flat", ) -@api_doc("/vault/flat/raw/") +@api_doc("/vault/flat/raw/", category="Batch download") def api_vault_fetch_flat(request: Request, swhid: str): """ .. http:get:: /api/1/vault/flat/(swhid)/raw/ Fetch the cooked archive for a flat bundle. See :http:get:`/api/1/vault/flat/(swhid)/` to get more details on 'flat' bundle cooking. :param string swhid: the SWHID of the object to cook :resheader Content-Type: application/gzip :statuscode 200: no error :statuscode 404: requested directory did not receive any cooking request yet (in case of GET) or can not be found in the archive (in case of POST) """ res = api_lookup( archive.vault_fetch, "flat", CoreSWHID.from_string(swhid), notfound_msg=f"Cooked archive for {swhid} not found.", request=request, ) fname = "{}.tar.gz".format(swhid) response = HttpResponse(res, content_type="application/gzip") response["Content-disposition"] = "attachment; filename={}".format( fname.replace(":", "_") ) return response @api_route( r"/vault/directory/(?P[0-9a-f]+)/raw/", "api-1-vault-fetch-directory", checksum_args=["dir_id"], ) -@api_doc("/vault/directory/raw/", tags=["hidden", "deprecated"]) +@api_doc( + "/vault/directory/raw/", category="Batch download", tags=["hidden", "deprecated"] +) def api_vault_fetch_directory(request: Request, dir_id: str): """ .. http:get:: /api/1/vault/directory/(dir_id)/raw/ This endpoint was replaced by :http:get:`/api/1/vault/flat/(swhid)/raw/` """ _, obj_id = query.parse_hash_with_algorithms_or_throws( dir_id, ["sha1"], "Only sha1_git is supported." ) rev_flat_raw_url = reverse( "api-1-vault-fetch-flat", url_args={"swhid": f"swh:1:dir:{dir_id}"} ) return redirect(rev_flat_raw_url) ###################################################### # gitfast bundles @api_route( f"/vault/gitfast/(?P{SWHID_RE})/", "api-1-vault-cook-gitfast", methods=["GET", "POST"], throttle_scope="swh_vault_cooking", never_cache=True, ) -@api_doc("/vault/gitfast/") +@api_doc("/vault/gitfast/", category="Batch download") @format_docstring() def api_vault_cook_gitfast(request: Request, swhid: str): """ .. http:get:: /api/1/vault/gitfast/(swhid)/ .. http:post:: /api/1/vault/gitfast/(swhid)/ Request the cooking of a gitfast archive for a revision or check its cooking status. That endpoint enables to create a vault cooking task for a revision through a POST request or check the status of a previously created one through a GET request. 
Once the cooking task has been executed, the resulting gitfast archive can be downloaded using the dedicated endpoint :http:get:`/api/1/vault/gitfast/(swhid)/raw/`. Then to import the revision in the current directory, use:: $ git init $ zcat path/to/swh_1_rev_*.gitfast.gz | git fast-import $ git checkout HEAD :param string swhid: the revision's permanent identifiers :query string email: e-mail to notify when the gitfast archive is ready {common_headers} :>json string fetch_url: the url from which to download the archive once it has been cooked (see :http:get:`/api/1/vault/gitfast/(swhid)/raw/`) :>json string progress_message: message describing the cooking task progress :>json number id: the cooking task id :>json string status: the cooking task status (new/pending/done/failed) :>json string swhid: the identifier of the object to cook :statuscode 200: no error :statuscode 404: requested directory did not receive any cooking request yet (in case of GET) or can not be found in the archive (in case of POST) """ parsed_swhid = CoreSWHID.from_string(swhid) if parsed_swhid.object_type == ObjectType.REVISION: res = _dispatch_cook_progress(request, "gitfast", parsed_swhid) res["fetch_url"] = reverse( "api-1-vault-fetch-gitfast", url_args={"swhid": swhid}, request=request, ) return _vault_response(res, add_legacy_items=False) elif parsed_swhid.object_type == ObjectType.CONTENT: raise BadInputExc( "Content objects do not need to be cooked, " "use `/api/1/content/raw/` instead." ) elif parsed_swhid.object_type == ObjectType.DIRECTORY: raise BadInputExc( "Only revisions can be cooked as 'gitfast' bundles. " "Use `/api/1/vault/flat/` to cook directories, as flat bundles." ) else: raise BadInputExc("Only revisions can be cooked as 'gitfast' bundles.") @api_route( r"/vault/revision/(?P[0-9a-f]+)/gitfast/", "api-1-vault-cook-revision_gitfast", methods=["GET", "POST"], checksum_args=["rev_id"], throttle_scope="swh_vault_cooking", never_cache=True, ) -@api_doc("/vault/revision/gitfast/", tags=["deprecated"]) +@api_doc("/vault/revision/gitfast/", category="Batch download", tags=["deprecated"]) @format_docstring() def api_vault_cook_revision_gitfast(request: Request, rev_id: str): """ .. http:get:: /api/1/vault/revision/(rev_id)/gitfast/ This endpoint was replaced by :http:get:`/api/1/vault/gitfast/(swhid)/` """ _, obj_id = query.parse_hash_with_algorithms_or_throws( rev_id, ["sha1"], "Only sha1_git is supported." ) swhid = f"swh:1:rev:{obj_id.hex()}" res = _dispatch_cook_progress(request, "gitfast", CoreSWHID.from_string(swhid)) res["fetch_url"] = reverse( "api-1-vault-fetch-gitfast", url_args={"swhid": swhid}, request=request, ) return _vault_response(res, add_legacy_items=True) @api_route( f"/vault/gitfast/(?P{SWHID_RE})/raw/", "api-1-vault-fetch-gitfast", ) -@api_doc("/vault/gitfast/raw/") +@api_doc("/vault/gitfast/raw/", category="Batch download") def api_vault_fetch_revision_gitfast(request: Request, swhid: str): """ .. http:get:: /api/1/vault/gitfast/(swhid)/raw/ Fetch the cooked gitfast archive for a revision. See :http:get:`/api/1/vault/gitfast/(swhid)/` to get more details on gitfast cooking. 
:param string rev_id: the revision's sha1 identifier :resheader Content-Type: application/gzip :statuscode 200: no error :statuscode 404: requested directory did not receive any cooking request yet (in case of GET) or can not be found in the archive (in case of POST) """ res = api_lookup( archive.vault_fetch, "gitfast", CoreSWHID.from_string(swhid), notfound_msg="Cooked archive for {} not found.".format(swhid), request=request, ) fname = "{}.gitfast.gz".format(swhid) response = HttpResponse(res, content_type="application/gzip") response["Content-disposition"] = "attachment; filename={}".format( fname.replace(":", "_") ) return response @api_route( r"/vault/revision/(?P[0-9a-f]+)/gitfast/raw/", "api-1-vault-fetch-revision_gitfast", checksum_args=["rev_id"], ) -@api_doc("/vault/revision_gitfast/raw/", tags=["hidden", "deprecated"]) +@api_doc( + "/vault/revision_gitfast/raw/", + category="Batch download", + tags=["hidden", "deprecated"], +) def _api_vault_revision_gitfast_raw(request: Request, rev_id: str): """ .. http:get:: /api/1/vault/revision/(rev_id)/gitfast/raw/ This endpoint was replaced by :http:get:`/api/1/vault/gitfast/(swhid)/raw/` """ rev_gitfast_raw_url = reverse( "api-1-vault-fetch-gitfast", url_args={"swhid": f"swh:1:rev:{rev_id}"} ) return redirect(rev_gitfast_raw_url) ###################################################### # git_bare bundles @api_route( f"/vault/git-bare/(?P{SWHID_RE})/", "api-1-vault-cook-git-bare", methods=["GET", "POST"], throttle_scope="swh_vault_cooking", never_cache=True, ) -@api_doc("/vault/git-bare/") +@api_doc("/vault/git-bare/", category="Batch download") @format_docstring() def api_vault_cook_git_bare(request: Request, swhid: str): """ .. http:get:: /api/1/vault/git-bare/(swhid)/ .. http:post:: /api/1/vault/git-bare/(swhid)/ Request the cooking of a git-bare archive for a revision or check its cooking status. That endpoint enables to create a vault cooking task for a revision through a POST request or check the status of a previously created one through a GET request. Once the cooking task has been executed, the resulting git-bare archive can be downloaded using the dedicated endpoint :http:get:`/api/1/vault/git-bare/(swhid)/raw/`. Then to import the revision in the current directory, use:: $ tar -xf path/to/swh_1_rev_*.git.tar $ git clone swh:1:rev:*.git new_repository (replace ``swh:1:rev:*`` with the SWHID of the requested revision) This will create a directory called ``new_repository``, which is a git repository containing the requested objects. 
:param string swhid: the revision's permanent identifier :query string email: e-mail to notify when the git-bare archive is ready {common_headers} :>json string fetch_url: the url from which to download the archive once it has been cooked (see :http:get:`/api/1/vault/git-bare/(swhid)/raw/`) :>json string progress_message: message describing the cooking task progress :>json number id: the cooking task id :>json string status: the cooking task status (new/pending/done/failed) :>json string swhid: the identifier of the object to cook :statuscode 200: no error :statuscode 404: requested directory did not receive any cooking request yet (in case of GET) or can not be found in the archive (in case of POST) """ parsed_swhid = CoreSWHID.from_string(swhid) if parsed_swhid.object_type == ObjectType.REVISION: res = _dispatch_cook_progress(request, "git_bare", parsed_swhid) res["fetch_url"] = reverse( "api-1-vault-fetch-git-bare", url_args={"swhid": swhid}, request=request, ) return _vault_response(res, add_legacy_items=False) elif parsed_swhid.object_type == ObjectType.CONTENT: raise BadInputExc( "Content objects do not need to be cooked, " "use `/api/1/content/raw/` instead." ) elif parsed_swhid.object_type == ObjectType.DIRECTORY: raise BadInputExc( "Only revisions can be cooked as 'git-bare' bundles. " "Use `/api/1/vault/flat/` to cook directories, as flat bundles." ) else: raise BadInputExc("Only revisions can be cooked as 'git-bare' bundles.") @api_route( f"/vault/git-bare/(?P{SWHID_RE})/raw/", "api-1-vault-fetch-git-bare", ) -@api_doc("/vault/git-bare/raw/") +@api_doc("/vault/git-bare/raw/", category="Batch download") def api_vault_fetch_revision_git_bare(request: Request, swhid: str): """ .. http:get:: /api/1/vault/git-bare/(swhid)/raw/ Fetch the cooked git-bare archive for a revision. See :http:get:`/api/1/vault/git-bare/(swhid)/` to get more details on git-bare cooking. 
:param string swhid: the revision's permanent identifier :resheader Content-Type: application/x-tar :statuscode 200: no error :statuscode 404: requested directory did not receive any cooking request yet (in case of GET) or can not be found in the archive (in case of POST) """ res = api_lookup( archive.vault_fetch, "git_bare", CoreSWHID.from_string(swhid), notfound_msg="Cooked archive for {} not found.".format(swhid), request=request, ) fname = "{}.git.tar".format(swhid) response = HttpResponse(res, content_type="application/x-tar") response["Content-disposition"] = "attachment; filename={}".format( fname.replace(":", "_") ) return response diff --git a/swh/web/save_code_now/api_views.py b/swh/web/save_code_now/api_views.py index 68d977aa..d42bb786 100644 --- a/swh/web/save_code_now/api_views.py +++ b/swh/web/save_code_now/api_views.py @@ -1,127 +1,127 @@ # Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import os from typing import Optional, cast from rest_framework.request import Request from swh.web.api.apidoc import api_doc, format_docstring from swh.web.api.apiurls import api_route from swh.web.auth.utils import ( API_SAVE_ORIGIN_PERMISSION, SWH_AMBASSADOR_PERMISSION, privileged_user, ) from swh.web.save_code_now.origin_save import ( create_save_origin_request, get_savable_visit_types, get_save_origin_requests, ) def _savable_visit_types() -> str: docstring = "" if os.environ.get("DJANGO_SETTINGS_MODULE") != "swh.web.settings.tests": visit_types = sorted(get_savable_visit_types()) docstring = "" for visit_type in visit_types[:-1]: docstring += f"**{visit_type}**, " docstring += f"and **{visit_types[-1]}**" return docstring @api_route( r"/origin/save/(?P.+)/url/(?P.+)/", "api-1-save-origin", methods=["GET", "POST"], throttle_scope="swh_save_origin", never_cache=True, ) -@api_doc("/origin/save/") +@api_doc("/origin/save/", category="Request archival") @format_docstring(visit_types=_savable_visit_types()) def api_save_origin(request: Request, visit_type: str, origin_url: str): """ .. http:get:: /api/1/origin/save/(visit_type)/url/(origin_url)/ .. http:post:: /api/1/origin/save/(visit_type)/url/(origin_url)/ Request the saving of a software origin into the archive or check the status of previously created save requests. That endpoint enables to create a saving task for a software origin through a POST request. Depending of the provided origin url, the save request can either be: * immediately **accepted**, for well known code hosting providers like for instance GitHub or GitLab * **rejected**, in case the url is blacklisted by Software Heritage * **put in pending state** until a manual check is done in order to determine if it can be loaded or not Once a saving request has been accepted, its associated saving task status can then be checked through a GET request on the same url. 
Returned status can either be: * **not created**: no saving task has been created * **not yet scheduled**: saving task has been created but its execution has not yet been scheduled * **scheduled**: the task execution has been scheduled * **succeeded**: the saving task has been successfully executed * **failed**: the saving task has been executed but it failed When issuing a POST request an object will be returned while a GET request will return an array of objects (as multiple save requests might have been submitted for the same origin). :param string visit_type: the type of visit to perform (currently the supported types are {visit_types}) :param string origin_url: the url of the origin to save {common_headers} :>json string origin_url: the url of the origin to save :>json string visit_type: the type of visit to perform :>json string save_request_date: the date (in iso format) the save request was issued :>json string save_request_status: the status of the save request, either **accepted**, **rejected** or **pending** :>json string save_task_status: the status of the origin saving task, either **not created**, **not yet scheduled**, **scheduled**, **succeeded** or **failed** :>json string visit_date: the date (in iso format) of the visit if a visit occurred, null otherwise. :>json string visit_status: the status of the visit, either **full**, **partial**, **not_found** or **failed** if a visit occurred, null otherwise. :>json string note: optional note giving details about the save request, for instance why it has been rejected :statuscode 200: no error :statuscode 400: an invalid visit type or origin url has been provided :statuscode 403: the provided origin url is blacklisted :statuscode 404: no save requests have been found for a given origin """ data = request.data or {} if request.method == "POST": sor = create_save_origin_request( visit_type, origin_url, privileged_user( request, permissions=[SWH_AMBASSADOR_PERMISSION, API_SAVE_ORIGIN_PERMISSION], ), user_id=cast(Optional[int], request.user.id), **data, ) del sor["id"] return sor else: sors = get_save_origin_requests(visit_type, origin_url) for sor in sors: del sor["id"] return sors diff --git a/swh/web/tests/api/test_apidoc.py b/swh/web/tests/api/test_apidoc.py index e5259773..ae161774 100644 --- a/swh/web/tests/api/test_apidoc.py +++ b/swh/web/tests/api/test_apidoc.py @@ -1,481 +1,481 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information import textwrap import pytest from rest_framework.response import Response from swh.storage.exc import StorageAPIError, StorageDBError from swh.web.api.apidoc import _parse_httpdomain_doc, api_doc from swh.web.api.apiurls import api_route from swh.web.tests.helpers import check_api_get_responses, check_html_get_response from swh.web.utils import prettify_html, reverse from swh.web.utils.exc import BadInputExc, ForbiddenExc, NotFoundExc _httpdomain_doc = """ .. http:get:: /api/1/revision/(sha1_git)/ Get information about a revision in the archive. Revisions are identified by **sha1** checksums, compatible with Git commit identifiers. See :func:`swh.model.git_objects.revision_git_object` in our data model module for details about how they are computed. 
:param string sha1_git: hexadecimal representation of the revision **sha1_git** identifier :reqheader Accept: the requested response content type, either ``application/json`` (default) or ``application/yaml`` :resheader Content-Type: this depends on :http:header:`Accept` header of request :<json int n: sample input integer :<json string s: sample input string :<json array a: sample input array :>json object author: information about the author of the revision :>json object committer: information about the committer of the revision :>json string committer_date: RFC3339 representation of the commit date :>json string date: RFC3339 representation of the revision date :>json string directory: the unique identifier that revision points to :>json string directory_url: link to :http:get:`/api/1/directory/(sha1_git)/[(path)/]` to get information about the directory associated to the revision :>json string id: the revision unique identifier :>json boolean merge: whether or not the revision corresponds to a merge commit :>json string message: the message associated to the revision :>json array parents: the parents of the revision, i.e. the previous revisions that head directly to it, each entry of that array contains a unique parent revision identifier but also a link to :http:get:`/api/1/revision/(sha1_git)/` to get more information about it :>json string type: the type of the revision :statuscode 200: no error :statuscode 400: an invalid **sha1_git** value has been provided :statuscode 404: requested revision can not be found in the archive **Example:** .. parsed-literal:: :swh_web_api:`revision/aafb16d69fd30ff58afdd69036a26047f3aebdc6/` """ _exception_http_code = { BadInputExc: 400, ForbiddenExc: 403, NotFoundExc: 404, Exception: 500, StorageAPIError: 503, StorageDBError: 503, } def test_apidoc_nodoc_failure(): with pytest.raises(Exception): - @api_doc("/my/nodoc/url/") + @api_doc("/my/nodoc/url/", "test") def apidoc_nodoc_tester(request, arga=0, argb=0): return Response(arga + argb) @api_route(r"/some/(?P<myarg>[0-9]+)/(?P<myotherarg>[0-9]+)/", "api-1-some-doc-route") -@api_doc("/some/doc/route/") +@api_doc("/some/doc/route/", category="test") def apidoc_route(request, myarg, myotherarg, akw=0): """ Sample doc """ return {"result": int(myarg) + int(myotherarg) + akw} def test_apidoc_route_doc(client): url = reverse("api-1-some-doc-route-doc") check_html_get_response(client, url, status_code=200, template_used="apidoc.html") def test_apidoc_route_fn(api_client): url = reverse("api-1-some-doc-route", url_args={"myarg": 1, "myotherarg": 1}) check_api_get_responses(api_client, url, status_code=200) @api_route(r"/test/error/(?P<exc_name>.+)/", "api-1-test-error") -@api_doc("/test/error/") +@api_doc("/test/error/", category="test") def apidoc_test_error_route(request, exc_name): """ Sample doc """ for e in _exception_http_code.keys(): if e.__name__ == exc_name: raise e("Error") def test_apidoc_error(api_client): for exc, code in _exception_http_code.items(): url = reverse("api-1-test-error", url_args={"exc_name": exc.__name__}) check_api_get_responses(api_client, url, status_code=code) @api_route( r"/some/full/(?P<myarg>[0-9]+)/(?P<myotherarg>[0-9]+)/", "api-1-some-complete-doc-route", ) -@api_doc("/some/complete/doc/route/") +@api_doc("/some/complete/doc/route/", category="test") def apidoc_full_stack(request, myarg, myotherarg, akw=0): """ Sample doc """ return {"result": int(myarg) + int(myotherarg) + akw} def test_apidoc_full_stack_doc(client): url = reverse("api-1-some-complete-doc-route-doc") check_html_get_response(client, url, status_code=200, template_used="apidoc.html") def test_apidoc_full_stack_fn(api_client): url = reverse(
"api-1-some-complete-doc-route", url_args={"myarg": 1, "myotherarg": 1} ) check_api_get_responses(api_client, url, status_code=200) @api_route(r"/test/post/only/", "api-1-test-post-only", methods=["POST"]) -@api_doc("/test/post/only/") +@api_doc("/test/post/only/", category="test") def apidoc_test_post_only(request, exc_name): """ Sample doc """ return {"result": "some data"} def test_apidoc_post_only(client): # a dedicated view accepting GET requests should have # been created to display the HTML documentation url = reverse("api-1-test-post-only-doc") check_html_get_response(client, url, status_code=200, template_used="apidoc.html") def test_api_doc_parse_httpdomain(): doc_data = { "description": "", "urls": [], "args": [], "params": [], "resheaders": [], "reqheaders": [], "input_type": "", "inputs": [], "return_type": "", "returns": [], "status_codes": [], "examples": [], } _parse_httpdomain_doc(_httpdomain_doc, doc_data) expected_urls = [ { "rule": "/api/1/revision/ **\\(sha1_git\\)** /", "methods": ["GET", "HEAD", "OPTIONS"], } ] assert "urls" in doc_data assert doc_data["urls"] == expected_urls expected_description = ( "Get information about a revision in the archive. " "Revisions are identified by **sha1** checksums, " "compatible with Git commit identifiers. See " "**swh.model.git_objects.revision_git_object** in " "our data model module for details about how they " "are computed." ) assert "description" in doc_data assert doc_data["description"] == expected_description expected_args = [ { "name": "sha1_git", "type": "string", "doc": ( "hexadecimal representation of the revision " "**sha1_git** identifier" ), } ] assert "args" in doc_data assert doc_data["args"] == expected_args expected_params = [] assert "params" in doc_data assert doc_data["params"] == expected_params expected_reqheaders = [ { "doc": ( "the requested response content type, either " "``application/json`` (default) or ``application/yaml``" ), "name": "Accept", } ] assert "reqheaders" in doc_data assert doc_data["reqheaders"] == expected_reqheaders expected_resheaders = [ {"doc": "this depends on **Accept** header of request", "name": "Content-Type"} ] assert "resheaders" in doc_data assert doc_data["resheaders"] == expected_resheaders expected_statuscodes = [ {"code": "200", "doc": "no error"}, {"code": "400", "doc": "an invalid **sha1_git** value has been provided"}, {"code": "404", "doc": "requested revision can not be found in the archive"}, ] assert "status_codes" in doc_data assert doc_data["status_codes"] == expected_statuscodes expected_input_type = "object" assert "input_type" in doc_data assert doc_data["input_type"] == expected_input_type expected_inputs = [ {"name": "n", "type": "int", "doc": "sample input integer"}, {"name": "s", "type": "string", "doc": "sample input string"}, {"name": "a", "type": "array", "doc": "sample input array"}, ] assert "inputs" in doc_data assert doc_data["inputs"] == expected_inputs expected_return_type = "object" assert "return_type" in doc_data assert doc_data["return_type"] == expected_return_type expected_returns = [ { "name": "author", "type": "object", "doc": "information about the author of the revision", }, { "name": "committer", "type": "object", "doc": "information about the committer of the revision", }, { "name": "committer_date", "type": "string", "doc": "RFC3339 representation of the commit date", }, { "name": "date", "type": "string", "doc": "RFC3339 representation of the revision date", }, { "name": "directory", "type": "string", "doc": "the unique identifier 
that revision points to", }, { "name": "directory_url", "type": "string", "doc": ( "link to `/api/1/directory/ `_ " "to get information about the directory associated to " "the revision" ), }, {"name": "id", "type": "string", "doc": "the revision unique identifier"}, { "name": "merge", "type": "boolean", "doc": "whether or not the revision corresponds to a merge commit", }, { "name": "message", "type": "string", "doc": "the message associated to the revision", }, { "name": "parents", "type": "array", "doc": ( "the parents of the revision, i.e. the previous revisions " "that head directly to it, each entry of that array " "contains an unique parent revision identifier but also a " "link to `/api/1/revision/ `_ " "to get more information about it" ), }, {"name": "type", "type": "string", "doc": "the type of the revision"}, ] assert "returns" in doc_data assert doc_data["returns"] == expected_returns expected_examples = ["/api/1/revision/aafb16d69fd30ff58afdd69036a26047f3aebdc6/"] assert "examples" in doc_data assert doc_data["examples"] == expected_examples @api_route(r"/post/endpoint/", "api-1-post-endpoint", methods=["POST"]) -@api_doc("/post/endpoint/") +@api_doc("/post/endpoint/", category="test") def apidoc_test_post_endpoint(request): """ .. http:post:: /api/1/post/endpoint/ Endpoint documentation :json object : an object whose keys are input SWHIDs and values objects with the following keys: * **known (bool)**: whether the object was found """ pass def test_apidoc_input_output_doc(client): url = reverse("api-1-post-endpoint-doc") rv = check_html_get_response( client, url, status_code=200, template_used="apidoc.html" ) input_html_doc = textwrap.indent( ( '
\n' '
\n' " array\n" "
\n" '
\n' "

\n" " Input array of SWHIDs\n" "

\n" "
\n" "
\n" ), " " * 7, ) output_html_doc = textwrap.indent( ( '
\n' '
\n' " object\n" "
\n" '
\n' "

\n" " an object containing the following keys:\n" "

\n" '
\n' "
\n" "
    \n" "
  • \n" "

    \n" " \n" " <swhid> (object)\n" " \n" " : an object whose keys are input SWHIDs" " and values objects with the following keys:\n" "

    \n" "
    \n" '
      \n' "
    • \n" "

      \n" " \n" " known (bool)\n" " \n" " : whether the object was found\n" "

      \n" "
    • \n" "
    \n" "
    \n" "
  • \n" "
\n" "
\n" "
\n" "
\n" "
\n" ), " " * 7, ) html = prettify_html(rv.content) assert input_html_doc in html assert output_html_doc in html @api_route(r"/endpoint/links/in/doc/", "api-1-endpoint-links-in-doc") -@api_doc("/endpoint/links/in/doc/") +@api_doc("/endpoint/links/in/doc/", category="test") def apidoc_test_endpoint_with_links_in_doc(request): """ .. http:get:: /api/1/post/endpoint/ Endpoint documentation with links to :http:get:`/api/1/content/[(hash_type):](hash)/`, :http:get:`/api/1/directory/(sha1_git)/[(path)/]` and `archive `_. """ pass def test_apidoc_with_links(client): url = reverse("api-1-endpoint-links-in-doc") rv = check_html_get_response( client, url, status_code=200, template_used="apidoc.html" ) html = prettify_html(rv.content) first_link = textwrap.indent( ( '\n' " /api/1/content/\n" "" ), " " * 9, ) second_link = textwrap.indent( ( '\n' " /api/1/directory/\n" "" ), " " * 9, ) third_link = textwrap.indent( ( '\n' " archive\n" "" ), " " * 9, ) assert first_link in html assert second_link in html assert third_link in html