diff --git a/swh/web/api/views/snapshot.py b/swh/web/api/views/snapshot.py
index c6994f29..dd2bd9db 100644
--- a/swh/web/api/views/snapshot.py
+++ b/swh/web/api/views/snapshot.py
@@ -1,103 +1,104 @@
 # Copyright (C) 2018-2019  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from swh.web.api.apidoc import api_doc, format_docstring
 from swh.web.api.apiurls import api_route
 from swh.web.api.utils import enrich_snapshot
 from swh.web.api.views.utils import api_lookup
 from swh.web.common import archive
 from swh.web.common.utils import reverse
 from swh.web.config import get_config


 @api_route(
     r"/snapshot/(?P<snapshot_id>[0-9a-f]+)/",
     "api-1-snapshot",
     checksum_args=["snapshot_id"],
 )
 @api_doc("/snapshot/")
 @format_docstring()
 def api_snapshot(request, snapshot_id):
     """
     .. http:get:: /api/1/snapshot/(snapshot_id)/

         Get information about a snapshot in the archive.

         A snapshot is a set of named branches, which are pointers to objects
         at any level of the Software Heritage DAG. It represents a full
         picture of an origin at a given time.

         As well as pointing to other objects in the Software Heritage DAG,
         branches can also be aliases, in which case their target is the name
         of another branch in the same snapshot, or dangling, in which case
         the target is unknown.

         A snapshot identifier is a salted sha1. See
         :func:`swh.model.identifiers.snapshot_identifier` in our data model
         module for details about how they are computed.

         :param sha1 snapshot_id: a snapshot identifier

         :query str branches_from: optional parameter used to skip branches
             whose name is lexicographically less than the provided value
             before returning them
         :query int branches_count: optional parameter used to restrain
             the amount of returned branches (defaults to 1000)
         :query str target_types: optional comma separated list parameter
             used to filter the target types of branch to return (possible
             values that can be contained in that list are ``content``,
             ``directory``, ``revision``, ``release``, ``snapshot`` or
             ``alias``)

         {common_headers}
         {resheader_link}

         :>json object branches: object containing all branches associated to
             the snapshot, for each of them the associated target type and id
             are given but also a link to get information about that target
         :>json string id: the unique identifier of the snapshot

         :statuscode 200: no error
         :statuscode 400: an invalid snapshot identifier has been provided
         :statuscode 404: requested snapshot can not be found in the archive

         **Example:**

         .. parsed-literal::

             :swh_web_api:`snapshot/6a3a2cf0b2b90ce7ae1cf0a221ed68035b686f5a/`
     """
     snapshot_content_max_size = get_config()["snapshot_content_max_size"]

     branches_from = request.GET.get("branches_from", "")
     branches_count = int(request.GET.get("branches_count", snapshot_content_max_size))
     target_types = request.GET.get("target_types", None)
     target_types = target_types.split(",") if target_types else None

     results = api_lookup(
         archive.lookup_snapshot,
         snapshot_id,
         branches_from,
         branches_count,
         target_types,
+        branch_name_exclude_prefix=None,
         notfound_msg="Snapshot with id {} not found.".format(snapshot_id),
         enrich_fn=enrich_snapshot,
         request=request,
     )

     response = {"results": results, "headers": {}}

     if results["next_branch"] is not None:
         response["headers"]["link-next"] = reverse(
             "api-1-snapshot",
             url_args={"snapshot_id": snapshot_id},
             query_params={
                 "branches_from": results["next_branch"],
                 "branches_count": branches_count,
                 "target_types": target_types,
             },
             request=request,
         )

     return response
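
A quick sketch (not part of the patch) of how the Link-header pagination set up above is meant to be consumed; it assumes the `requests` package and the public archive endpoint, with the snapshot id taken from the docstring example:

import requests

url = (
    "https://archive.softwareheritage.org/api/1/"
    "snapshot/6a3a2cf0b2b90ce7ae1cf0a221ed68035b686f5a/"
)
params = {"branches_count": 100}
while url:
    resp = requests.get(url, params=params)
    resp.raise_for_status()
    for name, branch in resp.json()["branches"].items():
        print(name, branch["target_type"], branch["target"])
    # the view emits a Link header built from results["next_branch"];
    # requests exposes it through resp.links
    url = resp.links.get("next", {}).get("url")
    params = None  # the next link already embeds the query parameters
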
""" def _enrich_fn_noop(x, request): return x if enrich_fn is None: enrich_fn = _enrich_fn_noop - res = lookup_fn(*args) + res = lookup_fn(*args, **kwargs) if res is None: raise NotFoundExc(notfound_msg) if isinstance(res, (list, GeneratorType)) or type(res) == map: return [enrich_fn(x, request=request) for x in res] return enrich_fn(res, request=request) @api_view(["GET", "HEAD"]) def api_home(request): return Response({}, template_name="api/api.html") APIUrls.add_url_pattern(r"^$", api_home, view_name="api-1-homepage") @api_route(r"/", "api-1-endpoints") def api_endpoints(request): """Display the list of opened api endpoints. """ routes = APIUrls.get_app_endpoints().copy() for route, doc in routes.items(): doc["doc_intro"] = doc["docstring"].split("\n\n")[0] # Return a list of routes with consistent ordering env = {"doc_routes": sorted(routes.items())} return Response(env, template_name="api/endpoints.html") diff --git a/swh/web/common/archive.py b/swh/web/common/archive.py index c1553333..3f9824c5 100644 --- a/swh/web/common/archive.py +++ b/swh/web/common/archive.py @@ -1,1417 +1,1421 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import itertools import os import re from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union from urllib.parse import urlparse from swh.model import hashutil from swh.model.identifiers import CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT from swh.model.model import OriginVisit, Revision from swh.storage.algos import diff, revisions_walker from swh.storage.algos.origin import origin_get_latest_visit_status from swh.storage.algos.snapshot import snapshot_get_latest, snapshot_resolve_alias from swh.vault.exc import NotFoundExc as VaultNotFoundExc from swh.web import config from swh.web.common import converters, query from swh.web.common.exc import BadInputExc, NotFoundExc from swh.web.common.typing import ( OriginInfo, OriginMetadataInfo, OriginVisitInfo, PagedResult, ) search = config.search() storage = config.storage() vault = config.vault() idx_storage = config.indexer_storage() MAX_LIMIT = 50 # Top limit the users can ask for def _first_element(lst): """Returns the first element in the provided list or None if it is empty or None""" return next(iter(lst or []), None) def lookup_multiple_hashes(hashes): """Lookup the passed hashes in a single DB connection, using batch processing. Args: An array of {filename: X, sha1: Y}, string X, hex sha1 string Y. Returns: The same array with elements updated with elem['found'] = true if the hash is present in storage, elem['found'] = false if not. """ hashlist = [hashutil.hash_to_bytes(elem["sha1"]) for elem in hashes] content_missing = storage.content_missing_per_sha1(hashlist) missing = [hashutil.hash_to_hex(x) for x in content_missing] for x in hashes: x.update({"found": True}) for h in hashes: if h["sha1"] in missing: h["found"] = False return hashes def lookup_expression(expression, last_sha1, per_page): """Lookup expression in raw content. 
diff --git a/swh/web/common/archive.py b/swh/web/common/archive.py
index c1553333..3f9824c5 100644
--- a/swh/web/common/archive.py
+++ b/swh/web/common/archive.py
@@ -1,1417 +1,1421 @@
 # Copyright (C) 2015-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from collections import defaultdict
 import itertools
 import os
 import re
 from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union
 from urllib.parse import urlparse

 from swh.model import hashutil
 from swh.model.identifiers import CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT
 from swh.model.model import OriginVisit, Revision
 from swh.storage.algos import diff, revisions_walker
 from swh.storage.algos.origin import origin_get_latest_visit_status
 from swh.storage.algos.snapshot import snapshot_get_latest, snapshot_resolve_alias
 from swh.vault.exc import NotFoundExc as VaultNotFoundExc
 from swh.web import config
 from swh.web.common import converters, query
 from swh.web.common.exc import BadInputExc, NotFoundExc
 from swh.web.common.typing import (
     OriginInfo,
     OriginMetadataInfo,
     OriginVisitInfo,
     PagedResult,
 )

 search = config.search()
 storage = config.storage()
 vault = config.vault()
 idx_storage = config.indexer_storage()

 MAX_LIMIT = 50  # Top limit the users can ask for


 def _first_element(lst):
     """Returns the first element in the provided list or None if it is empty
     or None"""
     return next(iter(lst or []), None)


 def lookup_multiple_hashes(hashes):
     """Lookup the passed hashes in a single DB connection, using batch
     processing.

     Args:
         An array of {filename: X, sha1: Y}, string X, hex sha1 string Y.

     Returns:
         The same array with elements updated with elem['found'] = true if
         the hash is present in storage, elem['found'] = false if not.

     """
     hashlist = [hashutil.hash_to_bytes(elem["sha1"]) for elem in hashes]
     content_missing = storage.content_missing_per_sha1(hashlist)
     missing = [hashutil.hash_to_hex(x) for x in content_missing]
     for x in hashes:
         x.update({"found": True})
     for h in hashes:
         if h["sha1"] in missing:
             h["found"] = False
     return hashes


 def lookup_expression(expression, last_sha1, per_page):
     """Lookup expression in raw content.

     Args:
         expression (str): An expression to lookup through raw indexed
             content
         last_sha1 (str): Last sha1 seen
         per_page (int): Number of results per page

     Yields:
         ctags whose content match the expression

     """
     limit = min(per_page, MAX_LIMIT)
     ctags = idx_storage.content_ctags_search(
         expression, last_sha1=last_sha1, limit=limit
     )
     for ctag in ctags:
         ctag = converters.from_swh(ctag, hashess={"id"})
         ctag["sha1"] = ctag["id"]
         ctag.pop("id")
         yield ctag


 def lookup_hash(q: str) -> Dict[str, Any]:
     """Check if the storage contains a given content checksum and return it if
     found.

     Args:
         q: query string of the form <hash_algo:hash>

     Returns:
         Dict with key found containing the hash info if the
         hash is present, None if not.

     """
     algo, hash_ = query.parse_hash(q)
     found = _first_element(storage.content_find({algo: hash_}))
     if found:
         content = converters.from_content(found.to_dict())
     else:
         content = None
     return {"found": content, "algo": algo}


 def search_hash(q: str) -> Dict[str, bool]:
     """Search storage for a given content checksum.

     Args:
         q: query string of the form <hash_algo:hash>

     Returns:
         Dict with key found to True or False, according to
         whether the checksum is present or not

     """
     algo, hash_ = query.parse_hash(q)
     found = _first_element(storage.content_find({algo: hash_}))
     return {"found": found is not None}


 def _lookup_content_sha1(q: str) -> Optional[bytes]:
     """Given a possible input, query for the content's sha1.

     Args:
         q: query string of the form <hash_algo:hash>

     Returns:
         binary sha1 if found or None

     """
     algo, hash_ = query.parse_hash(q)
     if algo != "sha1":
         hashes = _first_element(storage.content_find({algo: hash_}))
         if not hashes:
             return None
         return hashes.sha1
     return hash_


 def lookup_content_ctags(q):
     """Return ctags information from a specified content.

     Args:
         q: query string of the form <hash_algo:hash>

     Yields:
         ctags information (dict) list if the content is found.

     """
     sha1 = _lookup_content_sha1(q)
     if not sha1:
         return None
     ctags = list(idx_storage.content_ctags_get([sha1]))
     if not ctags:
         return None
     for ctag in ctags:
         yield converters.from_swh(ctag, hashess={"id"})


 def lookup_content_filetype(q):
     """Return filetype information from a specified content.

     Args:
         q: query string of the form <hash_algo:hash>

     Yields:
         filetype information (dict) list if the content is found.

     """
     sha1 = _lookup_content_sha1(q)
     if not sha1:
         return None
     filetype = _first_element(list(idx_storage.content_mimetype_get([sha1])))
     if not filetype:
         return None
     return converters.from_filetype(filetype.to_dict())


 def lookup_content_language(q):
     """Always returns None.

     This used to return language information from a specified content,
     but this is currently disabled.

     Args:
         q: query string of the form <hash_algo:hash>

     Yields:
         language information (dict) list if the content is found.

     """
     return None


 def lookup_content_license(q):
     """Return license information from a specified content.

     Args:
         q: query string of the form <hash_algo:hash>

     Yields:
         license information (dict) list if the content is found.

     """
     sha1 = _lookup_content_sha1(q)
     if not sha1:
         return None
     licenses = list(idx_storage.content_fossology_license_get([sha1]))

     if not licenses:
         return None
     license_dicts = [license.to_dict() for license in licenses]
     for license_dict in license_dicts:
         del license_dict["id"]
     lic = {
         "id": sha1,
         "facts": license_dicts,
     }
     return converters.from_swh(lic, hashess={"id"})


 def lookup_origin(origin: OriginInfo) -> OriginInfo:
     """Return information about the origin matching dict origin.

     Args:
         origin: origin's dict with 'url' key

     Returns:
         origin information as dict.

     """
     origin_urls = [origin["url"]]
     if origin["url"]:
         # handle case when user provided an origin url with a trailing
         # slash while the url in storage does not have it (e.g. GitHub)
         if origin["url"].endswith("/"):
             origin_urls.append(origin["url"][:-1])
         # handle case when user provided an origin url without a trailing
         # slash while the url in storage has it (e.g. Debian source package)
         else:
             origin_urls.append(f"{origin['url']}/")
         try:
             # handle case where the "://" character sequence was mangled into ":/"
             parsed_url = urlparse(origin["url"])
             if (
                 parsed_url.scheme
                 and not parsed_url.netloc
                 and origin["url"].startswith(f"{parsed_url.scheme}:/")
                 and not origin["url"].startswith(f"{parsed_url.scheme}://")
             ):
                 origin_urls.append(
                     origin["url"].replace(
                         f"{parsed_url.scheme}:/", f"{parsed_url.scheme}://"
                     )
                 )
         except Exception:
             pass
     origins = [o for o in storage.origin_get(origin_urls) if o is not None]
     if not origins:
         msg = "Origin with url %s not found!" % origin["url"]
         raise NotFoundExc(msg)
     return converters.from_origin(origins[0].to_dict())


 def lookup_origins(
     page_token: Optional[str], limit: int = 100
 ) -> PagedResult[OriginInfo]:
     """Get list of archived software origins in a paginated way.

     Origins are sorted by id before returning them

     Args:
         page_token: opaque token used to fetch the next page of results
         limit: the maximum number of origins to return

     Returns:
         Page of OriginInfo

     """
     page = storage.origin_list(page_token=page_token, limit=limit)
     return PagedResult(
         [converters.from_origin(o.to_dict()) for o in page.results],
         next_page_token=page.next_page_token,
     )


 def search_origin(
     url_pattern: str,
     limit: int = 50,
     with_visit: bool = False,
     visit_types: Optional[List[str]] = None,
     page_token: Optional[str] = None,
 ) -> Tuple[List[OriginInfo], Optional[str]]:
     """Search for origins whose urls contain a provided string pattern
     or match a provided regular expression.

     Args:
         url_pattern: the string pattern to search for in origin urls
         limit: the maximum number of found origins to return
         with_visit: Whether origins with no visit are to be filtered out
         visit_types: Only origins having any of the provided visit types
             (e.g. git, svn, pypi) will be returned
         page_token: opaque string used to get the next results of a search

     Returns:
         list of origin information as dict.

     """
     if page_token:
         assert isinstance(page_token, str)

     if search:
         page_result = search.origin_search(
             url_pattern=url_pattern,
             page_token=page_token,
             with_visit=with_visit,
             visit_types=visit_types,
             limit=limit,
         )
         origins = [converters.from_origin(ori_dict) for ori_dict in page_result.results]
     else:
         # Fallback to swh-storage if swh-search is not configured
         search_words = [re.escape(word) for word in url_pattern.split()]
         if len(search_words) >= 7:
             url_pattern = ".*".join(search_words)
         else:
             pattern_parts = []
             for permut in itertools.permutations(search_words):
                 pattern_parts.append(".*".join(permut))
             url_pattern = "|".join(pattern_parts)

         page_result = storage.origin_search(
             url_pattern,
             page_token=page_token,
             with_visit=with_visit,
             limit=limit,
             visit_types=visit_types,
             regexp=True,
         )
         origins = [converters.from_origin(ori.to_dict()) for ori in page_result.results]

     return (origins, page_result.next_page_token)
 def search_origin_metadata(
     fulltext: str, limit: int = 50
 ) -> Iterable[OriginMetadataInfo]:
     """Search for origins whose metadata match a provided string pattern.

     Args:
         fulltext: the string pattern to search for in origin metadata
         limit: the maximum number of found origins to return

     Returns:
         Iterable of origin metadata information for existing origins

     """
     results = []
     if search and config.get_config()["metadata_search_backend"] == "swh-search":
         page_result = search.origin_search(metadata_pattern=fulltext, limit=limit,)
         matches = idx_storage.origin_intrinsic_metadata_get(
             [r["url"] for r in page_result.results]
         )
     else:
         matches = idx_storage.origin_intrinsic_metadata_search_fulltext(
             conjunction=[fulltext], limit=limit
         )

     matches = [match.to_dict() for match in matches]
     origins = storage.origin_get([match["id"] for match in matches])
     for origin, match in zip(origins, matches):
         if not origin:
             continue
         match["from_revision"] = hashutil.hash_to_hex(match["from_revision"])
         del match["id"]
         results.append(OriginMetadataInfo(url=origin.url, metadata=match))

     return results


 def lookup_origin_intrinsic_metadata(origin_url: str) -> Dict[str, Any]:
     """Return intrinsic metadata for the origin matching the given origin url.

     Args:
         origin_url: origin url

     Raises:
         NotFoundExc when the origin is not found

     Returns:
         origin metadata.

     """
     origins = [origin_url]
     origin_info = storage.origin_get(origins)[0]
     if not origin_info:
         raise NotFoundExc(f"Origin with url {origin_url} not found!")

     match = _first_element(idx_storage.origin_intrinsic_metadata_get(origins))
     result = {}
     if match:
         result = match.metadata
     return result


 def _to_sha1_bin(sha1_hex):
     _, sha1_git_bin = query.parse_hash_with_algorithms_or_throws(
         sha1_hex, ["sha1"], "Only sha1_git is supported."  # HACK: sha1_git really
     )
     return sha1_git_bin


 def _check_directory_exists(sha1_git, sha1_git_bin):
     if len(list(storage.directory_missing([sha1_git_bin]))):
         raise NotFoundExc("Directory with sha1_git %s not found" % sha1_git)


 def lookup_directory(sha1_git):
     """Return information about the directory with id sha1_git.

     Args:
         sha1_git as string

     Returns:
         directory information as dict.

     """
     empty_dir_sha1 = "4b825dc642cb6eb9a060e54bf8d69288fbee4904"

     if sha1_git == empty_dir_sha1:
         return []

     sha1_git_bin = _to_sha1_bin(sha1_git)

     _check_directory_exists(sha1_git, sha1_git_bin)

     directory_entries = storage.directory_ls(sha1_git_bin)
     return map(converters.from_directory_entry, directory_entries)


 def lookup_directory_with_path(sha1_git: str, path: str) -> Dict[str, Any]:
     """Return directory information for the entry with specified path w.r.t.
     the root directory pointed to by sha1_git.

     Args:
         sha1_git: sha1_git corresponding to the directory to which we
             append paths to (hopefully) find the entry
         path: the relative path to the entry starting from the root
             directory pointed to by sha1_git

     Returns:
         Directory entry information as dict.

     Raises:
         NotFoundExc if the directory entry is not found

     """
     sha1_git_bin = _to_sha1_bin(sha1_git)

     _check_directory_exists(sha1_git, sha1_git_bin)

     paths = path.strip(os.path.sep).split(os.path.sep)
     queried_dir = storage.directory_entry_get_by_path(
         sha1_git_bin, [p.encode("utf-8") for p in paths]
     )

     if not queried_dir:
         raise NotFoundExc(
             f"Directory entry with path {path} from root directory {sha1_git} not found"
         )

     return converters.from_directory_entry(queried_dir)


 def lookup_release(release_sha1_git: str) -> Dict[str, Any]:
     """Return information about the release with sha1 release_sha1_git.

     Args:
         release_sha1_git: The release's sha1 as hexadecimal

     Returns:
         Release information as dict.

     Raises:
         ValueError if the identifier provided is not of sha1 nature.

     """
     sha1_git_bin = _to_sha1_bin(release_sha1_git)
     release = _first_element(storage.release_get([sha1_git_bin]))
     if not release:
         raise NotFoundExc(f"Release with sha1_git {release_sha1_git} not found.")
     return converters.from_release(release)


 def lookup_release_multiple(sha1_git_list) -> Iterator[Optional[Dict[str, Any]]]:
     """Return information about the releases identified with
     their sha1_git identifiers.

     Args:
         sha1_git_list: A list of release sha1_git identifiers

     Returns:
         Iterator of Release metadata information as dict.

     Raises:
         ValueError if the identifier provided is not of sha1 nature.

     """
     sha1_bin_list = [_to_sha1_bin(sha1_git) for sha1_git in sha1_git_list]
     releases = storage.release_get(sha1_bin_list)
     for r in releases:
         if r is not None:
             yield converters.from_release(r)
         else:
             yield None


 def lookup_revision(rev_sha1_git) -> Dict[str, Any]:
     """Return information about the revision with sha1 revision_sha1_git.

     Args:
         revision_sha1_git: The revision's sha1 as hexadecimal

     Returns:
         Revision information as dict.

     Raises:
         ValueError if the identifier provided is not of sha1 nature.
         NotFoundExc if there is no revision with the provided sha1_git.

     """
     sha1_git_bin = _to_sha1_bin(rev_sha1_git)
     revision = storage.revision_get([sha1_git_bin])[0]
     if not revision:
         raise NotFoundExc(f"Revision with sha1_git {rev_sha1_git} not found.")
     return converters.from_revision(revision)


 def lookup_revision_multiple(sha1_git_list) -> Iterator[Optional[Dict[str, Any]]]:
     """Return information about the revisions identified with their sha1_git
     identifiers.

     Args:
         sha1_git_list: A list of revision sha1_git identifiers

     Yields:
         revision information as dict if the revision exists, None otherwise.

     Raises:
         ValueError if the identifier provided is not of sha1 nature.

     """
     sha1_bin_list = [_to_sha1_bin(sha1_git) for sha1_git in sha1_git_list]
     revisions = storage.revision_get(sha1_bin_list)
     for revision in revisions:
         if revision is not None:
             yield converters.from_revision(revision)
         else:
             yield None
 def lookup_revision_message(rev_sha1_git) -> Dict[str, bytes]:
     """Return the raw message of the revision with sha1 revision_sha1_git.

     Args:
         revision_sha1_git: The revision's sha1 as hexadecimal

     Returns:
         Decoded revision message as dict {'message': <the_message>}

     Raises:
         ValueError if the identifier provided is not of sha1 nature.
         NotFoundExc if the revision is not found, or if it has no message

     """
     sha1_git_bin = _to_sha1_bin(rev_sha1_git)
     revision = storage.revision_get([sha1_git_bin])[0]
     if not revision:
         raise NotFoundExc(f"Revision with sha1_git {rev_sha1_git} not found.")
     if not revision.message:
         raise NotFoundExc(f"No message for revision with sha1_git {rev_sha1_git}.")
     return {"message": revision.message}


 def _lookup_revision_id_by(origin, branch_name, timestamp):
     def _get_snapshot_branch(snapshot, branch_name):
         snapshot = lookup_snapshot(
-            visit["snapshot"], branches_from=branch_name, branches_count=10
+            visit["snapshot"],
+            branches_from=branch_name,
+            branches_count=10,
+            branch_name_exclude_prefix=None,
         )
         branch = None
         if branch_name in snapshot["branches"]:
             branch = snapshot["branches"][branch_name]
         return branch

     if isinstance(origin, int):
         origin = {"id": origin}
     elif isinstance(origin, str):
         origin = {"url": origin}
     else:
         raise TypeError('"origin" must be an int or a string.')

     from swh.web.common.origin_visits import get_origin_visit

     visit = get_origin_visit(origin, visit_ts=timestamp)
     branch = _get_snapshot_branch(visit["snapshot"], branch_name)
     rev_id = None
     if branch and branch["target_type"] == "revision":
         rev_id = branch["target"]
     elif branch and branch["target_type"] == "alias":
         branch = _get_snapshot_branch(visit["snapshot"], branch["target"])
         if branch and branch["target_type"] == "revision":
             rev_id = branch["target"]

     if not rev_id:
         raise NotFoundExc(
             "Revision for origin %s and branch %s not found."
             % (origin.get("url"), branch_name)
         )

     return rev_id


 def lookup_revision_by(origin, branch_name="HEAD", timestamp=None):
     """Lookup revision by origin, snapshot branch name and visit timestamp.

     If branch_name is not provided, lookup using 'HEAD' as default.
     If timestamp is not provided, use the most recent.

     Args:
         origin (Union[int,str]): origin of the revision
         branch_name (str): snapshot branch name
         timestamp (str/int): origin visit time frame

     Returns:
         dict: The revision matching the criterions

     Raises:
         NotFoundExc if no revision corresponds to the criterion

     """
     rev_id = _lookup_revision_id_by(origin, branch_name, timestamp)
     return lookup_revision(rev_id)


 def lookup_revision_log(rev_sha1_git, limit):
     """Lookup revision log by revision id.

     Args:
         rev_sha1_git (str): The revision's sha1 as hexadecimal
         limit (int): the maximum number of revisions returned

     Returns:
         list: Revision log as list of revision dicts

     Raises:
         ValueError: if the identifier provided is not of sha1 nature.
         swh.web.common.exc.NotFoundExc: if there is no revision with the
             provided sha1_git.

     """
     lookup_revision(rev_sha1_git)
     sha1_git_bin = _to_sha1_bin(rev_sha1_git)
     revision_entries = storage.revision_log([sha1_git_bin], limit)
     return map(converters.from_revision, revision_entries)


 def lookup_revision_log_by(origin, branch_name, timestamp, limit):
     """Lookup revision by origin, snapshot branch name and visit timestamp.

     Args:
         origin (Union[int,str]): origin of the revision
         branch_name (str): snapshot branch
         timestamp (str/int): origin visit time frame
         limit (int): the maximum number of revisions returned

     Returns:
         list: Revision log as list of revision dicts

     Raises:
         swh.web.common.exc.NotFoundExc: if no revision corresponds to the
             criterion

     """
     rev_id = _lookup_revision_id_by(origin, branch_name, timestamp)
     return lookup_revision_log(rev_id, limit)


 def lookup_revision_with_context_by(
     origin, branch_name, timestamp, sha1_git, limit=100
 ):
     """Return information about revision sha1_git, limited to the
     sub-graph of all transitive parents of sha1_git_root.
     sha1_git_root being resolved through the lookup of a revision by origin,
     branch_name and ts.

     In other words, sha1_git is an ancestor of sha1_git_root.

     Args:
         origin: origin of the revision.
         branch_name: revision's branch.
         timestamp: revision's time frame.
         sha1_git: one of sha1_git_root's ancestors.
         limit: limit the lookup to 100 revisions back.

     Returns:
         Pair of (root_revision, revision).
         Information on sha1_git if it is an ancestor of sha1_git_root
         including children leading to sha1_git_root

     Raises:
         - BadInputExc in case of unknown algo_hash or bad hash.
         - NotFoundExc if either revision is not found or if sha1_git is not an
           ancestor of sha1_git_root.

     """
     rev_root_id = _lookup_revision_id_by(origin, branch_name, timestamp)

     rev_root_id_bin = hashutil.hash_to_bytes(rev_root_id)

     rev_root = storage.revision_get([rev_root_id_bin])[0]

     return (
         converters.from_revision(rev_root) if rev_root else None,
         lookup_revision_with_context(rev_root, sha1_git, limit),
     )
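
Note that branch resolution in _lookup_revision_id_by follows at most one level of aliasing: a HEAD alias pointing at a branch that is itself an alias would not resolve. A compact sketch of the two-step walk, using a hand-built branches mapping (illustrative data, not archive content):

branches = {
    "HEAD": {"target_type": "alias", "target": "refs/heads/main"},
    "refs/heads/main": {"target_type": "revision", "target": "d4c5f0a"},
}

branch = branches.get("HEAD")
if branch and branch["target_type"] == "alias":
    # second (and last) hop: resolve the alias target by name
    branch = branches.get(branch["target"])
rev_id = branch["target"] if branch and branch["target_type"] == "revision" else None
assert rev_id == "d4c5f0a"
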
 def lookup_revision_with_context(
     sha1_git_root: Union[str, Dict[str, Any], Revision], sha1_git: str, limit: int = 100
 ) -> Dict[str, Any]:
     """Return information about revision sha1_git, limited to the
     sub-graph of all transitive parents of sha1_git_root.

     In other words, sha1_git is an ancestor of sha1_git_root.

     Args:
         sha1_git_root: latest revision. The type is either a sha1 (as an hex
             string) or a non converted dict.
         sha1_git: one of sha1_git_root's ancestors
         limit: limit the lookup to 100 revisions back

     Returns:
         Information on sha1_git if it is an ancestor of sha1_git_root
         including children leading to sha1_git_root

     Raises:
         BadInputExc in case of unknown algo_hash or bad hash
         NotFoundExc if either revision is not found or if sha1_git is not an
             ancestor of sha1_git_root

     """
     sha1_git_bin = _to_sha1_bin(sha1_git)

     revision = storage.revision_get([sha1_git_bin])[0]
     if not revision:
         raise NotFoundExc(f"Revision {sha1_git} not found")

     if isinstance(sha1_git_root, str):
         sha1_git_root_bin = _to_sha1_bin(sha1_git_root)

         revision_root = storage.revision_get([sha1_git_root_bin])[0]
         if not revision_root:
             raise NotFoundExc(f"Revision root {sha1_git_root} not found")
     elif isinstance(sha1_git_root, Revision):
         sha1_git_root_bin = sha1_git_root.id
     else:
         sha1_git_root_bin = sha1_git_root["id"]

     revision_log = storage.revision_log([sha1_git_root_bin], limit)

     parents: Dict[str, List[str]] = {}
     children = defaultdict(list)

     for rev in revision_log:
         rev_id = rev["id"]
         parents[rev_id] = []
         for parent_id in rev["parents"]:
             parents[rev_id].append(parent_id)
             children[parent_id].append(rev_id)

     if revision.id not in parents:
         raise NotFoundExc(f"Revision {sha1_git} is not an ancestor of {sha1_git_root}")

     revision_d = revision.to_dict()
     revision_d["children"] = children[revision.id]
     return converters.from_revision(revision_d)


 def lookup_directory_with_revision(sha1_git, dir_path=None, with_data=False):
     """Return information on directory pointed by revision with sha1_git.
     If dir_path is not provided, display top level directory.
     Otherwise, display the directory pointed by dir_path (if it exists).

     Args:
         sha1_git: revision's hash.
         dir_path: optional directory pointed to by that revision.
         with_data: boolean that indicates to retrieve the raw data if the path
             resolves to a content. Default to False (for the api)

     Returns:
         Information on the directory pointed to by that revision.

     Raises:
         BadInputExc in case of unknown algo_hash or bad hash.
         NotFoundExc either if the revision is not found or the path referenced
             does not exist.
         NotImplementedError in case dir_path exists but does not reference a
             type 'dir' or 'file'.

     """
     sha1_git_bin = _to_sha1_bin(sha1_git)
     revision = storage.revision_get([sha1_git_bin])[0]
     if not revision:
         raise NotFoundExc(f"Revision {sha1_git} not found")
     dir_sha1_git_bin = revision.directory
     if dir_path:
         paths = dir_path.strip(os.path.sep).split(os.path.sep)
         entity = storage.directory_entry_get_by_path(
             dir_sha1_git_bin, list(map(lambda p: p.encode("utf-8"), paths))
         )
         if not entity:
             raise NotFoundExc(
                 "Directory or File '%s' pointed to by revision %s not found"
                 % (dir_path, sha1_git)
             )
     else:
         entity = {"type": "dir", "target": dir_sha1_git_bin}
     if entity["type"] == "dir":
         directory_entries = storage.directory_ls(entity["target"]) or []
         return {
             "type": "dir",
             "path": "." if not dir_path else dir_path,
             "revision": sha1_git,
             "content": list(map(converters.from_directory_entry, directory_entries)),
         }
     elif entity["type"] == "file":  # content
         content = _first_element(storage.content_find({"sha1_git": entity["target"]}))
         if not content:
             raise NotFoundExc(f"Content not found for revision {sha1_git}")
         content_d = content.to_dict()
         if with_data:
             data = storage.content_get_data(content.sha1)
             if data:
                 content_d["data"] = data
         return {
             "type": "file",
             "path": "." if not dir_path else dir_path,
             "revision": sha1_git,
             "content": converters.from_content(content_d),
         }
     elif entity["type"] == "rev":  # revision
         revision = storage.revision_get([entity["target"]])[0]
         return {
             "type": "rev",
             "path": "." if not dir_path else dir_path,
             "revision": sha1_git,
             "content": converters.from_revision(revision) if revision else None,
         }
     else:
         raise NotImplementedError("Entity of type %s not implemented." % entity["type"])


 def lookup_content(q: str) -> Dict[str, Any]:
     """Lookup the content designated by q.

     Args:
         q: The content's sha1 as hexadecimal

     Raises:
         NotFoundExc if the requested content is not found

     """
     algo, hash_ = query.parse_hash(q)
     c = _first_element(storage.content_find({algo: hash_}))
     if not c:
         hhex = hashutil.hash_to_hex(hash_)
         raise NotFoundExc(f"Content with {algo} checksum equals to {hhex} not found!")
     return converters.from_content(c.to_dict())


 def lookup_content_raw(q: str) -> Dict[str, Any]:
     """Lookup the content defined by q.

     Args:
         q: query string of the form <hash_algo:hash>

     Returns:
         dict with 'sha1' and 'data' keys.
         data representing its raw data decoded.

     Raises:
         NotFoundExc if the requested content is not found or
         if the content bytes are not available in the storage

     """
     c = lookup_content(q)
     content_sha1_bytes = hashutil.hash_to_bytes(c["checksums"]["sha1"])
     content_data = storage.content_get_data(content_sha1_bytes)
     if content_data is None:
         algo, hash_ = query.parse_hash(q)
         raise NotFoundExc(
             f"Bytes of content with {algo} checksum equals "
             f"to {hashutil.hash_to_hex(hash_)} are not available!"
         )
     return converters.from_content({"sha1": content_sha1_bytes, "data": content_data})


 def stat_counters():
     """Return the stat counters for Software Heritage

     Returns:
         A dict mapping textual labels to integer values.

     """
     return storage.stat_counters()


 def _lookup_origin_visits(
     origin_url: str, last_visit: Optional[int] = None, limit: int = 10
 ) -> Iterator[OriginVisit]:
     """Yields the origin's visits.

     Args:
         origin_url (str): origin to list visits for
         last_visit (int): last visit to lookup from
         limit (int): Number of elements max to display

     Yields:
         OriginVisit for that origin

     """
     limit = min(limit, MAX_LIMIT)
     page_token: Optional[str]
     if last_visit is not None:
         page_token = str(last_visit)
     else:
         page_token = None
     visit_page = storage.origin_visit_get(
         origin_url, page_token=page_token, limit=limit
     )
     yield from visit_page.results


 def lookup_origin_visits(
     origin: str, last_visit: Optional[int] = None, per_page: int = 10
 ) -> Iterator[OriginVisitInfo]:
     """Yields the origin's visits.

     Args:
         origin: origin to list visits for

     Yields:
         Dictionaries of origin_visit for that origin

     """
     for visit in _lookup_origin_visits(origin, last_visit=last_visit, limit=per_page):
         visit_status = storage.origin_visit_status_get_latest(origin, visit.visit)
         yield converters.from_origin_visit(
             {**visit_status.to_dict(), "type": visit.type}
         )
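
lookup_revision_with_context inverts the parent links from revision_log into a children map, so the ancestor test becomes a plain membership check. A self-contained sketch with a three-node history:

from collections import defaultdict

# toy log: c2 -> c1 -> c0 (each entry lists its parents)
revision_log = [
    {"id": "c2", "parents": ["c1"]},
    {"id": "c1", "parents": ["c0"]},
    {"id": "c0", "parents": []},
]

parents, children = {}, defaultdict(list)
for rev in revision_log:
    parents[rev["id"]] = list(rev["parents"])
    for parent_id in rev["parents"]:
        children[parent_id].append(rev["id"])

assert "c0" in parents           # c0 is an ancestor of c2
assert children["c0"] == ["c1"]  # and c1 is the child leading toward c2
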
 def lookup_origin_visit_latest(
     origin_url: str,
     require_snapshot: bool = False,
     type: Optional[str] = None,
     allowed_statuses: Optional[List[str]] = None,
 ) -> Optional[OriginVisitInfo]:
     """Return the origin's latest visit

     Args:
         origin_url: origin to list visits for
         type: Optional visit type to filter on (e.g git, tar, dsc, svn,
             hg, npm, pypi, ...)
         allowed_statuses: list of visit statuses considered
             to find the latest visit. For instance,
             ``allowed_statuses=['full']`` will only consider visits that
             have successfully run to completion.
         require_snapshot: filter out origins without a snapshot

     Returns:
         The origin visit info as dict if found

     """
     visit_status = origin_get_latest_visit_status(
         storage,
         origin_url,
         type=type,
         allowed_statuses=allowed_statuses,
         require_snapshot=require_snapshot,
     )
     return (
         converters.from_origin_visit(visit_status.to_dict()) if visit_status else None
     )


 def lookup_origin_visit(origin_url: str, visit_id: int) -> OriginVisitInfo:
     """Return information about visit visit_id with origin origin.

     Args:
         origin: origin concerned by the visit
         visit_id: the visit identifier to lookup

     Yields:
         The dict origin_visit concerned

     """
     visit = storage.origin_visit_get_by(origin_url, visit_id)
     visit_status = storage.origin_visit_status_get_latest(origin_url, visit_id)
     if not visit:
         raise NotFoundExc(
             f"Origin {origin_url} or its visit with id {visit_id} not found!"
         )
     return converters.from_origin_visit({**visit_status.to_dict(), "type": visit.type})


 def lookup_snapshot_sizes(
-    snapshot_id: str, branch_name_exclude_prefix: Optional[str] = None
+    snapshot_id: str, branch_name_exclude_prefix: Optional[str] = "refs/pull/"
 ) -> Dict[str, int]:
     """Count the number of branches in the snapshot with the given id.

     Args:
         snapshot_id (str): sha1 identifier of the snapshot

     Returns:
         dict: A dict whose keys are the target types of branches and
         values their corresponding amount

     """
     snapshot_id_bin = _to_sha1_bin(snapshot_id)
     snapshot_sizes = dict.fromkeys(("alias", "release", "revision"), 0)
     branch_counts = storage.snapshot_count_branches(
         snapshot_id_bin,
         branch_name_exclude_prefix.encode() if branch_name_exclude_prefix else None,
     )
     # remove possible None key returned by snapshot_count_branches
     # when null branches are present in the snapshot
     branch_counts.pop(None, None)
     snapshot_sizes.update(branch_counts)
     return snapshot_sizes


 def lookup_snapshot(
     snapshot_id: str,
     branches_from: str = "",
     branches_count: int = 1000,
     target_types: Optional[List[str]] = None,
     branch_name_include_substring: Optional[str] = None,
-    branch_name_exclude_prefix: Optional[str] = None,
+    branch_name_exclude_prefix: Optional[str] = "refs/pull/",
 ) -> Dict[str, Any]:
     """Return information about a snapshot, aka the list of named
     branches found during a specific visit of an origin.

     Args:
         snapshot_id: sha1 identifier of the snapshot
         branches_from: optional parameter used to skip branches
             whose name is lexicographically less than it before returning
             them
         branches_count: optional parameter used to restrain
             the amount of returned branches
         target_types: optional parameter used to filter the target types of
             branch to return (possible values that can be contained in that
             list are `'content', 'directory', 'revision', 'release',
             'snapshot', 'alias'`)
         branch_name_include_substring: if provided, only return branches whose
             name contains given substring
         branch_name_exclude_prefix: if provided, do not return branches whose
             name starts with given prefix

     Returns:
         A dict filled with the snapshot content.

     """
     snapshot_id_bin = _to_sha1_bin(snapshot_id)
     partial_branches = storage.snapshot_get_branches(
         snapshot_id_bin,
         branches_from.encode(),
         branches_count,
         target_types,
         branch_name_include_substring.encode()
         if branch_name_include_substring
         else None,
         branch_name_exclude_prefix.encode() if branch_name_exclude_prefix else None,
     )
     if not partial_branches:
         raise NotFoundExc(f"Snapshot with id {snapshot_id} not found!")

     return converters.from_partial_branches(partial_branches)


 def lookup_latest_origin_snapshot(
     origin: str, allowed_statuses: List[str] = None
 ) -> Optional[Dict[str, Any]]:
     """Return information about the latest snapshot of an origin.

     .. warning:: At most 1000 branches contained in the snapshot
         will be returned for performance reasons.

     Args:
         origin: URL or integer identifier of the origin
         allowed_statuses: list of visit statuses considered
             to find the latest snapshot for the visit. For instance,
             ``allowed_statuses=['full']`` will only consider visits that
             have successfully run to completion.

     Returns:
         A dict filled with the snapshot content.

     """
     snp = snapshot_get_latest(
         storage, origin, allowed_statuses=allowed_statuses, branches_count=1000
     )
     return converters.from_snapshot(snp.to_dict()) if snp is not None else None


 def lookup_snapshot_branch_name_from_tip_revision(
     snapshot_id: str, revision_id: str
 ) -> Optional[str]:
     """Check if a revision corresponds to the tip of a snapshot branch

     Args:
         snapshot_id: hexadecimal representation of a snapshot id
         revision_id: hexadecimal representation of a revision id

     Returns:
         The name of the first found branch or None otherwise

     """
     per_page = 10000
     branches_from = ""
     snapshot: Dict[str, Any] = {"branches": {}}
     branches = []
     while not branches_from or len(snapshot["branches"]) == per_page + 1:
         snapshot = lookup_snapshot(
             snapshot_id,
             target_types=[REVISION],
             branches_from=branches_from,
             branches_count=per_page + 1,
+            branch_name_exclude_prefix=None,
         )
         branches += [
             {"name": k, "target": v["target"]} for k, v in snapshot["branches"].items()
         ]
         branches_from = branches[-1]["name"]

     for branch in branches:
         if branch["target"] == revision_id:
             return branch["name"]

     return None


 def lookup_snapshot_alias(
     snapshot_id: str, alias_name: str
 ) -> Optional[Dict[str, Any]]:
     """Try to resolve a branch alias in a snapshot.

     Args:
         snapshot_id: hexadecimal representation of a snapshot id
         alias_name: name of the branch alias to resolve

     Returns:
         Target branch information or None if the alias does not exist
         or targets a dangling branch.

     """
     resolved_alias = snapshot_resolve_alias(
         storage, _to_sha1_bin(snapshot_id), alias_name.encode()
     )
     return (
         converters.from_swh(resolved_alias.to_dict(), hashess={"target"})
         if resolved_alias is not None
         else None
     )
""" if ( "origin_url" in revision and "branch_name" in revision and "ts" in revision and "sha1_git" in revision ): return lookup_revision_with_context_by( revision["origin_url"], revision["branch_name"], revision["ts"], revision["sha1_git"], limit, ) if "origin_url" in revision and "branch_name" in revision and "ts" in revision: return lookup_revision_by( revision["origin_url"], revision["branch_name"], revision["ts"] ) if "sha1_git_root" in revision and "sha1_git" in revision: return lookup_revision_with_context( revision["sha1_git_root"], revision["sha1_git"], limit ) if "sha1_git" in revision: return lookup_revision(revision["sha1_git"]) # this should not happen raise NotImplementedError("Should not happen!") def lookup_directory_through_revision(revision, path=None, limit=100, with_data=False): """Retrieve the directory information from the revision. Args: revision: dictionary of criterion representing a revision to lookup path: directory's path to lookup. limit: optional query parameter to limit the revisions log (default to 100). For now, note that this limit could impede the transitivity conclusion about sha1_git not being an ancestor of. with_data: indicate to retrieve the content's raw data if path resolves to a content. Returns: The directory pointing to by the revision criterions at path. """ rev = lookup_revision_through(revision, limit) if not rev: raise NotFoundExc("Revision with criterion %s not found!" % revision) return (rev["id"], lookup_directory_with_revision(rev["id"], path, with_data)) def _vault_request(vault_fn, *args, **kwargs): try: return vault_fn(*args, **kwargs) except VaultNotFoundExc: return None def vault_cook(obj_type, obj_id, email=None): """Cook a vault bundle. """ return _vault_request(vault.cook, obj_type, obj_id, email=email) def vault_fetch(obj_type, obj_id): """Fetch a vault bundle. """ return _vault_request(vault.fetch, obj_type, obj_id) def vault_progress(obj_type, obj_id): """Get the current progress of a vault bundle. """ return _vault_request(vault.progress, obj_type, obj_id) def diff_revision(rev_id): """Get the list of file changes (insertion / deletion / modification / renaming) for a particular revision. """ rev_sha1_git_bin = _to_sha1_bin(rev_id) changes = diff.diff_revision(storage, rev_sha1_git_bin, track_renaming=True) for change in changes: change["from"] = converters.from_directory_entry(change["from"]) change["to"] = converters.from_directory_entry(change["to"]) if change["from_path"]: change["from_path"] = change["from_path"].decode("utf-8") if change["to_path"]: change["to_path"] = change["to_path"].decode("utf-8") return changes class _RevisionsWalkerProxy(object): """ Proxy class wrapping a revisions walker iterator from swh-storage and performing needed conversions. """ def __init__(self, rev_walker_type, rev_start, *args, **kwargs): rev_start_bin = hashutil.hash_to_bytes(rev_start) self.revisions_walker = revisions_walker.get_revisions_walker( rev_walker_type, storage, rev_start_bin, *args, **kwargs ) def export_state(self): return self.revisions_walker.export_state() def __next__(self): return converters.from_revision(next(self.revisions_walker)) def __iter__(self): return self def get_revisions_walker(rev_walker_type, rev_start, *args, **kwargs): """ Utility function to instantiate a revisions walker of a given type, see :mod:`swh.storage.algos.revisions_walker`. 
Args: rev_walker_type (str): the type of revisions walker to return, possible values are: ``committer_date``, ``dfs``, ``dfs_post``, ``bfs`` and ``path`` rev_start (str): hexadecimal representation of a revision identifier args (list): position arguments to pass to the revisions walker constructor kwargs (dict): keyword arguments to pass to the revisions walker constructor """ # first check if the provided revision is valid lookup_revision(rev_start) return _RevisionsWalkerProxy(rev_walker_type, rev_start, *args, **kwargs) def lookup_object(object_type: str, object_id: str) -> Dict[str, Any]: """ Utility function for looking up an object in the archive by its type and id. Args: object_type (str): the type of object to lookup, either *content*, *directory*, *release*, *revision* or *snapshot* object_id (str): the *sha1_git* checksum identifier in hexadecimal form of the object to lookup Returns: Dict[str, Any]: A dictionary describing the object or a list of dictionary for the directory object type. Raises: swh.web.common.exc.NotFoundExc: if the object could not be found in the archive BadInputExc: if the object identifier is invalid """ if object_type == CONTENT: return lookup_content(f"sha1_git:{object_id}") elif object_type == DIRECTORY: return {"id": object_id, "content": list(lookup_directory(object_id))} elif object_type == RELEASE: return lookup_release(object_id) elif object_type == REVISION: return lookup_revision(object_id) elif object_type == SNAPSHOT: return lookup_snapshot(object_id) raise BadInputExc( ( "Invalid swh object type! Valid types are " f"{CONTENT}, {DIRECTORY}, {RELEASE} " f"{REVISION} or {SNAPSHOT}." ) ) def lookup_missing_hashes(grouped_swhids: Dict[str, List[bytes]]) -> Set[str]: """Lookup missing Software Heritage persistent identifier hash, using batch processing. Args: A dictionary with: keys: object types values: object hashes Returns: A set(hexadecimal) of the hashes not found in the storage """ missing_hashes = [] for obj_type, obj_ids in grouped_swhids.items(): if obj_type == CONTENT: missing_hashes.append(storage.content_missing_per_sha1_git(obj_ids)) elif obj_type == DIRECTORY: missing_hashes.append(storage.directory_missing(obj_ids)) elif obj_type == REVISION: missing_hashes.append(storage.revision_missing(obj_ids)) elif obj_type == RELEASE: missing_hashes.append(storage.release_missing(obj_ids)) elif obj_type == SNAPSHOT: missing_hashes.append(storage.snapshot_missing(obj_ids)) missing = set( map(lambda x: hashutil.hash_to_hex(x), itertools.chain(*missing_hashes)) ) return missing def lookup_origins_by_sha1s(sha1s: List[str]) -> Iterator[Optional[OriginInfo]]: """Lookup origins from the sha1 hash values of their URLs. 
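
get_revisions_walker validates the starting revision and then proxies iteration with dict conversion. A hedged usage sketch; the revision id is illustrative, and max_revs is assumed to be one of the keyword arguments accepted by the swh-storage walkers:

from swh.web.common.archive import get_revisions_walker

# breadth-first walk from an assumed starting revision, bounded for safety
walker = get_revisions_walker(
    "bfs", "aafb16d69fd30ff58afdd69036a26047f3aebdc6", max_revs=10
)
for rev in walker:
    print(rev["id"], rev["date"])

# the walker state can be exported, e.g. to resume a paginated log view
state = walker.export_state()
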
 def lookup_origins_by_sha1s(sha1s: List[str]) -> Iterator[Optional[OriginInfo]]:
     """Lookup origins from the sha1 hash values of their URLs.

     Args:
         sha1s: list of sha1s hexadecimal representation

     Yields:
         origin information as dict

     """
     sha1s_bytes = [hashutil.hash_to_bytes(sha1) for sha1 in sha1s]
     origins = storage.origin_get_by_sha1(sha1s_bytes)
     for origin in origins:
         yield converters.from_origin(origin)
diff --git a/swh/web/tests/api/views/test_snapshot.py b/swh/web/tests/api/views/test_snapshot.py
index 65047568..6280dac2 100644
--- a/swh/web/tests/api/views/test_snapshot.py
+++ b/swh/web/tests/api/views/test_snapshot.py
@@ -1,152 +1,168 @@
 # Copyright (C) 2018-2019  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import random

 from hypothesis import given

 from swh.model.hashutil import hash_to_hex
 from swh.model.model import Snapshot
 from swh.web.api.utils import enrich_snapshot
 from swh.web.common.utils import reverse
 from swh.web.tests.data import random_sha1
-from swh.web.tests.strategies import new_snapshot, snapshot
+from swh.web.tests.strategies import (
+    new_snapshot,
+    origin_with_pull_request_branches,
+    snapshot,
+)
 from swh.web.tests.utils import check_api_get_responses, check_http_get_response


 @given(snapshot())
 def test_api_snapshot(api_client, archive_data, snapshot):

     url = reverse("api-1-snapshot", url_args={"snapshot_id": snapshot})
     rv = check_api_get_responses(api_client, url, status_code=200)

     expected_data = {**archive_data.snapshot_get(snapshot), "next_branch": None}
     expected_data = enrich_snapshot(expected_data, rv.wsgi_request)
     assert rv.data == expected_data


 @given(snapshot())
 def test_api_snapshot_paginated(api_client, archive_data, snapshot):

     branches_offset = 0
     branches_count = 2

     snapshot_branches = []

     for k, v in sorted(archive_data.snapshot_get(snapshot)["branches"].items()):
         snapshot_branches.append(
             {"name": k, "target_type": v["target_type"], "target": v["target"]}
         )

     whole_snapshot = {"id": snapshot, "branches": {}, "next_branch": None}

     while branches_offset < len(snapshot_branches):
         branches_from = snapshot_branches[branches_offset]["name"]
         url = reverse(
             "api-1-snapshot",
             url_args={"snapshot_id": snapshot},
             query_params={
                 "branches_from": branches_from,
                 "branches_count": branches_count,
             },
         )
         rv = check_api_get_responses(api_client, url, status_code=200)
         expected_data = archive_data.snapshot_get_branches(
             snapshot, branches_from, branches_count
         )

         expected_data = enrich_snapshot(expected_data, rv.wsgi_request)

         branches_offset += branches_count
         if branches_offset < len(snapshot_branches):
             next_branch = snapshot_branches[branches_offset]["name"]
             expected_data["next_branch"] = next_branch
         else:
             expected_data["next_branch"] = None

         assert rv.data == expected_data
         whole_snapshot["branches"].update(expected_data["branches"])

         if branches_offset < len(snapshot_branches):
             next_url = rv.wsgi_request.build_absolute_uri(
                 reverse(
                     "api-1-snapshot",
                     url_args={"snapshot_id": snapshot},
                     query_params={
                         "branches_from": next_branch,
                         "branches_count": branches_count,
                     },
                 )
             )
             assert rv["Link"] == '<%s>; rel="next"' % next_url
         else:
             assert not rv.has_header("Link")

     url = reverse("api-1-snapshot", url_args={"snapshot_id": snapshot})
     rv = check_api_get_responses(api_client, url, status_code=200)
     assert rv.data == whole_snapshot


 @given(snapshot())
 def test_api_snapshot_filtered(api_client, archive_data, snapshot):

     snapshot_branches = []

     for k, v in sorted(archive_data.snapshot_get(snapshot)["branches"].items()):
         snapshot_branches.append(
             {"name": k, "target_type": v["target_type"], "target": v["target"]}
         )

     target_type = random.choice(snapshot_branches)["target_type"]

     url = reverse(
         "api-1-snapshot",
         url_args={"snapshot_id": snapshot},
         query_params={"target_types": target_type},
     )
     rv = check_api_get_responses(api_client, url, status_code=200)

     expected_data = archive_data.snapshot_get_branches(
         snapshot, target_types=target_type
     )
     expected_data = enrich_snapshot(expected_data, rv.wsgi_request)

     assert rv.data == expected_data


 def test_api_snapshot_errors(api_client):
     unknown_snapshot_ = random_sha1()

     url = reverse("api-1-snapshot", url_args={"snapshot_id": "63ce369"})
     check_api_get_responses(api_client, url, status_code=400)

     url = reverse("api-1-snapshot", url_args={"snapshot_id": unknown_snapshot_})
     check_api_get_responses(api_client, url, status_code=404)


 @given(snapshot())
 def test_api_snapshot_uppercase(api_client, snapshot):
     url = reverse(
         "api-1-snapshot-uppercase-checksum", url_args={"snapshot_id": snapshot.upper()}
     )

     resp = check_http_get_response(api_client, url, status_code=302)

     redirect_url = reverse(
         "api-1-snapshot-uppercase-checksum", url_args={"snapshot_id": snapshot}
     )

     assert resp["location"] == redirect_url


 @given(new_snapshot(min_size=4))
 def test_api_snapshot_null_branch(api_client, archive_data, new_snapshot):
     snp_dict = new_snapshot.to_dict()
     snp_id = hash_to_hex(snp_dict["id"])
     for branch in snp_dict["branches"].keys():
         snp_dict["branches"][branch] = None
         break
     archive_data.snapshot_add([Snapshot.from_dict(snp_dict)])
     url = reverse("api-1-snapshot", url_args={"snapshot_id": snp_id})
     check_api_get_responses(api_client, url, status_code=200)
+
+
+@given(origin_with_pull_request_branches())
+def test_api_snapshot_no_pull_request_branches_filtering(
+    api_client, archive_data, origin
+):
+    """Pull request branches should not be filtered out when querying
+    a snapshot with the Web API."""
+    snapshot = archive_data.snapshot_get_latest(origin.url)
+    url = reverse("api-1-snapshot", url_args={"snapshot_id": snapshot["id"]})
+    resp = check_api_get_responses(api_client, url, status_code=200)
+    assert any([b.startswith("refs/pull/") for b in resp.data["branches"]])
diff --git a/swh/web/tests/browse/views/test_origin.py b/swh/web/tests/browse/views/test_origin.py
index d124be14..3d23917f 100644
--- a/swh/web/tests/browse/views/test_origin.py
+++ b/swh/web/tests/browse/views/test_origin.py
@@ -1,1282 +1,1300 @@
 # Copyright (C) 2017-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU Affero General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import random
 import re
 import string

 from hypothesis import given

 from django.utils.html import escape

 from swh.model.hashutil import hash_to_bytes
 from swh.model.identifiers import CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT
 from swh.model.model import (
     OriginVisit,
     OriginVisitStatus,
     Snapshot,
     SnapshotBranch,
     TargetType,
 )
 from swh.storage.utils import now
 from swh.web.browse.snapshot_context import process_snapshot_branches
 from swh.web.common.exc import NotFoundExc
 from swh.web.common.identifiers import gen_swhid
 from swh.web.common.utils import (
     format_utc_iso_date,
     gen_path_info,
     parse_iso8601_date_to_utc,
     reverse,
 )
 from swh.web.tests.data import get_content, random_sha1
 from swh.web.tests.django_asserts import assert_contains, assert_not_contains
 from swh.web.tests.strategies import (
     new_origin,
     new_snapshot,
     origin,
     origin_with_multiple_visits,
+    origin_with_pull_request_branches,
     origin_with_releases,
 )
 from swh.web.tests.strategies import release as existing_release
 from swh.web.tests.strategies import revisions, unknown_revision, visit_dates
 from swh.web.tests.utils import check_html_get_response


 @given(origin_with_multiple_visits())
 def test_origin_visits_browse(client, archive_data, origin):
     url = reverse("browse-origin-visits", query_params={"origin_url": origin["url"]})
     resp = check_html_get_response(
         client, url, status_code=200, template_used="browse/origin-visits.html"
     )

     visits = archive_data.origin_visit_get(origin["url"])

     for v in visits:
         vdate = format_utc_iso_date(v["date"], "%Y-%m-%dT%H:%M:%SZ")
         browse_dir_url = reverse(
             "browse-origin-directory",
             query_params={"origin_url": origin["url"], "timestamp": vdate},
         )
         assert_contains(resp, browse_dir_url)

     _check_origin_link(resp, origin["url"])


 @given(origin_with_multiple_visits())
 def test_origin_content_view(client, archive_data, origin):
     origin_visits = archive_data.origin_visit_get(origin["url"])

     def _get_archive_data(visit_idx):
         snapshot = archive_data.snapshot_get(origin_visits[visit_idx]["snapshot"])
         head_rev_id = archive_data.snapshot_get_head(snapshot)
         head_rev = archive_data.revision_get(head_rev_id)
         dir_content = archive_data.directory_ls(head_rev["directory"])
         dir_files = [e for e in dir_content if e["type"] == "file"]
         dir_file = random.choice(dir_files)
         branches, releases, _ = process_snapshot_branches(snapshot)
         return {
             "branches": branches,
             "releases": releases,
             "root_dir_sha1": head_rev["directory"],
             "content": get_content(dir_file["checksums"]["sha1"]),
             "visit": origin_visits[visit_idx],
             "snapshot_sizes": archive_data.snapshot_count_branches(snapshot["id"]),
         }

     tdata = _get_archive_data(-1)

     _origin_content_view_test_helper(
         client,
         archive_data,
         origin,
         origin_visits[-1],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
     )

     _origin_content_view_test_helper(
         client,
         archive_data,
         origin,
         origin_visits[-1],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         timestamp=tdata["visit"]["date"],
     )

     _origin_content_view_test_helper(
         client,
         archive_data,
         origin,
         origin_visits[-1],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         snapshot_id=tdata["visit"]["snapshot"],
     )

     tdata = _get_archive_data(0)

     _origin_content_view_test_helper(
         client,
         archive_data,
         origin,
         origin_visits[0],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         visit_id=tdata["visit"]["visit"],
     )

     _origin_content_view_test_helper(
         client,
         archive_data,
         origin,
         origin_visits[0],
         tdata["snapshot_sizes"],
         tdata["branches"],
         tdata["releases"],
         tdata["root_dir_sha1"],
         tdata["content"],
         snapshot_id=tdata["visit"]["snapshot"],
     )


 @given(origin())
 def test_origin_root_directory_view(client, archive_data, origin):
     origin_visits = archive_data.origin_visit_get(origin["url"])

     visit = origin_visits[-1]
     snapshot = archive_data.snapshot_get(visit["snapshot"])
     snapshot_sizes = archive_data.snapshot_count_branches(snapshot["id"])
     head_rev_id = archive_data.snapshot_get_head(snapshot)
     head_rev = archive_data.revision_get(head_rev_id)
     root_dir_sha1 = head_rev["directory"]
     dir_content = archive_data.directory_ls(root_dir_sha1)
     branches, releases, _ = process_snapshot_branches(snapshot)

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
         visit_id=visit["visit"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
         timestamp=visit["date"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
         snapshot_id=visit["snapshot"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
         visit_id=visit["visit"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
         timestamp=visit["date"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         dir_content,
         snapshot_id=visit["snapshot"],
     )


 @given(origin())
 def test_origin_sub_directory_view(client, archive_data, origin):
     origin_visits = archive_data.origin_visit_get(origin["url"])

     visit = origin_visits[-1]
     snapshot = archive_data.snapshot_get(visit["snapshot"])
     snapshot_sizes = archive_data.snapshot_count_branches(snapshot["id"])
     head_rev_id = archive_data.snapshot_get_head(snapshot)
     head_rev = archive_data.revision_get(head_rev_id)
     root_dir_sha1 = head_rev["directory"]
     subdirs = [
         e for e in archive_data.directory_ls(root_dir_sha1) if e["type"] == "dir"
     ]
     branches, releases, _ = process_snapshot_branches(snapshot)

     if len(subdirs) == 0:
         return

     subdir = random.choice(subdirs)
     subdir_content = archive_data.directory_ls(subdir["target"])
     subdir_path = subdir["name"]

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
         visit_id=visit["visit"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
         timestamp=visit["date"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
         snapshot_id=visit["snapshot"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
         visit_id=visit["visit"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
         timestamp=visit["date"],
     )

     _origin_directory_view_test_helper(
         client,
         archive_data,
         origin,
         visit,
         snapshot_sizes,
         branches,
         releases,
         root_dir_sha1,
         subdir_content,
         path=subdir_path,
         snapshot_id=visit["snapshot"],
     )


 @given(origin())
 def test_origin_branches(client, archive_data, origin):
     origin_visits = archive_data.origin_visit_get(origin["url"])

     visit = origin_visits[-1]
     snapshot = archive_data.snapshot_get(visit["snapshot"])
     snapshot_sizes = archive_data.snapshot_count_branches(snapshot["id"])
     snapshot_content = process_snapshot_branches(snapshot)

     _origin_branches_test_helper(client, origin, snapshot_content, snapshot_sizes)

     _origin_branches_test_helper(
         client, origin, snapshot_content, snapshot_sizes, snapshot_id=visit["snapshot"]
     )


 @given(origin())
 def test_origin_releases(client, archive_data, origin):
     origin_visits = archive_data.origin_visit_get(origin["url"])

     visit = origin_visits[-1]
     snapshot = archive_data.snapshot_get(visit["snapshot"])
     snapshot_sizes = archive_data.snapshot_count_branches(snapshot["id"])
     snapshot_content = process_snapshot_branches(snapshot)

     _origin_releases_test_helper(client, origin, snapshot_content, snapshot_sizes)

     _origin_releases_test_helper(
         client, origin, snapshot_content, snapshot_sizes, snapshot_id=visit["snapshot"]
     )


 @given(
     new_origin(),
     new_snapshot(min_size=4, max_size=4),
     visit_dates(),
     revisions(min_size=3, max_size=3),
 )
 def test_origin_snapshot_null_branch(
     client, archive_data, new_origin, new_snapshot, visit_dates, revisions
 ):
     snp_dict = new_snapshot.to_dict()
     archive_data.origin_add([new_origin])
     for i, branch in enumerate(snp_dict["branches"].keys()):
         if i == 0:
             snp_dict["branches"][branch] = None
         else:
             snp_dict["branches"][branch] = {
                 "target_type": "revision",
                 "target": hash_to_bytes(revisions[i - 1]),
             }
     archive_data.snapshot_add([Snapshot.from_dict(snp_dict)])
     visit = archive_data.origin_visit_add(
         [OriginVisit(origin=new_origin.url, date=visit_dates[0], type="git",)]
     )[0]
     visit_status = OriginVisitStatus(
         origin=new_origin.url,
         visit=visit.visit,
         date=now(),
         status="partial",
         snapshot=snp_dict["id"],
     )
     archive_data.origin_visit_status_add([visit_status])

     url = reverse(
         "browse-origin-directory", query_params={"origin_url": new_origin.url}
     )

     check_html_get_response(
         client, url, status_code=200, template_used="browse/directory.html"
     )


 @given(
     new_origin(),
     new_snapshot(min_size=4, max_size=4),
     visit_dates(),
     revisions(min_size=4, max_size=4),
 )
 def test_origin_snapshot_invalid_branch(
     client, archive_data, new_origin, new_snapshot, visit_dates, revisions
 ):
     snp_dict = new_snapshot.to_dict()
     archive_data.origin_add([new_origin])
     for i, branch in enumerate(snp_dict["branches"].keys()):
         snp_dict["branches"][branch] = {
             "target_type": "revision",
             "target": hash_to_bytes(revisions[i]),
         }
     archive_data.snapshot_add([Snapshot.from_dict(snp_dict)])
     visit = archive_data.origin_visit_add(
         [OriginVisit(origin=new_origin.url, date=visit_dates[0], type="git",)]
     )[0]
     visit_status = OriginVisitStatus(
         origin=new_origin.url,
         visit=visit.visit,
         date=now(),
         status="full",
         snapshot=snp_dict["id"],
     )
     archive_data.origin_visit_status_add([visit_status])

     url = reverse(
         "browse-origin-directory",
         query_params={"origin_url": new_origin.url, "branch": "invalid_branch"},
     )

     check_html_get_response(client, url, status_code=404, template_used="error.html")


 @given(new_origin())
 def test_browse_visits_origin_not_found(client, new_origin):
     url = reverse("browse-origin-visits", query_params={"origin_url": new_origin.url})

     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert_contains(
         resp, f"Origin with url {new_origin.url} not found", status_code=404
     )


 @given(origin())
 def test_browse_origin_directory_no_visit(client, mocker, origin):
     mock_get_origin_visits = mocker.patch(
         "swh.web.common.origin_visits.get_origin_visits"
     )
     mock_get_origin_visits.return_value = []
     mock_archive = mocker.patch("swh.web.common.origin_visits.archive")
     mock_archive.lookup_origin_visit_latest.return_value = None
     url = reverse("browse-origin-directory", query_params={"origin_url": origin["url"]})

     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert_contains(resp, "No valid visit", status_code=404)
     assert not mock_get_origin_visits.called


 @given(origin())
 def test_browse_origin_directory_unknown_visit(client, mocker, origin):
     mock_get_origin_visits = mocker.patch(
         "swh.web.common.origin_visits.get_origin_visits"
     )
     mock_get_origin_visits.return_value = [{"visit": 1}]

     url = reverse(
         "browse-origin-directory",
         query_params={"origin_url": origin["url"], "visit_id": 2},
     )

     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert re.search("Visit.*not found", resp.content.decode("utf-8"))
     assert mock_get_origin_visits.called


 @given(origin())
 def test_browse_origin_directory_not_found(client, origin):
     url = reverse(
         "browse-origin-directory",
         query_params={"origin_url": origin["url"], "path": "/invalid/dir/path/"},
     )

     resp = check_html_get_response(
         client, url, status_code=404, template_used="browse/directory.html"
     )
     assert re.search("Directory.*not found", resp.content.decode("utf-8"))


 @given(origin())
 def test_browse_origin_content_no_visit(client, mocker, origin):
     mock_get_origin_visits = mocker.patch(
         "swh.web.common.origin_visits.get_origin_visits"
     )
     mock_get_origin_visits.return_value = []
     mock_archive = mocker.patch("swh.web.common.origin_visits.archive")
     mock_archive.lookup_origin_visit_latest.return_value = None
     url = reverse(
         "browse-origin-content",
         query_params={"origin_url": origin["url"], "path": "foo"},
     )

     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert_contains(resp, "No valid visit", status_code=404)
     assert not mock_get_origin_visits.called


 @given(origin())
 def test_browse_origin_content_unknown_visit(client, mocker, origin):
     mock_get_origin_visits = mocker.patch(
         "swh.web.common.origin_visits.get_origin_visits"
     )
     mock_get_origin_visits.return_value = [{"visit": 1}]

     url = reverse(
         "browse-origin-content",
         query_params={"origin_url": origin["url"], "path": "foo", "visit_id": 2},
     )

     resp = check_html_get_response(
         client, url, status_code=404, template_used="error.html"
     )
     assert re.search("Visit.*not found", resp.content.decode("utf-8"))
     assert mock_get_origin_visits.called


 @given(origin())
 def test_browse_origin_content_directory_empty_snapshot(client, mocker, origin):
     mock_snapshot_archive = mocker.patch("swh.web.browse.snapshot_context.archive")
     mock_get_origin_visit_snapshot = mocker.patch(
         "swh.web.browse.snapshot_context.get_origin_visit_snapshot"
     )
     mock_get_origin_visit_snapshot.return_value = ([], [], {})
     mock_snapshot_archive.lookup_origin.return_value = origin
     mock_snapshot_archive.lookup_snapshot_sizes.return_value = {
         "alias": 0,
         "revision": 0,
         "release": 0,
     }

     for browse_context in ("content", "directory"):
         url = reverse(
             f"browse-origin-{browse_context}",
             query_params={"origin_url": origin["url"], "path": "baz"},
         )

         resp = check_html_get_response(
             client, url, status_code=200, template_used=f"browse/{browse_context}.html"
         )
         assert re.search("snapshot.*is empty", resp.content.decode("utf-8"))
         assert mock_get_origin_visit_snapshot.called
         assert mock_snapshot_archive.lookup_origin.called
         assert mock_snapshot_archive.lookup_snapshot_sizes.called


 @given(origin())
 def test_browse_origin_content_not_found(client, origin):
     url = reverse(
"browse-origin-content", query_params={"origin_url": origin["url"], "path": "/invalid/file/path"}, ) resp = check_html_get_response( client, url, status_code=404, template_used="browse/content.html" ) assert re.search("Directory entry.*not found", resp.content.decode("utf-8")) @given(origin()) def test_browse_directory_snapshot_not_found(client, mocker, origin): mock_get_snapshot_context = mocker.patch( "swh.web.browse.snapshot_context.get_snapshot_context" ) mock_get_snapshot_context.side_effect = NotFoundExc("Snapshot not found") url = reverse("browse-origin-directory", query_params={"origin_url": origin["url"]}) resp = check_html_get_response( client, url, status_code=404, template_used="error.html" ) assert_contains(resp, "Snapshot not found", status_code=404) assert mock_get_snapshot_context.called @given(origin()) def test_origin_empty_snapshot(client, mocker, origin): mock_archive = mocker.patch("swh.web.browse.snapshot_context.archive") mock_get_origin_visit_snapshot = mocker.patch( "swh.web.browse.snapshot_context.get_origin_visit_snapshot" ) mock_get_origin_visit_snapshot.return_value = ([], [], {}) mock_archive.lookup_snapshot_sizes.return_value = { "alias": 0, "revision": 0, "release": 0, } mock_archive.lookup_origin.return_value = origin url = reverse("browse-origin-directory", query_params={"origin_url": origin["url"]}) resp = check_html_get_response( client, url, status_code=200, template_used="browse/directory.html" ) resp_content = resp.content.decode("utf-8") assert re.search("snapshot.*is empty", resp_content) assert not re.search("swh-tr-link", resp_content) assert mock_get_origin_visit_snapshot.called assert mock_archive.lookup_snapshot_sizes.called @given(new_origin()) def test_origin_empty_snapshot_null_revision(client, archive_data, new_origin): snapshot = Snapshot( branches={ b"HEAD": SnapshotBranch( target="refs/head/master".encode(), target_type=TargetType.ALIAS, ), b"refs/head/master": None, } ) archive_data.origin_add([new_origin]) archive_data.snapshot_add([snapshot]) visit = archive_data.origin_visit_add( [OriginVisit(origin=new_origin.url, date=now(), type="git",)] )[0] visit_status = OriginVisitStatus( origin=new_origin.url, visit=visit.visit, date=now(), status="partial", snapshot=snapshot.id, ) archive_data.origin_visit_status_add([visit_status]) url = reverse( "browse-origin-directory", query_params={"origin_url": new_origin.url}, ) resp = check_html_get_response( client, url, status_code=200, template_used="browse/directory.html" ) resp_content = resp.content.decode("utf-8") assert re.search("snapshot.*is empty", resp_content) assert not re.search("swh-tr-link", resp_content) @given(origin_with_releases()) def test_origin_release_browse(client, archive_data, origin): snapshot = archive_data.snapshot_get_latest(origin["url"]) release = [ b for b in snapshot["branches"].values() if b["target_type"] == "release" ][-1] release_data = archive_data.release_get(release["target"]) revision_data = archive_data.revision_get(release_data["target"]) url = reverse( "browse-origin-directory", query_params={"origin_url": origin["url"], "release": release_data["name"]}, ) resp = check_html_get_response( client, url, status_code=200, template_used="browse/directory.html" ) assert_contains(resp, release_data["name"]) assert_contains(resp, release["target"]) swhid_context = { "origin": origin["url"], "visit": gen_swhid(SNAPSHOT, snapshot["id"]), "anchor": gen_swhid(RELEASE, release_data["id"]), } swh_dir_id = gen_swhid( DIRECTORY, revision_data["directory"], 
metadata=swhid_context ) swh_dir_id_url = reverse("browse-swhid", url_args={"swhid": swh_dir_id}) assert_contains(resp, swh_dir_id) assert_contains(resp, swh_dir_id_url)

@given(origin_with_releases()) def test_origin_release_browse_not_found(client, origin): invalid_release_name = "swh-foo-bar" url = reverse( "browse-origin-directory", query_params={"origin_url": origin["url"], "release": invalid_release_name}, ) resp = check_html_get_response( client, url, status_code=404, template_used="error.html" ) assert re.search( f"Release {invalid_release_name}.*not found", resp.content.decode("utf-8") )

@given(new_origin(), unknown_revision()) def test_origin_browse_directory_branch_with_non_resolvable_revision( client, archive_data, new_origin, unknown_revision ): branch_name = "master" snapshot = Snapshot( branches={ branch_name.encode(): SnapshotBranch( target=hash_to_bytes(unknown_revision), target_type=TargetType.REVISION, ) } ) archive_data.origin_add([new_origin]) archive_data.snapshot_add([snapshot]) visit = archive_data.origin_visit_add( [OriginVisit(origin=new_origin.url, date=now(), type="git",)] )[0] visit_status = OriginVisitStatus( origin=new_origin.url, visit=visit.visit, date=now(), status="partial", snapshot=snapshot.id, ) archive_data.origin_visit_status_add([visit_status]) url = reverse( "browse-origin-directory", query_params={"origin_url": new_origin.url, "branch": branch_name}, ) resp = check_html_get_response( client, url, status_code=200, template_used="browse/directory.html" ) assert_contains( resp, f"Revision {unknown_revision} could not be found in the archive." )

@given(origin()) def test_origin_content_no_path(client, origin): url = reverse("browse-origin-content", query_params={"origin_url": origin["url"]}) resp = check_html_get_response( client, url, status_code=400, template_used="error.html" ) assert_contains( resp, "The path of a content must be given as query parameter.", status_code=400 )

def test_origin_views_no_url_query_parameter(client): for browse_context in ( "content", "directory", "log", "branches", "releases", "visits", ): url = reverse(f"browse-origin-{browse_context}") resp = check_html_get_response( client, url, status_code=400, template_used="error.html" ) assert_contains( resp, "An origin URL must be provided as query parameter.", status_code=400 )

def _origin_content_view_test_helper( client, archive_data, origin_info, origin_visit, snapshot_sizes, origin_branches, origin_releases, root_dir_sha1, content, visit_id=None, timestamp=None, snapshot_id=None, ): content_path = "/".join(content["path"].split("/")[1:]) if not visit_id and not snapshot_id: visit_id = origin_visit["visit"] query_params = {"origin_url": origin_info["url"], "path": content_path} if timestamp: query_params["timestamp"] = timestamp if visit_id: query_params["visit_id"] = visit_id elif snapshot_id: query_params["snapshot"] = snapshot_id url = reverse("browse-origin-content", query_params=query_params) resp = check_html_get_response( client, url, status_code=200, template_used="browse/content.html" ) assert type(content["data"]) == str assert_contains(resp, '<code class="%s">' % content["hljs_language"]) assert_contains(resp, escape(content["data"])) split_path = content_path.split("/") filename = split_path[-1] path = content_path.replace(filename, "")[:-1] path_info = gen_path_info(path) del query_params["path"] if timestamp: query_params["timestamp"] = format_utc_iso_date( parse_iso8601_date_to_utc(timestamp).isoformat(), "%Y-%m-%dT%H:%M:%SZ" ) root_dir_url = reverse("browse-origin-directory", query_params=query_params) assert_contains(resp, '<li class="swh-path">', count=len(path_info) + 1) assert_contains(resp, '<a href="%s">%s</a>' % (root_dir_url, root_dir_sha1[:7])) for p in path_info: query_params["path"] = p["path"] dir_url = reverse("browse-origin-directory", query_params=query_params) assert_contains(resp, '<a href="%s">%s</a>' % (dir_url, p["name"])) assert_contains(resp, "<li>%s</li>" % filename) query_string = "sha1_git:" + content["sha1_git"] url_raw = reverse( "browse-content-raw", url_args={"query_string": query_string}, query_params={"filename": filename}, ) assert_contains(resp, url_raw) if "path" in query_params: del query_params["path"] origin_branches_url = reverse("browse-origin-branches", query_params=query_params) assert_contains(resp, f'href="{escape(origin_branches_url)}"') assert_contains(resp, f"Branches ({snapshot_sizes['revision']})") origin_releases_url = reverse("browse-origin-releases", query_params=query_params) assert_contains(resp, f'href="{escape(origin_releases_url)}"') assert_contains(resp, f"Releases ({snapshot_sizes['release']})") assert_contains(resp, '<li class="swh-branch">', count=len(origin_branches)) query_params["path"] = content_path for branch in origin_branches: root_dir_branch_url = reverse( "browse-origin-content", query_params={"branch": branch["name"], **query_params}, ) assert_contains(resp, '<a href="%s">' % root_dir_branch_url) assert_contains(resp, '<li class="swh-release">', count=len(origin_releases)) query_params["branch"] = None for release in origin_releases: root_dir_release_url = reverse( "browse-origin-content", query_params={"release": release["name"], **query_params}, ) assert_contains(resp, '<a href="%s">' % root_dir_release_url) url = reverse("browse-origin-content", query_params=query_params) resp = check_html_get_response( client, url, status_code=200, template_used="browse/content.html" ) snapshot = archive_data.snapshot_get(origin_visit["snapshot"]) head_rev_id = archive_data.snapshot_get_head(snapshot) swhid_context = { "origin": origin_info["url"], "visit": gen_swhid(SNAPSHOT, snapshot["id"]), "anchor": gen_swhid(REVISION, head_rev_id), "path": f"/{content_path}", } swh_cnt_id = gen_swhid(CONTENT, content["sha1_git"], metadata=swhid_context) swh_cnt_id_url = reverse("browse-swhid", url_args={"swhid": swh_cnt_id}) assert_contains(resp, swh_cnt_id) assert_contains(resp, swh_cnt_id_url) assert_contains(resp, "swh-take-new-snapshot") _check_origin_link(resp, origin_info["url"]) assert_not_contains(resp, "swh-metadata-popover")

def _origin_directory_view_test_helper( client, archive_data, origin_info, origin_visit, snapshot_sizes, origin_branches, origin_releases, root_directory_sha1, directory_entries, visit_id=None, timestamp=None, snapshot_id=None, path=None, ): dirs = [e for e in directory_entries if e["type"] in ("dir", "rev")] files = [e for e in directory_entries if e["type"] == "file"] if not visit_id and not snapshot_id: visit_id = origin_visit["visit"] query_params = {"origin_url": origin_info["url"]} if timestamp: query_params["timestamp"] = timestamp elif visit_id: query_params["visit_id"] = visit_id else: query_params["snapshot"] = snapshot_id if path: query_params["path"] = path url = reverse("browse-origin-directory", query_params=query_params) resp = check_html_get_response( client, url, status_code=200, template_used="browse/directory.html" ) assert_contains(resp, '<td class="swh-directory">', count=len(dirs)) assert_contains(resp, '<td class="swh-content">', count=len(files)) if timestamp: query_params["timestamp"] = format_utc_iso_date( parse_iso8601_date_to_utc(timestamp).isoformat(), "%Y-%m-%dT%H:%M:%SZ" ) for d in dirs: if d["type"] == "rev": dir_url = reverse("browse-revision", url_args={"sha1_git": d["target"]}) else: dir_path = d["name"] if path: dir_path = "%s/%s" % (path, d["name"]) query_params["path"] = dir_path dir_url = reverse("browse-origin-directory", query_params=query_params,) assert_contains(resp, dir_url) for f in files: file_path = f["name"] if path: file_path = "%s/%s" % (path, f["name"]) query_params["path"] = file_path file_url = reverse("browse-origin-content", query_params=query_params) assert_contains(resp, file_url) if "path" in query_params: del query_params["path"] root_dir_branch_url = reverse("browse-origin-directory", query_params=query_params) nb_bc_paths = 1 if path: nb_bc_paths = len(path.split("/")) + 1 assert_contains(resp, '<li class="swh-path">', count=nb_bc_paths) assert_contains( resp, '<a href="%s">%s</a>' % (root_dir_branch_url, root_directory_sha1[:7]) ) origin_branches_url = reverse("browse-origin-branches", query_params=query_params) assert_contains(resp, f'href="{escape(origin_branches_url)}"') assert_contains(resp, f"Branches ({snapshot_sizes['revision']})") origin_releases_url = reverse("browse-origin-releases", query_params=query_params) nb_releases = len(origin_releases) if nb_releases > 0: assert_contains(resp, f'href="{escape(origin_releases_url)}"') assert_contains(resp, f"Releases ({snapshot_sizes['release']})") if path: query_params["path"] = path assert_contains(resp, '<li class="swh-branch">', count=len(origin_branches)) for branch in origin_branches: query_params["branch"] = branch["name"] root_dir_branch_url = reverse( "browse-origin-directory", query_params=query_params ) assert_contains(resp, '<a href="%s">' % root_dir_branch_url) assert_contains(resp, '<li class="swh-release">', count=len(origin_releases)) query_params["branch"] = None for release in origin_releases: query_params["release"] = release["name"] root_dir_release_url = reverse( "browse-origin-directory", query_params=query_params ) assert_contains(resp, 'href="%s"' % root_dir_release_url) assert_contains(resp, "vault-cook-directory") assert_contains(resp, "vault-cook-revision") snapshot = archive_data.snapshot_get(origin_visit["snapshot"]) head_rev_id = archive_data.snapshot_get_head(snapshot) swhid_context = { "origin": origin_info["url"], "visit": gen_swhid(SNAPSHOT, snapshot["id"]), "anchor": gen_swhid(REVISION, head_rev_id), "path": f"/{path}" if path else None, } swh_dir_id = gen_swhid( DIRECTORY, directory_entries[0]["dir_id"], metadata=swhid_context ) swh_dir_id_url = reverse("browse-swhid", url_args={"swhid": swh_dir_id}) assert_contains(resp, swh_dir_id) assert_contains(resp, swh_dir_id_url) assert_contains(resp, "swh-take-new-snapshot") _check_origin_link(resp, origin_info["url"]) assert_not_contains(resp, "swh-metadata-popover")

def _origin_branches_test_helper( client, origin_info, origin_snapshot, snapshot_sizes, snapshot_id=None ): query_params = {"origin_url": origin_info["url"], "snapshot": snapshot_id} url = reverse("browse-origin-branches", query_params=query_params) resp = check_html_get_response( client, url, status_code=200, template_used="browse/branches.html" ) origin_branches = origin_snapshot[0] origin_releases = origin_snapshot[1] origin_branches_url = reverse("browse-origin-branches", query_params=query_params) assert_contains(resp, f'href="{escape(origin_branches_url)}"') assert_contains(resp, f"Branches ({snapshot_sizes['revision']})") origin_releases_url = reverse("browse-origin-releases", query_params=query_params) nb_releases = len(origin_releases) if nb_releases > 0: assert_contains(resp, f'href="{escape(origin_releases_url)}"') assert_contains(resp, f"Releases ({snapshot_sizes['release']})") assert_contains(resp, '<tr class="swh-branch-entry', count=len(origin_branches)) for branch in origin_branches: browse_branch_url = reverse( "browse-origin-directory", query_params={"branch": branch["name"], **query_params}, ) assert_contains(resp, '<a href="%s">' % escape(browse_branch_url)) browse_revision_url = reverse( "browse-revision", url_args={"sha1_git": branch["revision"]}, query_params=query_params, ) assert_contains(resp, '<a href="%s">' % escape(browse_revision_url)) _check_origin_link(resp, origin_info["url"])

def _origin_releases_test_helper( client, origin_info, origin_snapshot, snapshot_sizes, snapshot_id=None ): query_params = {"origin_url": origin_info["url"], "snapshot": snapshot_id} url = reverse("browse-origin-releases", query_params=query_params) resp = check_html_get_response( client, url, status_code=200, template_used="browse/releases.html" ) origin_releases = origin_snapshot[1] origin_branches_url = reverse("browse-origin-branches", query_params=query_params) assert_contains(resp, f'href="{escape(origin_branches_url)}"') assert_contains(resp, f"Branches ({snapshot_sizes['revision']})") origin_releases_url = reverse("browse-origin-releases", query_params=query_params) nb_releases = len(origin_releases) if nb_releases > 0: assert_contains(resp, f'href="{escape(origin_releases_url)}"') assert_contains(resp, f"Releases ({snapshot_sizes['release']})") assert_contains(resp, '<tr class="swh-release-entry', count=nb_releases) for release in origin_releases: browse_release_url = reverse( "browse-release", url_args={"sha1_git": release["id"]}, query_params=query_params, ) browse_revision_url = reverse( "browse-revision", url_args={"sha1_git": release["target"]}, query_params=query_params, ) assert_contains(resp, '<a href="%s">' % escape(browse_release_url)) assert_contains(resp, '<a href="%s">' % escape(browse_revision_url)) _check_origin_link(resp, origin_info["url"])

@given( new_origin(), visit_dates(), revisions(min_size=10, max_size=10), existing_release() ) def test_origin_branches_pagination_with_alias( client, archive_data, mocker, new_origin, visit_dates, revisions, existing_release ): """ When a snapshot contains a branch or a release alias, pagination links in the branches / releases view should be displayed. """ mocker.patch("swh.web.browse.snapshot_context.PER_PAGE", len(revisions) // 2) snp_dict = {"branches": {}, "id": hash_to_bytes(random_sha1())} for i in range(len(revisions)): branch = "".join(random.choices(string.ascii_lowercase, k=8)) snp_dict["branches"][branch.encode()] = { "target_type": "revision", "target": hash_to_bytes(revisions[i]), } release = "".join(random.choices(string.ascii_lowercase, k=8)) snp_dict["branches"][b"RELEASE_ALIAS"] = { "target_type": "alias", "target": release.encode(), } snp_dict["branches"][release.encode()] = { "target_type": "release", "target": hash_to_bytes(existing_release), } archive_data.origin_add([new_origin]) archive_data.snapshot_add([Snapshot.from_dict(snp_dict)]) visit = archive_data.origin_visit_add( [OriginVisit(origin=new_origin.url, date=visit_dates[0], type="git",)] )[0] visit_status = OriginVisitStatus( origin=new_origin.url, visit=visit.visit, date=now(), status="full", snapshot=snp_dict["id"], ) archive_data.origin_visit_status_add([visit_status]) url = reverse("browse-origin-branches", query_params={"origin_url": new_origin.url}) resp = check_html_get_response( client, url, status_code=200, template_used="browse/branches.html" ) assert_contains(resp, '<ul class="pagination')
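# Illustrative sketch (hypothetical, not part of the upstream diff): the
# fixture built in the test above is easy to misread in flattened form.
# Snapshot.from_dict() accepts a branches mapping in which an "alias"
# branch's target is simply the *name* of another branch of the same
# snapshot; the release id below is a placeholder value.
from swh.model.hashutil import hash_to_bytes
from swh.model.model import Snapshot


def _example_alias_snapshot() -> Snapshot:
    release_id = "b" * 40  # placeholder sha1, for illustration only
    return Snapshot.from_dict(
        {
            "branches": {
                b"RELEASE_ALIAS": {"target_type": "alias", "target": b"v1.0.0"},
                b"v1.0.0": {
                    "target_type": "release",
                    "target": hash_to_bytes(release_id),
                },
            }
        }
    )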
      ",}, }, }, ) _TEST_CTAGS_INDEXER_CONFIG = merge_configs( _TEST_INDEXER_BASE_CONFIG, { "workdir": "/tmp/swh/indexer.ctags", "languages": {"c": "c"}, "tools": { "name": "universal-ctags", "version": "~git7859817b", "configuration": { "command_line": """ctags --fields=+lnz --sort=no --links=no """ """--output-format=json """ }, }, }, ) # Lightweight git repositories that will be loaded to generate # input data for tests _TEST_ORIGINS = [ { "type": "git", "url": "https://github.com/memononen/libtess2", "archives": ["libtess2.zip"], }, { "type": "git", "url": "https://github.com/wcoder/highlightjs-line-numbers.js", "archives": [ "highlightjs-line-numbers.js.zip", "highlightjs-line-numbers.js_visit2.zip", ], "metadata": {"description": "Line numbering plugin for Highlight.js",}, }, { "type": "git", "url": "repo_with_submodules", "archives": ["repo_with_submodules.tgz"], "metadata": { "description": "This is just a sample repository with submodules", }, }, ] _contents = {} def _add_extra_contents(storage, contents): pbm_image_data = b"""P1 # PBM example 24 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0""" # add file with mimetype image/x-portable-bitmap in the archive content pbm_content = Content.from_data(pbm_image_data) storage.content_add([pbm_content]) contents.add(pbm_content.sha1) INDEXER_TOOL = { "tool_name": "swh-web tests", "tool_version": "1.0", "tool_configuration": {}, } ORIGIN_METADATA_KEY = "vcs" ORIGIN_METADATA_VALUE = "git" ORIGIN_MASTER_REVISION = {} +def _add_origin(storage, search, origin_url, visit_type="git", snapshot_branches={}): + storage.origin_add([Origin(url=origin_url)]) + search.origin_update( + [{"url": origin_url, "has_visits": True, "visit_types": [visit_type]}] + ) + date = now() + visit = OriginVisit(origin=origin_url, date=date, type=visit_type) + visit = storage.origin_visit_add([visit])[0] + snapshot = Snapshot.from_dict({"branches": snapshot_branches}) + storage.snapshot_add([snapshot]) + visit_status = OriginVisitStatus( + origin=origin_url, + visit=visit.visit, + date=date + timedelta(minutes=1), + type=visit.type, + status="full", + snapshot=snapshot.id, + ) + storage.origin_visit_status_add([visit_status]) + + # Tests data initialization def _init_tests_data(): # To hold reference to the memory storage storage = get_storage("memory") # Create search instance search = get_search("memory") search.initialize() search.origin_update({"url": origin["url"]} for origin in _TEST_ORIGINS) # Create indexer storage instance that will be shared by indexers idx_storage = get_indexer_storage("memory") # Declare a test tool for origin intrinsic metadata tests idx_tool = idx_storage.indexer_configuration_add([INDEXER_TOOL])[0] INDEXER_TOOL["id"] = idx_tool["id"] # Load git repositories from archives for origin in _TEST_ORIGINS: for i, archive_ in enumerate(origin["archives"]): if i > 0: # ensure visit dates will be different when simulating # multiple visits of an origin time.sleep(1) origin_repo_archive = os.path.join( os.path.dirname(__file__), "resources/repos/%s" % archive_ ) loader = GitLoaderFromArchive( storage, origin["url"], archive_path=origin_repo_archive, ) result = loader.load() assert result["status"] == "eventful" ori = storage.origin_get([origin["url"]])[0] 
origin.update(ori.to_dict()) # add an 'id' key if enabled search.origin_update( [{"url": origin["url"], "has_visits": True, "visit_types": ["git"]}] ) for i in range(250): - url = "https://many.origins/%d" % (i + 1) - # storage.origin_add([{'url': url}]) - storage.origin_add([Origin(url=url)]) - search.origin_update([{"url": url, "has_visits": True, "visit_types": ["tar"]}]) - date = now() - visit = OriginVisit(origin=url, date=date, type="tar") - visit = storage.origin_visit_add([visit])[0] - visit_status = OriginVisitStatus( - origin=url, - visit=visit.visit, - date=date + timedelta(minutes=1), - type=visit.type, - status="full", - snapshot=hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), + _add_origin( + storage, search, origin_url=f"https://many.origins/{i+1}", visit_type="tar" ) - storage.origin_visit_status_add([visit_status]) sha1s: Set[Sha1] = set() directories = set() revisions = set() releases = set() snapshots = set() content_path = {} # Get all objects loaded into the test archive common_metadata = {ORIGIN_METADATA_KEY: ORIGIN_METADATA_VALUE} for origin in _TEST_ORIGINS: snp = snapshot_get_latest(storage, origin["url"]) snapshots.add(hash_to_hex(snp.id)) for branch_name, branch_data in snp.branches.items(): target_type = branch_data.target_type.value if target_type == "revision": revisions.add(branch_data.target) if b"master" in branch_name: # Add some origin intrinsic metadata for tests metadata = common_metadata metadata.update(origin.get("metadata", {})) origin_metadata = OriginIntrinsicMetadataRow( id=origin["url"], from_revision=branch_data.target, indexer_configuration_id=idx_tool["id"], metadata=metadata, mappings=[], ) idx_storage.origin_intrinsic_metadata_add([origin_metadata]) search.origin_update( [{"url": origin["url"], "intrinsic_metadata": metadata}] ) ORIGIN_MASTER_REVISION[origin["url"]] = hash_to_hex( branch_data.target ) elif target_type == "release": release = storage.release_get([branch_data.target])[0] revisions.add(release.target) releases.add(hash_to_hex(branch_data.target)) for rev_log in storage.revision_shortlog(set(revisions)): rev_id = rev_log[0] revisions.add(rev_id) for rev in storage.revision_get(revisions): if rev is None: continue dir_id = rev.directory directories.add(hash_to_hex(dir_id)) for entry in dir_iterator(storage, dir_id): if entry["type"] == "file": sha1s.add(entry["sha1"]) content_path[entry["sha1"]] = "/".join( [hash_to_hex(dir_id), entry["path"].decode("utf-8")] ) elif entry["type"] == "dir": directories.add(hash_to_hex(entry["target"])) _add_extra_contents(storage, sha1s) # Get all checksums for each content result: List[Optional[Content]] = storage.content_get(list(sha1s)) contents: List[Dict] = [] for content in result: assert content is not None sha1 = hash_to_hex(content.sha1) content_metadata = { algo: hash_to_hex(getattr(content, algo)) for algo in DEFAULT_ALGORITHMS } path = "" if content.sha1 in content_path: path = content_path[content.sha1] cnt_data = storage.content_get_data(content.sha1) assert cnt_data is not None mimetype, encoding = get_mimetype_and_encoding_for_content(cnt_data) _, _, cnt_data = _re_encode_content(mimetype, encoding, cnt_data) content_display_data = prepare_content_for_display(cnt_data, mimetype, path) content_metadata.update( { "path": path, "mimetype": mimetype, "encoding": encoding, "hljs_language": content_display_data["language"], "data": content_display_data["content_data"], } ) _contents[sha1] = content_metadata contents.append(content_metadata) # Add the empty directory to the 
test archive storage.directory_add([Directory(entries=())]) # Add empty content to the test archive storage.content_add([Content.from_data(data=b"")]) + # Add fake git origin with pull request branches + _add_origin( + storage, + search, + origin_url="https://git.example.org/project", + snapshot_branches={ + b"refs/heads/master": { + "target_type": "revision", + "target": next(iter(revisions)), + }, + **{ + f"refs/pull/{i}".encode(): { + "target_type": "revision", + "target": next(iter(revisions)), + } + for i in range(300) + }, + }, + ) + # Return tests data return { "search": search, "storage": storage, "idx_storage": idx_storage, "origins": _TEST_ORIGINS, "contents": contents, "directories": list(directories), "releases": list(releases), "revisions": list(map(hash_to_hex, revisions)), "snapshots": list(snapshots), "generated_checksums": set(), } def _init_indexers(tests_data): # Instantiate content indexers that will be used in tests # and force them to use the memory storages indexers = {} for idx_name, idx_class, idx_config in ( ("mimetype_indexer", MimetypeIndexer, _TEST_MIMETYPE_INDEXER_CONFIG), ("license_indexer", FossologyLicenseIndexer, _TEST_LICENSE_INDEXER_CONFIG), ("ctags_indexer", CtagsIndexer, _TEST_CTAGS_INDEXER_CONFIG), ): idx = idx_class(config=idx_config) idx.storage = tests_data["storage"] idx.objstorage = tests_data["storage"].objstorage idx.idx_storage = tests_data["idx_storage"] idx.register_tools(idx.config["tools"]) indexers[idx_name] = idx return indexers def get_content(content_sha1): return _contents.get(content_sha1) _tests_data = None _current_tests_data = None _indexer_loggers = {} def get_tests_data(reset=False): """ Initialize tests data and return them in a dict. """ global _tests_data, _current_tests_data if _tests_data is None: _tests_data = _init_tests_data() indexers = _init_indexers(_tests_data) for (name, idx) in indexers.items(): # pytest makes the loggers use a temporary file; and deepcopy # requires serializability. So we remove them, and add them # back after the copy. _indexer_loggers[name] = idx.log del idx.log _tests_data.update(indexers) if reset or _current_tests_data is None: _current_tests_data = deepcopy(_tests_data) for (name, logger) in _indexer_loggers.items(): _current_tests_data[name].log = logger return _current_tests_data def override_storages(storage, idx_storage, search): """ Helper function to replace the storages from which archive data are fetched. 
""" swh_config = config.get_config() swh_config.update( {"storage": storage, "indexer_storage": idx_storage, "search": search,} ) archive.storage = storage archive.idx_storage = idx_storage archive.search = search diff --git a/swh/web/tests/strategies.py b/swh/web/tests/strategies.py index eaeced9a..a4e53e33 100644 --- a/swh/web/tests/strategies.py +++ b/swh/web/tests/strategies.py @@ -1,617 +1,633 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict from datetime import datetime import random from hypothesis import assume, settings from hypothesis.extra.dateutil import timezones from hypothesis.strategies import ( binary, characters, composite, datetimes, just, lists, sampled_from, text, ) from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex from swh.model.hypothesis_strategies import origins as new_origin_strategy from swh.model.hypothesis_strategies import snapshots as new_snapshot from swh.model.model import ( Content, Directory, Person, Revision, RevisionType, TimestampWithTimezone, ) from swh.storage.algos.revisions_walker import get_revisions_walker from swh.storage.algos.snapshot import snapshot_get_latest from swh.web.common.utils import browsers_supported_image_mimes from swh.web.tests.data import get_tests_data # Module dedicated to the generation of input data for tests through # the use of hypothesis. # Some of these data are sampled from a test archive created and populated # in the swh.web.tests.data module. # Set the swh-web hypothesis profile if none has been explicitly set hypothesis_default_settings = settings.get_profile("default") if repr(settings()) == repr(hypothesis_default_settings): settings.load_profile("swh-web") # The following strategies exploit the hypothesis capabilities def _filter_checksum(cs): generated_checksums = get_tests_data()["generated_checksums"] if not int.from_bytes(cs, byteorder="little") or cs in generated_checksums: return False generated_checksums.add(cs) return True def _known_swh_object(object_type): return sampled_from(get_tests_data()[object_type]) def sha1(): """ Hypothesis strategy returning a valid hexadecimal sha1 value. """ return binary(min_size=20, max_size=20).filter(_filter_checksum).map(hash_to_hex) def invalid_sha1(): """ Hypothesis strategy returning an invalid sha1 representation. """ return binary(min_size=50, max_size=50).filter(_filter_checksum).map(hash_to_hex) def sha256(): """ Hypothesis strategy returning a valid hexadecimal sha256 value. """ return binary(min_size=32, max_size=32).filter(_filter_checksum).map(hash_to_hex) def content(): """ Hypothesis strategy returning a random content ingested into the test archive. """ return _known_swh_object("contents") def contents(): """ Hypothesis strategy returning random contents ingested into the test archive. """ return lists(content(), min_size=2, max_size=8) def empty_content(): """ Hypothesis strategy returning the empty content ingested into the test archive. """ empty_content = Content.from_data(data=b"").to_dict() for algo in DEFAULT_ALGORITHMS: empty_content[algo] = hash_to_hex(empty_content[algo]) return just(empty_content) def content_text(): """ Hypothesis strategy returning random textual contents ingested into the test archive. 
""" return content().filter(lambda c: c["mimetype"].startswith("text/")) def content_text_non_utf8(): """ Hypothesis strategy returning random textual contents not encoded to UTF-8 ingested into the test archive. """ return content().filter( lambda c: c["mimetype"].startswith("text/") and c["encoding"] not in ("utf-8", "us-ascii") ) def content_text_no_highlight(): """ Hypothesis strategy returning random textual contents with no detected programming language to highlight ingested into the test archive. """ return content().filter( lambda c: c["mimetype"].startswith("text/") and c["hljs_language"] == "nohighlight" ) def content_image_type(): """ Hypothesis strategy returning random image contents ingested into the test archive. """ return content().filter(lambda c: c["mimetype"] in browsers_supported_image_mimes) def content_unsupported_image_type_rendering(): """ Hypothesis strategy returning random image contents ingested into the test archive that can not be rendered by browsers. """ return content().filter( lambda c: c["mimetype"].startswith("image/") and c["mimetype"] not in browsers_supported_image_mimes ) def content_utf8_detected_as_binary(): """ Hypothesis strategy returning random textual contents detected as binary by libmagic while they are valid UTF-8 encoded files. """ def utf8_binary_detected(content): if content["encoding"] != "binary": return False try: content["data"].decode("utf-8") except Exception: return False else: return True return content().filter(utf8_binary_detected) @composite def new_content(draw): blake2s256_hex = draw(sha256()) sha1_hex = draw(sha1()) sha1_git_hex = draw(sha1()) sha256_hex = draw(sha256()) assume(sha1_hex != sha1_git_hex) assume(blake2s256_hex != sha256_hex) return { "blake2S256": blake2s256_hex, "sha1": sha1_hex, "sha1_git": sha1_git_hex, "sha256": sha256_hex, } def unknown_content(): """ Hypothesis strategy returning a random content not ingested into the test archive. """ return new_content().filter( lambda c: get_tests_data()["storage"].content_get_data(hash_to_bytes(c["sha1"])) is None ) def unknown_contents(): """ Hypothesis strategy returning random contents not ingested into the test archive. """ return lists(unknown_content(), min_size=2, max_size=8) def directory(): """ Hypothesis strategy returning a random directory ingested into the test archive. """ return _known_swh_object("directories") def directory_with_subdirs(): """ Hypothesis strategy returning a random directory containing sub directories ingested into the test archive. """ return directory().filter( lambda d: any( [ e["type"] == "dir" for e in list( get_tests_data()["storage"].directory_ls(hash_to_bytes(d)) ) ] ) ) def empty_directory(): """ Hypothesis strategy returning the empty directory ingested into the test archive. """ return just(Directory(entries=()).id.hex()) def unknown_directory(): """ Hypothesis strategy returning a random directory not ingested into the test archive. """ return sha1().filter( lambda s: len( list(get_tests_data()["storage"].directory_missing([hash_to_bytes(s)])) ) > 0 ) def origin(): """ Hypothesis strategy returning a random origin ingested into the test archive. """ return _known_swh_object("origins") def origin_with_multiple_visits(): """ Hypothesis strategy returning a random origin ingested into the test archive. 
""" ret = [] tests_data = get_tests_data() storage = tests_data["storage"] for origin in tests_data["origins"]: visit_page = storage.origin_visit_get(origin["url"]) if len(visit_page.results) > 1: ret.append(origin) return sampled_from(ret) def origin_with_releases(): """ Hypothesis strategy returning a random origin ingested into the test archive. """ ret = [] tests_data = get_tests_data() for origin in tests_data["origins"]: snapshot = snapshot_get_latest(tests_data["storage"], origin["url"]) if any([b.target_type.value == "release" for b in snapshot.branches.values()]): ret.append(origin) return sampled_from(ret) +def origin_with_pull_request_branches(): + """ + Hypothesis strategy returning a random origin with pull request branches + ingested into the test archive. + """ + ret = [] + tests_data = get_tests_data() + storage = tests_data["storage"] + origins = storage.origin_list(limit=1000) + for origin in origins.results: + snapshot = snapshot_get_latest(storage, origin.url) + if any([b"refs/pull/" in b for b in snapshot.branches]): + ret.append(origin) + return sampled_from(ret) + + def new_origin(): """ Hypothesis strategy returning a random origin not ingested into the test archive. """ return new_origin_strategy().filter( lambda origin: get_tests_data()["storage"].origin_get([origin.url])[0] is None ) def new_origins(nb_origins=None): """ Hypothesis strategy returning random origins not ingested into the test archive. """ min_size = nb_origins if nb_origins is not None else 2 max_size = nb_origins if nb_origins is not None else 8 size = random.randint(min_size, max_size) return lists( new_origin(), min_size=size, max_size=size, unique_by=lambda o: tuple(sorted(o.items())), ) def visit_dates(nb_dates=None): """ Hypothesis strategy returning a list of visit dates. """ min_size = nb_dates if nb_dates else 2 max_size = nb_dates if nb_dates else 8 return lists( datetimes( min_value=datetime(2015, 1, 1, 0, 0), max_value=datetime(2018, 12, 31, 0, 0), timezones=timezones(), ), min_size=min_size, max_size=max_size, unique=True, ).map(sorted) def release(): """ Hypothesis strategy returning a random release ingested into the test archive. """ return _known_swh_object("releases") def releases(min_size=2, max_size=8): """ Hypothesis strategy returning random releases ingested into the test archive. """ return lists(release(), min_size=min_size, max_size=max_size) def unknown_release(): """ Hypothesis strategy returning a random revision not ingested into the test archive. """ return sha1().filter( lambda s: get_tests_data()["storage"].release_get([s])[0] is None ) def revision(): """ Hypothesis strategy returning a random revision ingested into the test archive. """ return _known_swh_object("revisions") def unknown_revision(): """ Hypothesis strategy returning a random revision not ingested into the test archive. """ return sha1().filter( lambda s: get_tests_data()["storage"].revision_get([hash_to_bytes(s)])[0] is None ) @composite def new_person(draw): """ Hypothesis strategy returning random raw swh person data. """ name = draw( text( min_size=5, max_size=30, alphabet=characters(min_codepoint=0, max_codepoint=255), ) ) email = "%s@company.org" % name return Person( name=name.encode(), email=email.encode(), fullname=("%s <%s>" % (name, email)).encode(), ) @composite def new_swh_date(draw): """ Hypothesis strategy returning random raw swh date data. 
""" timestamp = draw( datetimes( min_value=datetime(2015, 1, 1, 0, 0), max_value=datetime(2018, 12, 31, 0, 0) ).map(lambda d: int(d.timestamp())) ) return { "timestamp": timestamp, "offset": 0, "negative_utc": False, } @composite def new_revision(draw): """ Hypothesis strategy returning random raw swh revision data not ingested into the test archive. """ return Revision( directory=draw(sha1().map(hash_to_bytes)), author=draw(new_person()), committer=draw(new_person()), message=draw(text(min_size=20, max_size=100).map(lambda t: t.encode())), date=TimestampWithTimezone.from_datetime(draw(new_swh_date())), committer_date=TimestampWithTimezone.from_datetime(draw(new_swh_date())), synthetic=False, type=RevisionType.GIT, ) def revisions(min_size=2, max_size=8): """ Hypothesis strategy returning random revisions ingested into the test archive. """ return lists(revision(), min_size=min_size, max_size=max_size) def unknown_revisions(min_size=2, max_size=8): """ Hypothesis strategy returning random revisions not ingested into the test archive. """ return lists(unknown_revision(), min_size=min_size, max_size=max_size) def snapshot(): """ Hypothesis strategy returning a random snapshot ingested into the test archive. """ return _known_swh_object("snapshots") def new_snapshots(nb_snapshots=None): min_size = nb_snapshots if nb_snapshots else 2 max_size = nb_snapshots if nb_snapshots else 8 return lists( new_snapshot(min_size=2, max_size=10, only_objects=True), min_size=min_size, max_size=max_size, ) def unknown_snapshot(): """ Hypothesis strategy returning a random revision not ingested into the test archive. """ return sha1().filter( lambda s: get_tests_data()["storage"].snapshot_get_branches(hash_to_bytes(s)) is None ) def _get_origin_dfs_revisions_walker(): tests_data = get_tests_data() storage = tests_data["storage"] origin = random.choice(tests_data["origins"][:-1]) snapshot = snapshot_get_latest(storage, origin["url"]) if snapshot.branches[b"HEAD"].target_type.value == "alias": target = snapshot.branches[b"HEAD"].target head = snapshot.branches[target].target else: head = snapshot.branches[b"HEAD"].target return get_revisions_walker("dfs", storage, head) def ancestor_revisions(): """ Hypothesis strategy returning a pair of revisions ingested into the test archive with an ancestor relation. """ # get a dfs revisions walker for one of the origins # loaded into the test archive revisions_walker = _get_origin_dfs_revisions_walker() master_revisions = [] children = defaultdict(list) init_rev_found = False # get revisions only authored in the master branch for rev in revisions_walker: for rev_p in rev["parents"]: children[rev_p].append(rev["id"]) if not init_rev_found: master_revisions.append(rev) if not rev["parents"]: init_rev_found = True # head revision root_rev = master_revisions[0] # pick a random revision, different from head, only authored # in the master branch ancestor_rev_idx = random.choice(list(range(1, len(master_revisions) - 1))) ancestor_rev = master_revisions[ancestor_rev_idx] ancestor_child_revs = children[ancestor_rev["id"]] return just( { "sha1_git_root": hash_to_hex(root_rev["id"]), "sha1_git": hash_to_hex(ancestor_rev["id"]), "children": [hash_to_hex(r) for r in ancestor_child_revs], } ) def non_ancestor_revisions(): """ Hypothesis strategy returning a pair of revisions ingested into the test archive with no ancestor relation. 
""" # get a dfs revisions walker for one of the origins # loaded into the test archive revisions_walker = _get_origin_dfs_revisions_walker() merge_revs = [] children = defaultdict(list) # get all merge revisions for rev in revisions_walker: if len(rev["parents"]) > 1: merge_revs.append(rev) for rev_p in rev["parents"]: children[rev_p].append(rev["id"]) # find a merge revisions whose parents have a unique child revision random.shuffle(merge_revs) selected_revs = None for merge_rev in merge_revs: if all(len(children[rev_p]) == 1 for rev_p in merge_rev["parents"]): selected_revs = merge_rev["parents"] return just( { "sha1_git_root": hash_to_hex(selected_revs[0]), "sha1_git": hash_to_hex(selected_revs[1]), } ) # The following strategies returns data specific to some tests # that can not be generated and thus are hardcoded. def contents_with_ctags(): """ Hypothesis strategy returning contents ingested into the test archive. Those contents are ctags compatible, that is running ctags on those lay results. """ return just( { "sha1s": [ "0ab37c02043ebff946c1937523f60aadd0844351", "15554cf7608dde6bfefac7e3d525596343a85b6f", "2ce837f1489bdfb8faf3ebcc7e72421b5bea83bd", "30acd0b47fc25e159e27a980102ddb1c4bea0b95", "4f81f05aaea3efb981f9d90144f746d6b682285b", "5153aa4b6e4455a62525bc4de38ed0ff6e7dd682", "59d08bafa6a749110dfb65ba43a61963d5a5bf9f", "7568285b2d7f31ae483ae71617bd3db873deaa2c", "7ed3ee8e94ac52ba983dd7690bdc9ab7618247b4", "8ed7ef2e7ff9ed845e10259d08e4145f1b3b5b03", "9b3557f1ab4111c8607a4f2ea3c1e53c6992916c", "9c20da07ed14dc4fcd3ca2b055af99b2598d8bdd", "c20ceebd6ec6f7a19b5c3aebc512a12fbdc9234b", "e89e55a12def4cd54d5bff58378a3b5119878eb7", "e8c0654fe2d75ecd7e0b01bee8a8fc60a130097e", "eb6595e559a1d34a2b41e8d4835e0e4f98a5d2b5", ], "symbol_name": "ABS", } ) def revision_with_submodules(): """ Hypothesis strategy returning a revision that is known to point to a directory with revision entries (aka git submodule) """ return just( { "rev_sha1_git": "ffcb69001f3f6745dfd5b48f72ab6addb560e234", "rev_dir_sha1_git": "d92a21446387fa28410e5a74379c934298f39ae2", "rev_dir_rev_path": "libtess2", } )