diff --git a/requirements-swh.txt b/requirements-swh.txt
index c58ed6e..ee16f4e 100644
--- a/requirements-swh.txt
+++ b/requirements-swh.txt
@@ -1,4 +1,4 @@
swh.core >= 0.0.75
-swh.model >= 0.0.57
+swh.model >= 0.0.60
swh.scheduler
swh.storage >= 0.0.163
diff --git a/swh/loader/package/cran/loader.py b/swh/loader/package/cran/loader.py
index 1a8ca77..10eb4e2 100644
--- a/swh/loader/package/cran/loader.py
+++ b/swh/loader/package/cran/loader.py
@@ -1,194 +1,194 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import dateutil.parser
import datetime
import os
import logging
import re

from datetime import timezone
from os import path
from typing import Any, Generator, Dict, List, Mapping, Optional, Tuple

from debian.deb822 import Deb822

from swh.loader.package.loader import PackageLoader
from swh.loader.package.utils import (
-    release_name, parse_author, swh_author, artifact_identity
+    release_name, artifact_identity
)
from swh.model.model import (
-    TimestampWithTimezone, Sha1Git, Revision, RevisionType,
+    Person, TimestampWithTimezone, Sha1Git, Revision, RevisionType,
)

logger = logging.getLogger(__name__)

DATE_PATTERN = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})$')


class CRANLoader(PackageLoader):
    visit_type = 'cran'

    def __init__(self, url: str, artifacts: List[Dict]):
        """Loader constructor.

        Args:
            url: Origin url to retrieve cran artifact(s) from
            artifacts: List of associated artifact for the origin url

        """
        super().__init__(url=url)
        # explicit what we consider the artifact identity
        self.id_keys = ['url', 'version']
        self.artifacts = artifacts

    def get_versions(self) -> List[str]:
        versions = []
        for artifact in self.artifacts:
            versions.append(artifact['version'])
        return versions

    def get_default_version(self) -> str:
        return self.artifacts[-1]['version']

    def get_package_info(self, version: str) -> Generator[
            Tuple[str, Dict[str, Any]], None, None]:
        for a_metadata in self.artifacts:
            url = a_metadata['url']
            package_version = a_metadata['version']
            if version == package_version:
                p_info = {
                    'url': url,
                    'filename': path.basename(url),
                    'raw': a_metadata,
                }
                yield release_name(version), p_info

    def resolve_revision_from(
            self, known_artifacts: Mapping[bytes, Mapping],
            artifact_metadata: Mapping[str, Any]) \
            -> Optional[bytes]:
        """Given known_artifacts per revision, try to determine the
           revision for artifact_metadata

        """
        new_identity = artifact_identity(artifact_metadata, self.id_keys)
        for rev_id, known_artifact_meta in known_artifacts.items():
            logging.debug('known_artifact_meta: %s', known_artifact_meta)
            known_artifact = known_artifact_meta['extrinsic']['raw']
            known_identity = artifact_identity(known_artifact, self.id_keys)
            if new_identity == known_identity:
                return rev_id
        return None

    def build_revision(
            self, a_metadata: Mapping[str, Any], uncompressed_path: str,
            directory: Sha1Git) -> Optional[Revision]:
        # a_metadata is empty
        metadata = extract_intrinsic_metadata(uncompressed_path)
        date = parse_date(metadata.get('Date'))
-        author = swh_author(parse_author(metadata.get('Maintainer', {})))
+        author = Person.from_fullname(metadata.get('Maintainer', '').encode())
        version = metadata.get('Version', a_metadata['version'])
        return Revision(
            message=version.encode('utf-8'),
            type=RevisionType.TAR,
            date=date,
            author=author,
            committer=author,
            committer_date=date,
            parents=[],
            directory=directory,
            synthetic=True,
            metadata={
                'intrinsic': {
                    'tool': 'DESCRIPTION',
                    'raw': metadata,
                },
                'extrinsic': {
                    'provider': self.url,
                    'when': self.visit_date.isoformat(),
                    'raw': a_metadata,
                },
            },
        )


def parse_debian_control(filepath: str) -> Dict[str, Any]:
    """Parse debian control at filepath"""
    metadata: Dict = {}
    logger.debug('Debian control file %s', filepath)
    for paragraph in Deb822.iter_paragraphs(open(filepath, 'rb')):
        logger.debug('paragraph: %s', paragraph)
        metadata.update(**paragraph)

    logger.debug('metadata parsed: %s', metadata)
    return metadata


def extract_intrinsic_metadata(dir_path: str) -> Dict[str, Any]:
    """Given an uncompressed path holding the DESCRIPTION file, returns a
       DESCRIPTION parsed structure as a dict.

    Cran origins describes their intrinsic metadata within a DESCRIPTION file
    at the root tree of a tarball. This DESCRIPTION uses a simple file format
    called DCF, the Debian control format.

    The release artifact contains at their root one folder. For example:

    $ tar tvf zprint-0.0.6.tar.gz
    drwxr-xr-x root/root 0 2018-08-22 11:01 zprint-0.0.6/
    ...

    Args:
        dir_path (str): Path to the uncompressed directory
                        representing a release artifact from pypi.

    Returns:
        the DESCRIPTION parsed structure as a dict (or empty dict if missing)

    """
    # Retrieve the root folder of the archive
    if not os.path.exists(dir_path):
        return {}
    lst = os.listdir(dir_path)
    if len(lst) != 1:
        return {}
    project_dirname = lst[0]
    description_path = os.path.join(dir_path, project_dirname, 'DESCRIPTION')
    if not os.path.exists(description_path):
        return {}
    return parse_debian_control(description_path)


def parse_date(date: Optional[str]) -> Optional[TimestampWithTimezone]:
    """Parse a date into a datetime

    """
    assert not date or isinstance(date, str)
    dt: Optional[datetime.datetime] = None
    if not date:
        return None
    try:
        specific_date = DATE_PATTERN.match(date)
        if specific_date:
            year = int(specific_date.group('year'))
            month = int(specific_date.group('month'))
            dt = datetime.datetime(year, month, 1)
        else:
            dt = dateutil.parser.parse(date)

        if not dt.tzinfo:
            # up for discussion the timezone needs to be set or
            # normalize_timestamp is not happy: ValueError: normalize_timestamp
            # received datetime without timezone: 2001-06-08 00:00:00
            dt = dt.replace(tzinfo=timezone.utc)
    except Exception as e:
        logger.warning('Fail to parse date %s. Reason: %s', date, e)
    if dt:
        return TimestampWithTimezone.from_datetime(dt)
    else:
        return None
diff --git a/swh/loader/package/npm/loader.py b/swh/loader/package/npm/loader.py
index 901256a..4a1cf88 100644
--- a/swh/loader/package/npm/loader.py
+++ b/swh/loader/package/npm/loader.py
@@ -1,287 +1,286 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import json
import logging
import os

from codecs import BOM_UTF8
from typing import Any, Dict, Generator, Mapping, Sequence, Tuple, Optional

import attr
import chardet

from urllib.parse import quote

from swh.model.model import (
    Person, RevisionType, Revision, TimestampWithTimezone, Sha1Git,
)

from swh.loader.package.loader import PackageLoader
from swh.loader.package.utils import (
-    api_info, release_name, parse_author, swh_author
+    api_info, release_name
)

logger = logging.getLogger(__name__)


class NpmLoader(PackageLoader):
    """Load npm origin's artifact releases into swh archive.

    """
    visit_type = 'npm'

    def __init__(self, url: str):
        """Constructor

        Args
            str: origin url (e.g.
https://www.npmjs.com/package/) """ super().__init__(url=url) package_name = url.split('https://www.npmjs.com/package/')[1] safe_name = quote(package_name, safe='') self.provider_url = f'https://replicate.npmjs.com/{safe_name}/' self._info: Dict[str, Any] = {} self._versions = None @property def info(self) -> Dict[str, Any]: """Return the project metadata information (fetched from npm registry) """ if not self._info: self._info = api_info(self.provider_url) return self._info def get_versions(self) -> Sequence[str]: return sorted(list(self.info['versions'].keys())) def get_default_version(self) -> str: return self.info['dist-tags'].get('latest', '') def get_package_info(self, version: str) -> Generator[ Tuple[str, Mapping[str, Any]], None, None]: meta = self.info['versions'][version] url = meta['dist']['tarball'] p_info = { 'url': url, 'filename': os.path.basename(url), 'raw': meta, } yield release_name(version), p_info def resolve_revision_from( self, known_artifacts: Dict, artifact_metadata: Dict) \ -> Optional[bytes]: return artifact_to_revision_id(known_artifacts, artifact_metadata) def build_revision( self, a_metadata: Dict, uncompressed_path: str, directory: Sha1Git) -> Optional[Revision]: i_metadata = extract_intrinsic_metadata(uncompressed_path) if not i_metadata: return None # from intrinsic metadata author = extract_npm_package_author(i_metadata) message = i_metadata['version'].encode('ascii') # from extrinsic metadata # No date available in intrinsic metadata: retrieve it from the API # metadata, using the version number that the API claims this package # has. extrinsic_version = a_metadata['version'] if 'time' in self.info: date = self.info['time'][extrinsic_version] elif 'mtime' in a_metadata: date = a_metadata['mtime'] else: artifact_name = os.path.basename(a_metadata['dist']['tarball']) raise ValueError( 'Origin %s: Cannot determine upload time for artifact %s.' % (self.url, artifact_name) ) date = TimestampWithTimezone.from_iso8601(date) # FIXME: this is to remain bug-compatible with earlier versions: date = attr.evolve(date, timestamp=attr.evolve( date.timestamp, microseconds=0)) r = Revision( type=RevisionType.TAR, message=message, author=author, date=date, committer=author, committer_date=date, parents=[], directory=directory, synthetic=True, metadata={ 'intrinsic': { 'tool': 'package.json', 'raw': i_metadata, }, 'extrinsic': { 'provider': self.provider_url, 'when': self.visit_date.isoformat(), 'raw': a_metadata, }, }, ) return r def artifact_to_revision_id( known_artifacts: Dict, artifact_metadata: Dict) -> Optional[bytes]: """Given metadata artifact, solves the associated revision id. The following code allows to deal with 2 metadata formats: - old format sample:: { 'package_source': { 'sha1': '05181c12cd8c22035dd31155656826b85745da37', } } - new format sample:: { 'original_artifact': [{ 'checksums': { 'sha256': '6975816f2c5ad4046acc676ba112f2fff945b01522d63948531f11f11e0892ec', # noqa ... }, }], ... 
} """ shasum = artifact_metadata['dist']['shasum'] for rev_id, known_artifact in known_artifacts.items(): known_original_artifact = known_artifact.get('original_artifact') if not known_original_artifact: # previous loader-npm version kept original artifact elsewhere known_original_artifact = known_artifact.get('package_source') if not known_original_artifact: continue original_hash = known_original_artifact['sha1'] else: assert isinstance(known_original_artifact, list) original_hash = known_original_artifact[0]['checksums']['sha1'] if shasum == original_hash: return rev_id return None def extract_npm_package_author(package_json) -> Person: """ Extract package author from a ``package.json`` file content and return it in swh format. Args: package_json (dict): Dict holding the content of parsed ``package.json`` file Returns: Person """ def _author_str(author_data): if type(author_data) is dict: author_str = '' if 'name' in author_data: author_str += author_data['name'] if 'email' in author_data: author_str += ' <%s>' % author_data['email'] return author_str elif type(author_data) is list: return _author_str(author_data[0]) if len(author_data) > 0 else '' else: return author_data - author_data: Dict = {} for author_key in ('author', 'authors'): if author_key in package_json: author_str = _author_str(package_json[author_key]) - author_data = parse_author(author_str) + return Person.from_fullname(author_str.encode()) - return swh_author(author_data) + return Person(fullname=b'', name=None, email=None) def _lstrip_bom(s, bom=BOM_UTF8): if s.startswith(bom): return s[len(bom):] else: return s def load_json(json_bytes): """ Try to load JSON from bytes and return a dictionary. First try to decode from utf-8. If the decoding failed, try to detect the encoding and decode again with replace error handling. If JSON is malformed, an empty dictionary will be returned. Args: json_bytes (bytes): binary content of a JSON file Returns: dict: JSON data loaded in a dictionary """ json_data = {} try: json_str = _lstrip_bom(json_bytes).decode('utf-8') except UnicodeDecodeError: encoding = chardet.detect(json_bytes)['encoding'] if encoding: json_str = json_bytes.decode(encoding, 'replace') try: json_data = json.loads(json_str) except json.decoder.JSONDecodeError: pass return json_data def extract_intrinsic_metadata(dir_path: str) -> Dict: """Given an uncompressed path holding the pkginfo file, returns a pkginfo parsed structure as a dict. The release artifact contains at their root one folder. For example: $ tar tvf zprint-0.0.6.tar.gz drwxr-xr-x root/root 0 2018-08-22 11:01 zprint-0.0.6/ ... Args: dir_path (str): Path to the uncompressed directory representing a release artifact from npm. Returns: the pkginfo parsed structure as a dict if any or None if none was present. 
""" # Retrieve the root folder of the archive if not os.path.exists(dir_path): return {} lst = os.listdir(dir_path) if len(lst) == 0: return {} project_dirname = lst[0] package_json_path = os.path.join(dir_path, project_dirname, 'package.json') if not os.path.exists(package_json_path): return {} with open(package_json_path, 'rb') as package_json_file: package_json_bytes = package_json_file.read() return load_json(package_json_bytes) diff --git a/swh/loader/package/npm/tests/test_npm.py b/swh/loader/package/npm/tests/test_npm.py index 0348630..be6dd05 100644 --- a/swh/loader/package/npm/tests/test_npm.py +++ b/swh/loader/package/npm/tests/test_npm.py @@ -1,591 +1,591 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import os import pytest from swh.model.hashutil import hash_to_bytes from swh.model.model import Person from swh.loader.package.npm.loader import ( NpmLoader, extract_npm_package_author, artifact_to_revision_id ) from swh.loader.package.tests.common import ( check_snapshot, check_metadata_paths, get_stats ) def test_extract_npm_package_author(datadir): package_metadata_filepath = os.path.join( datadir, 'https_replicate.npmjs.com', 'org_visit1') with open(package_metadata_filepath) as json_file: package_metadata = json.load(json_file) extract_npm_package_author(package_metadata['versions']['0.0.2']) == \ Person( fullname=b'mooz ', name=b'mooz', email=b'stillpedant@gmail.com' ) assert ( extract_npm_package_author(package_metadata['versions']['0.0.3']) == Person( fullname=b'Masafumi Oyamada ', name=b'Masafumi Oyamada', email=b'stillpedant@gmail.com' ) ) package_json = json.loads(''' { "name": "highlightjs-line-numbers.js", "version": "2.7.0", "description": "Highlight.js line numbers plugin.", "main": "src/highlightjs-line-numbers.js", "dependencies": {}, "devDependencies": { "gulp": "^4.0.0", "gulp-rename": "^1.4.0", "gulp-replace": "^0.6.1", "gulp-uglify": "^1.2.0" }, "repository": { "type": "git", "url": "https://github.com/wcoder/highlightjs-line-numbers.js.git" }, "author": "Yauheni Pakala ", "license": "MIT", "bugs": { "url": "https://github.com/wcoder/highlightjs-line-numbers.js/issues" }, "homepage": "http://wcoder.github.io/highlightjs-line-numbers.js/" }''') # noqa assert extract_npm_package_author(package_json) == \ Person( fullname=b'Yauheni Pakala ', name=b'Yauheni Pakala', email=b'evgeniy.pakalo@gmail.com' ) package_json = json.loads(''' { "name": "3-way-diff", "version": "0.0.1", "description": "3-way diffing of JavaScript objects", "main": "index.js", "authors": [ { "name": "Shawn Walsh", "url": "https://github.com/shawnpwalsh" }, { "name": "Markham F Rollins IV", "url": "https://github.com/mrollinsiv" } ], "keywords": [ "3-way diff", "3 way diff", "three-way diff", "three way diff" ], "devDependencies": { "babel-core": "^6.20.0", "babel-preset-es2015": "^6.18.0", "mocha": "^3.0.2" }, "dependencies": { "lodash": "^4.15.0" } }''') assert extract_npm_package_author(package_json) == \ Person( fullname=b'Shawn Walsh', name=b'Shawn Walsh', email=None ) package_json = json.loads(''' { "name": "yfe-ynpm", "version": "1.0.0", "homepage": "http://gitlab.ywwl.com/yfe/yfe-ynpm", "repository": { "type": "git", "url": "git@gitlab.ywwl.com:yfe/yfe-ynpm.git" }, "author": [ "fengmk2 (https://fengmk2.com)", "xufuzi (https://7993.org)" ], "license": "MIT" }''') assert 
extract_npm_package_author(package_json) == \ Person( - fullname=b'fengmk2 ', + fullname=b'fengmk2 (https://fengmk2.com)', name=b'fengmk2', - email=b'fengmk2@gmail.com' + email=b'fengmk2@gmail.com', ) package_json = json.loads(''' { "name": "umi-plugin-whale", "version": "0.0.8", "description": "Internal contract component", "authors": { "name": "xiaohuoni", "email": "448627663@qq.com" }, "repository": "alitajs/whale", "devDependencies": { "np": "^3.0.4", "umi-tools": "*" }, "license": "MIT" }''') assert extract_npm_package_author(package_json) == \ Person( fullname=b'xiaohuoni <448627663@qq.com>', name=b'xiaohuoni', email=b'448627663@qq.com' ) def normalize_hashes(hashes): if isinstance(hashes, str): return hash_to_bytes(hashes) if isinstance(hashes, list): return [hash_to_bytes(x) for x in hashes] return {hash_to_bytes(k): hash_to_bytes(v) for k, v in hashes.items()} _expected_new_contents_first_visit = normalize_hashes([ '4ce3058e16ab3d7e077f65aabf855c34895bf17c', '858c3ceee84c8311adc808f8cdb30d233ddc9d18', '0fa33b4f5a4e0496da6843a38ff1af8b61541996', '85a410f8ef8eb8920f2c384a9555566ad4a2e21b', '9163ac8025923d5a45aaac482262893955c9b37b', '692cf623b8dd2c5df2c2998fd95ae4ec99882fb4', '18c03aac6d3e910efb20039c15d70ab5e0297101', '41265c42446aac17ca769e67d1704f99e5a1394d', '783ff33f5882813dca9239452c4a7cadd4dba778', 'b029cfb85107aee4590c2434a3329bfcf36f8fa1', '112d1900b4c2e3e9351050d1b542c9744f9793f3', '5439bbc4bd9a996f1a38244e6892b71850bc98fd', 'd83097a2f994b503185adf4e719d154123150159', 'd0939b4898e83090ee55fd9d8a60e312cfadfbaf', 'b3523a26f7147e4af40d9d462adaae6d49eda13e', 'cd065fb435d6fb204a8871bcd623d0d0e673088c', '2854a40855ad839a54f4b08f5cff0cf52fca4399', 'b8a53bbaac34ebb8c6169d11a4b9f13b05c583fe', '0f73d56e1cf480bded8a1ecf20ec6fc53c574713', '0d9882b2dfafdce31f4e77fe307d41a44a74cefe', '585fc5caab9ead178a327d3660d35851db713df1', 'e8cd41a48d79101977e3036a87aeb1aac730686f', '5414efaef33cceb9f3c9eb5c4cc1682cd62d14f7', '9c3cc2763bf9e9e37067d3607302c4776502df98', '3649a68410e354c83cd4a38b66bd314de4c8f5c9', 'e96ed0c091de1ebdf587104eaf63400d1974a1fe', '078ca03d2f99e4e6eab16f7b75fbb7afb699c86c', '38de737da99514de6559ff163c988198bc91367a', ]) _expected_new_directories_first_visit = normalize_hashes([ '3370d20d6f96dc1c9e50f083e2134881db110f4f', '42753c0c2ab00c4501b552ac4671c68f3cf5aece', 'd7895533ef5edbcffdea3f057d9fef3a1ef845ce', '80579be563e2ef3e385226fe7a3f079b377f142c', '3b0ddc6a9e58b4b53c222da4e27b280b6cda591c', 'bcad03ce58ac136f26f000990fc9064e559fe1c0', '5fc7e82a1bc72e074665c6078c6d3fad2f13d7ca', 'e3cd26beba9b1e02f6762ef54bd9ac80cc5f25fd', '584b5b4b6cf7f038095e820b99386a9c232de931', '184c8d6d0d242f2b1792ef9d3bf396a5434b7f7a', 'bb5f4ee143c970367eb409f2e4c1104898048b9d', '1b95491047add1103db0dfdfa84a9735dcb11e88', 'a00c6de13471a2d66e64aca140ddb21ef5521e62', '5ce6c1cd5cda2d546db513aaad8c72a44c7771e2', 'c337091e349b6ac10d38a49cdf8c2401ef9bb0f2', '202fafcd7c0f8230e89d5496ad7f44ab12b807bf', '775cc516543be86c15c1dc172f49c0d4e6e78235', 'ff3d1ead85a14f891e8b3fa3a89de39db1b8de2e', ]) _expected_new_revisions_first_visit = normalize_hashes({ 'd8a1c7474d2956ac598a19f0f27d52f7015f117e': '42753c0c2ab00c4501b552ac4671c68f3cf5aece', '5f9eb78af37ffd12949f235e86fac04898f9f72a': '3370d20d6f96dc1c9e50f083e2134881db110f4f', 'ba019b192bdb94bd0b5bd68b3a5f92b5acc2239a': 'd7895533ef5edbcffdea3f057d9fef3a1ef845ce'} ) def package_url(package): return 'https://www.npmjs.com/package/%s' % package def package_metadata_url(package): return 'https://replicate.npmjs.com/%s/' % package def 
test_revision_metadata_structure(swh_config, requests_mock_datadir): package = 'org' loader = NpmLoader(package_url(package)) actual_load_status = loader.load() assert actual_load_status['status'] == 'eventful' assert actual_load_status['snapshot_id'] is not None expected_revision_id = hash_to_bytes( 'd8a1c7474d2956ac598a19f0f27d52f7015f117e') revision = list(loader.storage.revision_get([expected_revision_id]))[0] assert revision is not None check_metadata_paths(revision['metadata'], paths=[ ('intrinsic.tool', str), ('intrinsic.raw', dict), ('extrinsic.provider', str), ('extrinsic.when', str), ('extrinsic.raw', dict), ('original_artifact', list), ]) for original_artifact in revision['metadata']['original_artifact']: check_metadata_paths(original_artifact, paths=[ ('filename', str), ('length', int), ('checksums', dict), ]) def test_npm_loader_first_visit(swh_config, requests_mock_datadir): package = 'org' loader = NpmLoader(package_url(package)) actual_load_status = loader.load() expected_snapshot_id = 'd0587e1195aed5a8800411a008f2f2d627f18e2d' assert actual_load_status == { 'status': 'eventful', 'snapshot_id': expected_snapshot_id } stats = get_stats(loader.storage) assert { 'content': len(_expected_new_contents_first_visit), 'directory': len(_expected_new_directories_first_visit), 'origin': 1, 'origin_visit': 1, 'person': 2, 'release': 0, 'revision': len(_expected_new_revisions_first_visit), 'skipped_content': 0, 'snapshot': 1, } == stats assert len(list(loader.storage.content_get( _expected_new_contents_first_visit))) == len( _expected_new_contents_first_visit) assert list(loader.storage.directory_missing( _expected_new_directories_first_visit)) == [] assert list(loader.storage.revision_missing( _expected_new_revisions_first_visit)) == [] expected_snapshot = { 'id': expected_snapshot_id, 'branches': { 'HEAD': { 'target': 'releases/0.0.4', 'target_type': 'alias' }, 'releases/0.0.2': { 'target': 'd8a1c7474d2956ac598a19f0f27d52f7015f117e', 'target_type': 'revision' }, 'releases/0.0.3': { 'target': '5f9eb78af37ffd12949f235e86fac04898f9f72a', 'target_type': 'revision' }, 'releases/0.0.4': { 'target': 'ba019b192bdb94bd0b5bd68b3a5f92b5acc2239a', 'target_type': 'revision' } } } check_snapshot(expected_snapshot, loader.storage) def test_npm_loader_incremental_visit( swh_config, requests_mock_datadir_visits): package = 'org' url = package_url(package) loader = NpmLoader(url) actual_load_status = loader.load() assert actual_load_status['status'] == 'eventful' assert actual_load_status['status'] is not None origin_visit = list(loader.storage.origin_visit_get(url))[-1] assert origin_visit['status'] == 'full' assert origin_visit['type'] == 'npm' stats = get_stats(loader.storage) assert { 'content': len(_expected_new_contents_first_visit), 'directory': len(_expected_new_directories_first_visit), 'origin': 1, 'origin_visit': 1, 'person': 2, 'release': 0, 'revision': len(_expected_new_revisions_first_visit), 'skipped_content': 0, 'snapshot': 1, } == stats loader._info = None # reset loader internal state actual_load_status2 = loader.load() assert actual_load_status2['status'] == 'eventful' snap_id2 = actual_load_status2['snapshot_id'] assert snap_id2 is not None assert snap_id2 != actual_load_status['snapshot_id'] origin_visit2 = list(loader.storage.origin_visit_get(url))[-1] assert origin_visit2['status'] == 'full' assert origin_visit2['type'] == 'npm' stats = get_stats(loader.storage) assert { # 3 new releases artifacts 'content': len(_expected_new_contents_first_visit) + 14, 'directory': 
len(_expected_new_directories_first_visit) + 15, 'origin': 1, 'origin_visit': 2, 'person': 2, 'release': 0, 'revision': len(_expected_new_revisions_first_visit) + 3, 'skipped_content': 0, 'snapshot': 2, } == stats urls = [ m.url for m in requests_mock_datadir_visits.request_history if m.url.startswith('https://registry.npmjs.org') ] assert len(urls) == len(set(urls)) # we visited each artifact once across @pytest.mark.usefixtures('requests_mock_datadir') def test_npm_loader_version_divergence(swh_config): package = '@aller_shared' url = package_url(package) loader = NpmLoader(url) actual_load_status = loader.load() assert actual_load_status['status'] == 'eventful' assert actual_load_status['status'] is not None origin_visit = list(loader.storage.origin_visit_get(url))[-1] assert origin_visit['status'] == 'full' assert origin_visit['type'] == 'npm' stats = get_stats(loader.storage) assert { # 1 new releases artifacts 'content': 534, 'directory': 153, 'origin': 1, 'origin_visit': 1, 'person': 1, 'release': 0, 'revision': 2, 'skipped_content': 0, 'snapshot': 1, } == stats expected_snapshot = { 'id': 'b11ebac8c9d0c9e5063a2df693a18e3aba4b2f92', 'branches': { 'HEAD': { 'target_type': 'alias', 'target': 'releases/0.1.0' }, 'releases/0.1.0': { 'target_type': 'revision', 'target': '845673bfe8cbd31b1eaf757745a964137e6f9116', }, 'releases/0.1.1-alpha.14': { 'target_type': 'revision', 'target': '05181c12cd8c22035dd31155656826b85745da37', }, }, } check_snapshot(expected_snapshot, loader.storage) def test_npm_artifact_to_revision_id_none(): """Current loader version should stop soon if nothing can be found """ artifact_metadata = { 'dist': { 'shasum': '05181c12cd8c22035dd31155656826b85745da37', }, } known_artifacts = { 'b11ebac8c9d0c9e5063a2df693a18e3aba4b2f92': {}, } assert artifact_to_revision_id(known_artifacts, artifact_metadata) is None def test_npm_artifact_to_revision_id_old_loader_version(): """Current loader version should solve old metadata scheme """ artifact_metadata = { 'dist': { 'shasum': '05181c12cd8c22035dd31155656826b85745da37', } } known_artifacts = { hash_to_bytes('b11ebac8c9d0c9e5063a2df693a18e3aba4b2f92'): { 'package_source': { 'sha1': "something-wrong" } }, hash_to_bytes('845673bfe8cbd31b1eaf757745a964137e6f9116'): { 'package_source': { 'sha1': '05181c12cd8c22035dd31155656826b85745da37', } } } assert artifact_to_revision_id(known_artifacts, artifact_metadata) \ == hash_to_bytes('845673bfe8cbd31b1eaf757745a964137e6f9116') def test_npm_artifact_to_revision_id_current_loader_version(): """Current loader version should be able to solve current metadata scheme """ artifact_metadata = { 'dist': { 'shasum': '05181c12cd8c22035dd31155656826b85745da37', } } known_artifacts = { hash_to_bytes('b11ebac8c9d0c9e5063a2df693a18e3aba4b2f92'): { 'original_artifact': [{ 'checksums': { 'sha1': "05181c12cd8c22035dd31155656826b85745da37" }, }], }, hash_to_bytes('845673bfe8cbd31b1eaf757745a964137e6f9116'): { 'original_artifact': [{ 'checksums': { 'sha1': 'something-wrong' }, }], }, } assert artifact_to_revision_id(known_artifacts, artifact_metadata) \ == hash_to_bytes('b11ebac8c9d0c9e5063a2df693a18e3aba4b2f92') def test_npm_artifact_with_no_intrinsic_metadata( swh_config, requests_mock_datadir): """Skip artifact with no intrinsic metadata during ingestion """ package = 'nativescript-telerik-analytics' url = package_url(package) loader = NpmLoader(url) actual_load_status = loader.load() assert actual_load_status['status'] == 'eventful' # no branch as one artifact without any intrinsic metadata 
expected_snapshot = { 'id': '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e', 'branches': {}, } check_snapshot(expected_snapshot, loader.storage) origin_visit = list(loader.storage.origin_visit_get(url))[-1] assert origin_visit['status'] == 'full' assert origin_visit['type'] == 'npm' def test_npm_artifact_with_no_upload_time(swh_config, requests_mock_datadir): """With no time upload, artifact is skipped """ package = 'jammit-no-time' url = package_url(package) loader = NpmLoader(url) actual_load_status = loader.load() assert actual_load_status['status'] == 'failed' # no branch as one artifact without any intrinsic metadata expected_snapshot = { 'id': '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e', 'branches': {}, } check_snapshot(expected_snapshot, loader.storage) origin_visit = list(loader.storage.origin_visit_get(url))[-1] assert origin_visit['status'] == 'partial' assert origin_visit['type'] == 'npm' def test_npm_artifact_use_mtime_if_no_time(swh_config, requests_mock_datadir): """With no time upload, artifact is skipped """ package = 'jammit-express' url = package_url(package) loader = NpmLoader(url) actual_load_status = loader.load() assert actual_load_status['status'] == 'eventful' # artifact is used expected_snapshot = { 'id': 'd6e08e19159f77983242877c373c75222d5ae9dd', 'branches': { 'HEAD': { 'target_type': 'alias', 'target': 'releases/0.0.1' }, 'releases/0.0.1': { 'target_type': 'revision', 'target': '9e4dd2b40d1b46b70917c0949aa2195c823a648e', } } } check_snapshot(expected_snapshot, loader.storage) origin_visit = list(loader.storage.origin_visit_get(url))[-1] assert origin_visit['status'] == 'full' assert origin_visit['type'] == 'npm' diff --git a/swh/loader/package/tests/test_utils.py b/swh/loader/package/tests/test_utils.py index 9f40bcb..f2a888f 100644 --- a/swh/loader/package/tests/test_utils.py +++ b/swh/loader/package/tests/test_utils.py @@ -1,322 +1,180 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import pytest import swh.loader.package from swh.loader.package.utils import ( - download, api_info, release_name, parse_author, artifact_identity + download, api_info, release_name, artifact_identity ) def test_version_generation(): assert swh.loader.package.__version__ != 'devel', \ "Make sure swh.loader.core is installed (e.g. pip install -e .)" @pytest.mark.fs def test_download_fail_to_download(tmp_path, requests_mock): url = 'https://pypi.org/pypi/arrow/json' status_code = 404 requests_mock.get(url, status_code=status_code) with pytest.raises(ValueError) as e: download(url, tmp_path) assert e.value.args[0] == "Fail to query '%s'. 
Reason: %s" % ( url, status_code) @pytest.mark.fs def test_download_ok(tmp_path, requests_mock): """Download without issue should provide filename and hashes""" filename = 'requests-0.0.1.tar.gz' url = 'https://pypi.org/pypi/requests/%s' % filename data = 'this is something' requests_mock.get(url, text=data, headers={ 'content-length': str(len(data)) }) actual_filepath, actual_hashes = download(url, dest=str(tmp_path)) actual_filename = os.path.basename(actual_filepath) assert actual_filename == filename assert actual_hashes['length'] == len(data) assert actual_hashes['checksums']['sha1'] == 'fdd1ce606a904b08c816ba84f3125f2af44d92b2' # noqa assert (actual_hashes['checksums']['sha256'] == '1d9224378d77925d612c9f926eb9fb92850e6551def8328011b6a972323298d5') @pytest.mark.fs def test_download_ok_no_header(tmp_path, requests_mock): """Download without issue should provide filename and hashes""" filename = 'requests-0.0.1.tar.gz' url = 'https://pypi.org/pypi/requests/%s' % filename data = 'this is something' requests_mock.get(url, text=data) # no header information actual_filepath, actual_hashes = download(url, dest=str(tmp_path)) actual_filename = os.path.basename(actual_filepath) assert actual_filename == filename assert actual_hashes['length'] == len(data) assert actual_hashes['checksums']['sha1'] == 'fdd1ce606a904b08c816ba84f3125f2af44d92b2' # noqa assert (actual_hashes['checksums']['sha256'] == '1d9224378d77925d612c9f926eb9fb92850e6551def8328011b6a972323298d5') @pytest.mark.fs def test_download_ok_with_hashes(tmp_path, requests_mock): """Download without issue should provide filename and hashes""" filename = 'requests-0.0.1.tar.gz' url = 'https://pypi.org/pypi/requests/%s' % filename data = 'this is something' requests_mock.get(url, text=data, headers={ 'content-length': str(len(data)) }) # good hashes for such file good = { 'sha1': 'fdd1ce606a904b08c816ba84f3125f2af44d92b2', 'sha256': '1d9224378d77925d612c9f926eb9fb92850e6551def8328011b6a972323298d5', # noqa } actual_filepath, actual_hashes = download(url, dest=str(tmp_path), hashes=good) actual_filename = os.path.basename(actual_filepath) assert actual_filename == filename assert actual_hashes['length'] == len(data) assert actual_hashes['checksums']['sha1'] == good['sha1'] assert actual_hashes['checksums']['sha256'] == good['sha256'] @pytest.mark.fs def test_download_fail_hashes_mismatch(tmp_path, requests_mock): """Mismatch hash after download should raise """ filename = 'requests-0.0.1.tar.gz' url = 'https://pypi.org/pypi/requests/%s' % filename data = 'this is something' requests_mock.get(url, text=data, headers={ 'content-length': str(len(data)) }) # good hashes for such file good = { 'sha1': 'fdd1ce606a904b08c816ba84f3125f2af44d92b2', 'sha256': '1d9224378d77925d612c9f926eb9fb92850e6551def8328011b6a972323298d5', # noqa } for hash_algo in good.keys(): wrong_hash = good[hash_algo].replace('1', '0') expected_hashes = good.copy() expected_hashes[hash_algo] = wrong_hash # set the wrong hash expected_msg = ("Failure when fetching %s. " "Checksum mismatched: %s != %s" % ( url, wrong_hash, good[hash_algo] )) with pytest.raises(ValueError, match=expected_msg): download(url, dest=str(tmp_path), hashes=expected_hashes) def test_api_info_failure(requests_mock): """Failure to fetch info/release information should raise""" url = 'https://pypi.org/pypi/requests/json' status_code = 400 requests_mock.get(url, status_code=status_code) with pytest.raises(ValueError) as e0: api_info(url) assert e0.value.args[0] == "Fail to query '%s'. 
Reason: %s" % ( url, status_code ) def test_api_info(requests_mock): """Fetching json info from pypi project should be ok""" url = 'https://pypi.org/pypi/requests/json' requests_mock.get(url, text='{"version": "0.0.1"}') actual_info = api_info(url) assert actual_info == { 'version': '0.0.1', } def test_release_name(): for version, filename, expected_release in [ ('0.0.1', None, 'releases/0.0.1'), ('0.0.2', 'something', 'releases/0.0.2/something')]: assert release_name(version, filename) == expected_release -def _parse_author_string_test(author_str, expected_result): - assert parse_author(author_str) == expected_result - assert parse_author(' %s' % author_str) == expected_result - assert parse_author('%s ' % author_str) == expected_result - - -def test_parse_author(): - _parse_author_string_test( - 'John Doe', - { - 'name': 'John Doe' - } - ) - - _parse_author_string_test( - '', - { - 'email': 'john.doe@foo.bar' - } - ) - - _parse_author_string_test( - '(https://john.doe)', - { - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - 'John Doe ', - { - 'name': 'John Doe', - 'email': 'john.doe@foo.bar' - } - ) - - _parse_author_string_test( - 'John Doe', - { - 'name': 'John Doe', - 'email': 'john.doe@foo.bar' - } - ) - - _parse_author_string_test( - 'John Doe (https://john.doe)', - { - 'name': 'John Doe', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - 'John Doe(https://john.doe)', - { - 'name': 'John Doe', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - ' (https://john.doe)', - { - 'email': 'john.doe@foo.bar', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - '(https://john.doe) ', - { - 'email': 'john.doe@foo.bar', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - 'John Doe (https://john.doe)', - { - 'name': 'John Doe', - 'email': 'john.doe@foo.bar', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - 'John Doe (https://john.doe) ', - { - 'name': 'John Doe', - 'email': 'john.doe@foo.bar', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - 'John Doe (https://john.doe)', - { - 'name': 'John Doe', - 'email': 'john.doe@foo.bar', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test( - 'John Doe(https://john.doe)', - { - 'name': 'John Doe', - 'email': 'john.doe@foo.bar', - 'url': 'https://john.doe' - } - ) - - _parse_author_string_test('', {}) - _parse_author_string_test('<>', {}) - _parse_author_string_test(' <>', {}) - _parse_author_string_test('<>()', {}) - _parse_author_string_test('<> ()', {}) - _parse_author_string_test('()', {}) - _parse_author_string_test(' ()', {}) - - _parse_author_string_test( - 'John Doe <> ()', - { - 'name': 'John Doe' - } - ) - - _parse_author_string_test( - 'John Doe <>', - { - 'name': 'John Doe' - } - ) - - _parse_author_string_test( - 'John Doe ()', - { - 'name': 'John Doe' - } - ) - - def test_artifact_identity(): """Compute primary key should return the right identity """ data = { 'a': 1, 'b': 2, 'length': 221837, 'filename': '8sync-0.1.0.tar.gz', 'version': '0.1.0', } for id_keys, expected_id in [ (['a', 'b'], [1, 2]), ([], []), (['a', 'key-that-does-not-exist'], [1, None]) ]: actual_id = artifact_identity(data, id_keys=id_keys) assert actual_id == expected_id diff --git a/swh/loader/package/utils.py b/swh/loader/package/utils.py index a93ee49..7408cfd 100644 --- a/swh/loader/package/utils.py +++ b/swh/loader/package/utils.py @@ -1,219 +1,138 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file 
at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import logging import os import requests -import re from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple from swh.model.hashutil import MultiHash, HASH_BLOCK_SIZE from swh.model.model import Person from swh.loader.package import DEFAULT_PARAMS logger = logging.getLogger(__name__) DOWNLOAD_HASHES = set(['sha1', 'sha256', 'length']) -# https://github.com/jonschlinkert/author-regex -_author_regexp = r'([^<(]+?)?[ \t]*(?:<([^>(]+?)>)?[ \t]*(?:\(([^)]+?)\)|$)' - - EMPTY_AUTHOR = Person( fullname=b'', name=None, email=None, ) def api_info(url: str) -> Dict: """Basic api client to retrieve information on project. This deals with fetching json metadata about pypi projects. Args: url (str): The api url (e.g PyPI, npm, etc...) Raises: ValueError in case of query failures (for some reasons: 404, ...) Returns: The associated response's information dict """ response = requests.get(url, **DEFAULT_PARAMS) if response.status_code != 200: raise ValueError("Fail to query '%s'. Reason: %s" % ( url, response.status_code)) return response.json() def download(url: str, dest: str, hashes: Dict = {}, filename: Optional[str] = None, auth: Optional[Tuple[str, str]] = None) -> Tuple[str, Dict]: """Download a remote tarball from url, uncompresses and computes swh hashes on it. Args: url: Artifact uri to fetch, uncompress and hash dest: Directory to write the archive to hashes: Dict of expected hashes (key is the hash algo) for the artifact to download (those hashes are expected to be hex string) auth: Optional tuple of login/password (for http authentication service, e.g. deposit) Raises: ValueError in case of any error when fetching/computing (length, checksums mismatched...) Returns: Tuple of local (filepath, hashes of filepath) """ params = copy.deepcopy(DEFAULT_PARAMS) if auth is not None: params['auth'] = auth response = requests.get(url, **params, stream=True) if response.status_code != 200: raise ValueError("Fail to query '%s'. Reason: %s" % ( url, response.status_code)) filename = filename if filename else os.path.basename(url) logger.debug('filename: %s', filename) filepath = os.path.join(dest, filename) logger.debug('filepath: %s', filepath) h = MultiHash(hash_names=DOWNLOAD_HASHES) with open(filepath, 'wb') as f: for chunk in response.iter_content(chunk_size=HASH_BLOCK_SIZE): h.update(chunk) f.write(chunk) # Also check the expected hashes if provided if hashes: actual_hashes = h.hexdigest() for algo_hash in hashes.keys(): actual_digest = actual_hashes[algo_hash] expected_digest = hashes[algo_hash] if actual_digest != expected_digest: raise ValueError( 'Failure when fetching %s. ' 'Checksum mismatched: %s != %s' % ( url, expected_digest, actual_digest)) computed_hashes = h.hexdigest() length = computed_hashes.pop('length') extrinsic_metadata = { 'length': length, 'filename': filename, 'checksums': computed_hashes, } logger.debug('extrinsic_metadata', extrinsic_metadata) return filepath, extrinsic_metadata def release_name(version: str, filename: Optional[str] = None) -> str: if filename: return 'releases/%s/%s' % (version, filename) return 'releases/%s' % version -def parse_author(author_str: str) -> Dict[str, str]: - """ - Parse npm package author string. 
- - It works with a flexible range of formats, as detailed below:: - - name - name (url) - name (url) - name (url) - name(url) - name (url) - name (url) - name(url) - name(url) - name (url) - name(url) - name - name - (url) - (url) - (url) - (url) - - (url) - - Args: - author_str (str): input author string - - Returns: - dict: A dict that may contain the following keys: - * name - * email - * url - - """ - author = {} - matches = re.findall(_author_regexp, - author_str.replace('<>', '').replace('()', ''), - re.M) - for match in matches: - if match[0].strip(): - author['name'] = match[0].strip() - if match[1].strip(): - author['email'] = match[1].strip() - if match[2].strip(): - author['url'] = match[2].strip() - return author - - -def swh_author(author: Dict[str, str]) -> Person: - """Transform an author like dict to an expected swh like dict (values are - bytes) - - """ - name = author.get('name') - email = author.get('email') - - fullname = None - - if name and email: - fullname = '%s <%s>' % (name, email) - elif name: - fullname = name - - if not fullname: - r = EMPTY_AUTHOR - else: - r = Person( - fullname=fullname.encode('utf-8') if fullname else b'', - name=name.encode('utf-8') if name else None, - email=email.encode('utf-8') if email else None - ) - return r - - def artifact_identity(d: Mapping[str, Any], id_keys: Sequence[str]) -> List[Any]: """Compute the primary key for a dict using the id_keys as primary key composite. Args: d: A dict entry to compute the primary key on id_keys: Sequence of keys to use as primary key Returns: The identity for that dict entry """ return [d.get(k) for k in id_keys]
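Reviewer note: the substantive change across both loaders is dropping the local parse_author()/swh_author() helpers in favour of swh.model's Person.from_fullname (hence the swh.model >= 0.0.60 bump in requirements-swh.txt). Below is a minimal sketch of the intended behaviour, assuming Person.from_fullname splits a git-style "name <email>" byte string the way the updated npm tests above expect; the maintainer value is illustrative only, not taken from the diff.

    from swh.model.model import Person

    # A git-style author string, e.g. a CRAN DESCRIPTION "Maintainer" field or
    # an npm package.json "author" entry (hypothetical value for illustration).
    maintainer = 'Jane Doe <jane.doe@example.org>'

    # New behaviour in both loaders: keep the raw string as the fullname and
    # let swh.model derive name/email, instead of parse_author()/swh_author().
    author = Person.from_fullname(maintainer.encode('utf-8'))
    assert author.fullname == b'Jane Doe <jane.doe@example.org>'
    assert author.name == b'Jane Doe'
    assert author.email == b'jane.doe@example.org'

    # When no author string can be extracted, the npm loader now returns an
    # explicit empty Person instead of going through the removed helpers.
    no_author = Person(fullname=b'', name=None, email=None)

One behavioural difference worth noting in review: parse_author() also recognised a trailing "(url)" component, which Person.from_fullname does not extract separately; the URL now simply stays embedded in the fullname, as the updated fengmk2 test expectation shows.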