Page Menu
Home
Software Heritage
Search
Configure Global Search
Log In
Files
F9337493
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Flag For Later
Size
101 KB
Subscribers
None
View Options
diff --git a/swh/indexer/ctags.py b/swh/indexer/ctags.py
index b29e4c7..1e32bf3 100644
--- a/swh/indexer/ctags.py
+++ b/swh/indexer/ctags.py
@@ -1,151 +1,154 @@
-# Copyright (C) 2015-2017 The Software Heritage developers
+# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import subprocess
import json
+from typing import Dict, List
+
from swh.model import hashutil
from .indexer import ContentIndexer, write_to_temp
# Options used to compute tags
__FLAGS = [
'--fields=+lnz', # +l: language
# +n: line number of tag definition
# +z: include the symbol's kind (function, variable, ...)
    '--sort=no',            # do not sort output (keep tag order as found)
'--links=no', # do not follow symlinks
'--output-format=json', # outputs in json
]
def compute_language(content, log=None):
raise NotImplementedError(
'Language detection was unreliable, so it is currently disabled. '
'See https://forge.softwareheritage.org/D1455')
def run_ctags(path, lang=None, ctags_command='ctags'):
"""Run ctags on file path with optional language.
Args:
path: path to the file
lang: language for that path (optional)
Yields:
dict: ctags' output
"""
optional = []
if lang:
optional = ['--language-force=%s' % lang]
cmd = [ctags_command] + __FLAGS + optional + [path]
output = subprocess.check_output(cmd, universal_newlines=True)
for symbol in output.split('\n'):
if not symbol:
continue
js_symbol = json.loads(symbol)
yield {
'name': js_symbol['name'],
'kind': js_symbol['kind'],
'line': js_symbol['line'],
'lang': js_symbol['language'],
}
class CtagsIndexer(ContentIndexer):
CONFIG_BASE_FILENAME = 'indexer/ctags'
ADDITIONAL_CONFIG = {
'workdir': ('str', '/tmp/swh/indexer.ctags'),
'tools': ('dict', {
'name': 'universal-ctags',
'version': '~git7859817b',
'configuration': {
'command_line': '''ctags --fields=+lnz --sort=no --links=no '''
'''--output-format=json <filepath>'''
},
}),
'languages': ('dict', {
'ada': 'Ada',
'adl': None,
'agda': None,
# ...
})
}
def prepare(self):
super().prepare()
self.working_directory = self.config['workdir']
self.language_map = self.config['languages']
def filter(self, ids):
"""Filter out known sha1s and return only missing ones.
"""
yield from self.idx_storage.content_ctags_missing((
{
'id': sha1,
'indexer_configuration_id': self.tool['id'],
} for sha1 in ids
))
def index(self, id, data):
"""Index sha1s' content and store result.
Args:
id (bytes): content's identifier
data (bytes): raw content in bytes
Returns:
            dict: a dict representing a content_ctags with keys:
- **id** (bytes): content's identifier (sha1)
- **ctags** ([dict]): ctags list of symbols
"""
lang = compute_language(data, log=self.log)['lang']
if not lang:
return None
ctags_lang = self.language_map.get(lang)
if not ctags_lang:
return None
ctags = {
'id': id,
}
filename = hashutil.hash_to_hex(id)
with write_to_temp(
filename=filename, data=data,
working_directory=self.working_directory) as content_path:
result = run_ctags(content_path, lang=ctags_lang)
ctags.update({
'ctags': list(result),
'indexer_configuration_id': self.tool['id'],
})
return ctags
- def persist_index_computations(self, results, policy_update):
+ def persist_index_computations(
+ self, results: List[Dict], policy_update: str) -> Dict:
"""Persist the results in storage.
Args:
- results ([dict]): list of content_mimetype, dict with the
+            results: list of content_ctags, dict with the
following keys:
- id (bytes): content's identifier (sha1)
- ctags ([dict]): ctags list of symbols
- policy_update ([str]): either 'update-dups' or 'ignore-dups' to
+ policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
"""
- self.idx_storage.content_ctags_add(
+ return self.idx_storage.content_ctags_add(
results, conflict_update=(policy_update == 'update-dups'))
diff --git a/swh/indexer/fossology_license.py b/swh/indexer/fossology_license.py
index ed9a431..7e00bc4 100644
--- a/swh/indexer/fossology_license.py
+++ b/swh/indexer/fossology_license.py
@@ -1,173 +1,181 @@
-# Copyright (C) 2016-2018 The Software Heritage developers
+# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
+import logging
import subprocess
-from typing import Optional
+from typing import Any, Dict, List, Optional
from swh.model import hashutil
from .indexer import ContentIndexer, ContentRangeIndexer, write_to_temp
-def compute_license(path, log=None):
+logger = logging.getLogger(__name__)
+
+
+def compute_license(path):
"""Determine license from file at path.
Args:
path: filepath to determine the license
Returns:
dict: A dict with the following keys:
- licenses ([str]): associated detected licenses to path
- path (bytes): content filepath
"""
try:
properties = subprocess.check_output(['nomossa', path],
universal_newlines=True)
if properties:
res = properties.rstrip().split(' contains license(s) ')
licenses = res[1].split(',')
else:
licenses = []
return {
'licenses': licenses,
'path': path,
}
except subprocess.CalledProcessError:
- if log:
- from os import path as __path
- log.exception('Problem during license detection for sha1 %s' %
- __path.basename(path))
+ from os import path as __path
+ logger.exception('Problem during license detection for sha1 %s' %
+ __path.basename(path))
return {
'licenses': [],
'path': path,
}
class MixinFossologyLicenseIndexer:
"""Mixin fossology license indexer.
See :class:`FossologyLicenseIndexer` and
:class:`FossologyLicenseRangeIndexer`
"""
ADDITIONAL_CONFIG = {
'workdir': ('str', '/tmp/swh/indexer.fossology.license'),
'tools': ('dict', {
'name': 'nomos',
'version': '3.1.0rc2-31-ga2cbb8c',
'configuration': {
'command_line': 'nomossa <filepath>',
},
}),
'write_batch_size': ('int', 1000),
}
CONFIG_BASE_FILENAME = 'indexer/fossology_license' # type: Optional[str]
+ tool: Any
+ idx_storage: Any
def prepare(self):
super().prepare()
self.working_directory = self.config['workdir']
- def index(self, id, data):
+ def index(self, id: bytes, data: Optional[bytes] = None,
+ **kwargs) -> Dict[str, Any]:
"""Index sha1s' content and store result.
Args:
id (bytes): content's identifier
raw_content (bytes): associated raw content to content id
Returns:
dict: A dict, representing a content_license, with keys:
- id (bytes): content's identifier (sha1)
- license (bytes): license in bytes
- path (bytes): path
- indexer_configuration_id (int): tool used to compute the output
"""
assert isinstance(id, bytes)
+ assert data is not None
with write_to_temp(
filename=hashutil.hash_to_hex(id), # use the id as pathname
data=data,
working_directory=self.working_directory) as content_path:
- properties = compute_license(path=content_path, log=self.log)
+ properties = compute_license(path=content_path)
properties.update({
'id': id,
'indexer_configuration_id': self.tool['id'],
})
return properties
- def persist_index_computations(self, results, policy_update):
+ def persist_index_computations(
+ self, results: List[Dict], policy_update: str) -> Dict:
"""Persist the results in storage.
Args:
- results ([dict]): list of content_license, dict with the
+ results: list of content_license dict with the
following keys:
- id (bytes): content's identifier (sha1)
- license (bytes): license in bytes
- path (bytes): path
- policy_update ([str]): either 'update-dups' or 'ignore-dups' to
+ policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
"""
- self.idx_storage.content_fossology_license_add(
+ return self.idx_storage.content_fossology_license_add(
results, conflict_update=(policy_update == 'update-dups'))
class FossologyLicenseIndexer(
MixinFossologyLicenseIndexer, ContentIndexer):
"""Indexer in charge of:
- filtering out content already indexed
- reading content from objstorage per the content's id (sha1)
- computing {license, encoding} from that content
- store result in storage
"""
def filter(self, ids):
"""Filter out known sha1s and return only missing ones.
"""
yield from self.idx_storage.content_fossology_license_missing((
{
'id': sha1,
'indexer_configuration_id': self.tool['id'],
} for sha1 in ids
))
class FossologyLicenseRangeIndexer(
MixinFossologyLicenseIndexer, ContentRangeIndexer):
"""FossologyLicense Range Indexer working on range of content identifiers.
- filters out the non textual content
- (optionally) filters out content already indexed (cf
:meth:`.indexed_contents_in_range`)
- reads content from objstorage per the content's id (sha1)
    - computes {license, path} from that content
- stores result in storage
"""
def indexed_contents_in_range(self, start, end):
"""Retrieve indexed content id within range [start, end].
Args:
start (bytes): Starting bound from range identifier
end (bytes): End range identifier
Returns:
dict: a dict with keys:
- **ids** [bytes]: iterable of content ids within the range.
- **next** (Optional[bytes]): The next range of sha1 starts at
this sha1 if any
"""
return self.idx_storage.content_fossology_license_get_range(
start, end, self.tool['id'])
diff --git a/swh/indexer/indexer.py b/swh/indexer/indexer.py
index 75b65c8..fb5fa8a 100644
--- a/swh/indexer/indexer.py
+++ b/swh/indexer/indexer.py
@@ -1,596 +1,594 @@
# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import abc
import os
import logging
import shutil
import tempfile
from contextlib import contextmanager
-from typing import Any, Dict, Tuple, Generator, Union, List
-from typing import Set
+from typing import (
+ Any, Dict, Iterator, List, Optional, Set, Tuple, Union
+)
from swh.scheduler import CONFIG as SWH_CONFIG
from swh.storage import get_storage
from swh.core.config import SWHConfig
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
from swh.indexer.storage import get_indexer_storage, INDEXER_CFG_KEY
from swh.model import hashutil
from swh.core import utils
@contextmanager
def write_to_temp(
- filename: str, data: bytes, working_directory: str
-) -> Generator[str, None, None]:
+ filename: str, data: bytes, working_directory: str) -> Iterator[str]:
"""Write the sha1's content in a temporary file.
Args:
filename: one of sha1's many filenames
data: the sha1's content to write in temporary
file
working_directory: the directory into which the
file is written
Returns:
The path to the temporary file created. That file is
filled in with the raw content's data.
"""
os.makedirs(working_directory, exist_ok=True)
temp_dir = tempfile.mkdtemp(dir=working_directory)
content_path = os.path.join(temp_dir, filename)
with open(content_path, 'wb') as f:
f.write(data)
yield content_path
shutil.rmtree(temp_dir)
class BaseIndexer(SWHConfig, metaclass=abc.ABCMeta):
"""Base class for indexers to inherit from.
The main entry point is the :func:`run` function which is in
charge of triggering the computations on the batch dict/ids
received.
Indexers can:
- filter out ids whose data has already been indexed.
- retrieve ids data from storage or objstorage
- index this data depending on the object and store the result in
storage.
To implement a new object type indexer, inherit from the
BaseIndexer and implement indexing:
:meth:`~BaseIndexer.run`:
object_ids are different depending on object. For example: sha1 for
content, sha1_git for revision, directory, release, and id for origin
To implement a new concrete indexer, inherit from the object level
classes: :class:`ContentIndexer`, :class:`RevisionIndexer`,
:class:`OriginIndexer`.
Then you need to implement the following functions:
:meth:`~BaseIndexer.filter`:
filter out data already indexed (in storage).
:meth:`~BaseIndexer.index_object`:
compute index on id with data (retrieved from the storage or the
objstorage by the id key) and return the resulting index computation.
:meth:`~BaseIndexer.persist_index_computations`:
persist the results of multiple index computations in the storage.
The new indexer implementation can also override the following functions:
:meth:`~BaseIndexer.prepare`:
Configuration preparation for the indexer. When overriding, this must
call the `super().prepare()` instruction.
:meth:`~BaseIndexer.check`:
Configuration check for the indexer. When overriding, this must call the
`super().check()` instruction.
:meth:`~BaseIndexer.register_tools`:
This should return a dict of the tool(s) to use when indexing or
filtering.
"""
results: List[Dict]
CONFIG = 'indexer/base'
DEFAULT_CONFIG = {
INDEXER_CFG_KEY: ('dict', {
'cls': 'remote',
'args': {
'url': 'http://localhost:5007/'
}
}),
'storage': ('dict', {
'cls': 'remote',
'args': {
'url': 'http://localhost:5002/',
}
}),
'objstorage': ('dict', {
'cls': 'remote',
'args': {
'url': 'http://localhost:5003/',
}
})
}
ADDITIONAL_CONFIG = {} # type: Dict[str, Tuple[str, Any]]
USE_TOOLS = True
catch_exceptions = True
"""Prevents exceptions in `index()` from raising too high. Set to False
in tests to properly catch all exceptions."""
scheduler: Any
def __init__(self, config=None, **kw) -> None:
"""Prepare and check that the indexer is ready to run.
"""
super().__init__()
if config is not None:
self.config = config
elif SWH_CONFIG:
self.config = SWH_CONFIG.copy()
else:
config_keys = ('base_filename', 'config_filename',
'additional_configs', 'global_config')
config_args = {k: v for k, v in kw.items() if k in config_keys}
if self.ADDITIONAL_CONFIG:
config_args.setdefault('additional_configs', []).append(
self.ADDITIONAL_CONFIG)
self.config = self.parse_config_file(**config_args)
self.prepare()
self.check()
self.log.debug('%s: config=%s', self, self.config)
def prepare(self) -> None:
"""Prepare the indexer's needed runtime configuration.
Without this step, the indexer cannot possibly run.
"""
config_storage = self.config.get('storage')
if config_storage:
self.storage = get_storage(**config_storage)
objstorage = self.config['objstorage']
self.objstorage = get_objstorage(objstorage['cls'],
objstorage['args'])
idx_storage = self.config[INDEXER_CFG_KEY]
self.idx_storage = get_indexer_storage(**idx_storage)
_log = logging.getLogger('requests.packages.urllib3.connectionpool')
_log.setLevel(logging.WARN)
self.log = logging.getLogger('swh.indexer')
if self.USE_TOOLS:
self.tools = list(self.register_tools(
self.config.get('tools', [])))
self.results = []
@property
def tool(self) -> Dict:
return self.tools[0]
def check(self) -> None:
"""Check the indexer's configuration is ok before proceeding.
If ok, does nothing. If not raise error.
"""
if self.USE_TOOLS and not self.tools:
raise ValueError('Tools %s is unknown, cannot continue' %
self.tools)
def _prepare_tool(self, tool: Dict[str, Any]) -> Dict[str, Any]:
"""Prepare the tool dict to be compliant with the storage api.
"""
return {'tool_%s' % key: value for key, value in tool.items()}
def register_tools(
self, tools: Union[Dict[str, Any], List[Dict[str, Any]]]
) -> List[Dict[str, Any]]:
"""Permit to register tools to the storage.
Add a sensible default which can be overridden if not
sufficient. (For now, all indexers use only one tool)
Expects the self.config['tools'] property to be set with
one or more tools.
Args:
tools: Either a dict or a list of dict.
Returns:
list: List of dicts with additional id key.
Raises:
ValueError: if not a list nor a dict.
"""
if isinstance(tools, list):
tools = list(map(self._prepare_tool, tools))
elif isinstance(tools, dict):
tools = [self._prepare_tool(tools)]
else:
raise ValueError('Configuration tool(s) must be a dict or list!')
if tools:
return self.idx_storage.indexer_configuration_add(tools)
else:
return []
- def index(
- self, id: bytes, data: bytes
- ) -> Dict[str, Any]:
+ def index(self, id: bytes, data: Optional[bytes] = None,
+ **kwargs) -> Dict[str, Any]:
"""Index computation for the id and associated raw data.
Args:
id: identifier
data: id's data from storage or objstorage depending on
object type
Returns:
dict: a dict that makes sense for the
:meth:`.persist_index_computations` method.
"""
raise NotImplementedError()
- def filter(self, ids: List[bytes]) -> Generator[bytes, None, None]:
+ def filter(self, ids: List[bytes]) -> Iterator[bytes]:
"""Filter missing ids for that particular indexer.
Args:
ids: list of ids
Yields:
iterator of missing ids
"""
yield from ids
@abc.abstractmethod
- def persist_index_computations(self, results, policy_update):
+ def persist_index_computations(self, results, policy_update) -> Dict:
"""Persist the computation resulting from the index.
Args:
results ([result]): List of results. One result is the
result of the index function.
policy_update ([str]): either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
Returns:
- None
+ a summary dict of what has been inserted in the storage
"""
- pass
-
- @abc.abstractmethod
- def run(self, ids, policy_update, **kwargs):
- """Given a list of ids:
-
- - retrieves the data from the storage
- - executes the indexing computations
- - stores the results (according to policy_update)
-
- Args:
- ids ([bytes]): id's identifier list
- policy_update (str): either 'update-dups' or 'ignore-dups' to
- respectively update duplicates or ignore them
- **kwargs: passed to the `index` method
-
- """
- pass
+ return {}
class ContentIndexer(BaseIndexer):
"""A content indexer working on a list of ids directly.
To work on indexer range, use the :class:`ContentRangeIndexer`
instead.
Note: :class:`ContentIndexer` is not an instantiable object. To
use it, one should inherit from this class and override the
methods mentioned in the :class:`BaseIndexer` class.
"""
-
- def run(self, ids, policy_update, **kwargs):
+ def run(self, ids: Union[List[bytes], bytes, str], policy_update: str,
+ **kwargs) -> Dict:
"""Given a list of ids:
- retrieve the content from the storage
- execute the indexing computations
- store the results (according to policy_update)
Args:
ids (Iterable[Union[bytes, str]]): sha1's identifier list
policy_update (str): either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore
them
**kwargs: passed to the `index` method
+ Returns:
+ A summary Dict of the task's status
+
"""
- ids = [hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_
- for id_ in ids]
+ status = 'uneventful'
+ sha1s = [hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_
+ for id_ in ids]
results = []
+ summary: Dict = {}
try:
- for sha1 in ids:
+ for sha1 in sha1s:
try:
raw_content = self.objstorage.get(sha1)
except ObjNotFoundError:
self.log.warning('Content %s not found in objstorage' %
hashutil.hash_to_hex(sha1))
continue
res = self.index(sha1, raw_content, **kwargs)
if res: # If no results, skip it
results.append(res)
-
- self.persist_index_computations(results, policy_update)
+ status = 'eventful'
+ summary = self.persist_index_computations(results, policy_update)
self.results = results
except Exception:
if not self.catch_exceptions:
raise
self.log.exception(
'Problem when reading contents metadata.')
+ status = 'failed'
+ finally:
+ summary['status'] = status
+ return summary
class ContentRangeIndexer(BaseIndexer):
"""A content range indexer.
This expects as input a range of ids to index.
To work on a list of ids, use the :class:`ContentIndexer` instead.
Note: :class:`ContentRangeIndexer` is not an instantiable
object. To use it, one should inherit from this class and override
the methods mentioned in the :class:`BaseIndexer` class.
"""
@abc.abstractmethod
def indexed_contents_in_range(
self, start: bytes, end: bytes
) -> Any:
"""Retrieve indexed contents within range [start, end].
Args:
start: Starting bound from range identifier
end: End range identifier
Yields:
bytes: Content identifier present in the range ``[start, end]``
"""
pass
def _list_contents_to_index(
self, start: bytes, end: bytes, indexed: Set[bytes]
- ) -> Generator[bytes, None, None]:
+ ) -> Iterator[bytes]:
"""Compute from storage the new contents to index in the range [start,
end]. The already indexed contents are skipped.
Args:
start: Starting bound from range identifier
end: End range identifier
indexed: Set of content already indexed.
Yields:
bytes: Identifier of contents to index.
"""
if not isinstance(start, bytes) or not isinstance(end, bytes):
raise TypeError('identifiers must be bytes, not %r and %r.' %
(start, end))
while start:
result = self.storage.content_get_range(start, end)
contents = result['contents']
for c in contents:
_id = hashutil.hash_to_bytes(c['sha1'])
if _id in indexed:
continue
yield _id
start = result['next']
def _index_contents(
self, start: bytes, end: bytes, indexed: Set[bytes], **kwargs: Any
- ) -> Generator[Dict, None, None]:
+ ) -> Iterator[Dict]:
"""Index the contents from within range [start, end]
Args:
start: Starting bound from range identifier
end: End range identifier
indexed: Set of content already indexed.
Yields:
dict: Data indexed to persist using the indexer storage
"""
for sha1 in self._list_contents_to_index(start, end, indexed):
try:
raw_content = self.objstorage.get(sha1)
except ObjNotFoundError:
self.log.warning('Content %s not found in objstorage' %
hashutil.hash_to_hex(sha1))
continue
- res = self.index(sha1, raw_content, **kwargs) # type: ignore
+ res = self.index(sha1, raw_content, **kwargs)
if res:
if not isinstance(res['id'], bytes):
raise TypeError(
'%r.index should return ids as bytes, not %r' %
(self.__class__.__name__, res['id']))
yield res
def _index_with_skipping_already_done(
- self, start: bytes, end: bytes
- ) -> Generator[Dict, None, None]:
+ self, start: bytes, end: bytes) -> Iterator[Dict]:
"""Index not already indexed contents in range [start, end].
Args:
start: Starting range identifier
end: Ending range identifier
Yields:
dict: Content identifier present in the range
``[start, end]`` which are not already indexed.
"""
while start:
indexed_page = self.indexed_contents_in_range(start, end)
contents = indexed_page['ids']
_end = contents[-1] if contents else end
yield from self._index_contents(
start, _end, contents)
start = indexed_page['next']
- def run(self, start, end, skip_existing=True, **kwargs):
+ def run(self, start: Union[bytes, str], end: Union[bytes, str],
+ skip_existing: bool = True, **kwargs) -> Dict:
"""Given a range of content ids, compute the indexing computations on
the contents within. Either the indexer is incremental
(filter out existing computed data) or not (compute
everything from scratch).
Args:
- start (Union[bytes, str]): Starting range identifier
- end (Union[bytes, str]): Ending range identifier
- skip_existing (bool): Skip existing indexed data
+ start: Starting range identifier
+ end: Ending range identifier
+ skip_existing: Skip existing indexed data
(default) or not
**kwargs: passed to the `index` method
Returns:
- bool: True if data was indexed, False otherwise.
+ A dict with the task's status
"""
- with_indexed_data = False
+ status = 'uneventful'
+ summary: Dict = {}
try:
- if isinstance(start, str):
- start = hashutil.hash_to_bytes(start)
- if isinstance(end, str):
- end = hashutil.hash_to_bytes(end)
+ range_start = hashutil.hash_to_bytes(start) \
+ if isinstance(start, str) else start
+ range_end = hashutil.hash_to_bytes(end) \
+ if isinstance(end, str) else end
if skip_existing:
- gen = self._index_with_skipping_already_done(start, end)
+ gen = self._index_with_skipping_already_done(
+ range_start, range_end)
else:
- gen = self._index_contents(start, end, indexed=[])
-
- for results in utils.grouper(gen,
- n=self.config['write_batch_size']):
- self.persist_index_computations(
- results, policy_update='update-dups')
- with_indexed_data = True
+ gen = self._index_contents(
+ range_start, range_end, indexed=set([]))
+
+ for contents in utils.grouper(
+ gen, n=self.config['write_batch_size']):
+ res = self.persist_index_computations(
+ contents, policy_update='update-dups')
+ summary['content_mimetype:add'] += res.get(
+ 'content_mimetype:add')
+ status = 'eventful'
except Exception:
if not self.catch_exceptions:
raise
self.log.exception(
'Problem when computing metadata.')
+ status = 'failed'
finally:
- return with_indexed_data
+ summary['status'] = status
+ return summary
class OriginIndexer(BaseIndexer):
"""An object type indexer, inherits from the :class:`BaseIndexer` and
implements Origin indexing using the run method
Note: the :class:`OriginIndexer` is not an instantiable object.
To use it in another context one should inherit from this class
and override the methods mentioned in the :class:`BaseIndexer`
class.
"""
- def run(self, origin_urls, policy_update='update-dups',
- next_step=None, **kwargs):
+ def run(self, origin_urls: List[str],
+ policy_update: str = 'update-dups', **kwargs) -> Dict:
"""Given a list of origin urls:
- retrieve origins from storage
- execute the indexing computations
- store the results (according to policy_update)
Args:
- origin_urls ([str]): list of origin urls.
- policy_update (str): either 'update-dups' or 'ignore-dups' to
+ origin_urls: list of origin urls.
+ policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates (default) or ignore them
- parse_ids (bool): Do we need to parse id or not (default)
**kwargs: passed to the `index` method
"""
results = self.index_list(origin_urls, **kwargs)
-
- self.persist_index_computations(results, policy_update)
+ summary = self.persist_index_computations(results, policy_update)
self.results = results
+ return summary
def index_list(self, origins: List[Any], **kwargs: Any) -> List[Dict]:
results = []
for origin in origins:
try:
res = self.index(origin, **kwargs)
if res: # If no results, skip it
results.append(res)
except Exception:
if not self.catch_exceptions:
raise
self.log.exception(
'Problem when processing origin %s',
origin['id'])
return results
class RevisionIndexer(BaseIndexer):
"""An object type indexer, inherits from the :class:`BaseIndexer` and
implements Revision indexing using the run method
Note: the :class:`RevisionIndexer` is not an instantiable object.
To use it in another context one should inherit from this class
and override the methods mentioned in the :class:`BaseIndexer`
class.
"""
- def run(self, ids, policy_update):
+ def run(self, ids: Union[str, bytes], policy_update: str) -> Dict:
"""Given a list of sha1_gits:
- retrieve revisions from storage
- execute the indexing computations
- store the results (according to policy_update)
Args:
- ids ([bytes or str]): sha1_git's identifier list
- policy_update (str): either 'update-dups' or 'ignore-dups' to
+ ids: sha1_git's identifier list
+ policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
"""
results = []
- ids = [hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_
- for id_ in ids]
- revs = self.storage.revision_get(ids)
+ revs = self.storage.revision_get(
+ hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_
+ for id_ in ids)
for rev in revs:
if not rev:
self.log.warning('Revisions %s not found in storage' %
list(map(hashutil.hash_to_hex, ids)))
continue
try:
res = self.index(rev)
if res: # If no results, skip it
results.append(res)
except Exception:
if not self.catch_exceptions:
raise
self.log.exception(
'Problem when processing revision')
- self.persist_index_computations(results, policy_update)
+ summary = self.persist_index_computations(results, policy_update)
self.results = results
+ return summary
diff --git a/swh/indexer/metadata.py b/swh/indexer/metadata.py
index ae9a990..50c31b6 100644
--- a/swh/indexer/metadata.py
+++ b/swh/indexer/metadata.py
@@ -1,368 +1,377 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from copy import deepcopy
-from typing import Any, List, Dict, Tuple, Callable, Generator
+from typing import Any, Callable, Dict, Iterator, List, Tuple
from swh.core.utils import grouper
from swh.indexer.codemeta import merge_documents
from swh.indexer.indexer import ContentIndexer, RevisionIndexer, OriginIndexer
from swh.indexer.origin_head import OriginHeadIndexer
from swh.indexer.metadata_dictionary import MAPPINGS
from swh.indexer.metadata_detector import detect_metadata
from swh.indexer.storage import INDEXER_CFG_KEY
from swh.model import hashutil
REVISION_GET_BATCH_SIZE = 10
ORIGIN_GET_BATCH_SIZE = 10
def call_with_batches(
f: Callable[[List[Dict[str, Any]]], Dict['str', Any]],
args: List[Dict[str, str]], batch_size: int
-) -> Generator[str, None, None]:
+) -> Iterator[str]:
"""Calls a function with batches of args, and concatenates the results.
"""
groups = grouper(args, batch_size)
for group in groups:
yield from f(list(group))
class ContentMetadataIndexer(ContentIndexer):
"""Content-level indexer
This indexer is in charge of:
- filtering out content already indexed in content_metadata
- reading content from objstorage with the content's id sha1
- computing metadata by given context
- using the metadata_dictionary as the 'swh-metadata-translator' tool
- store result in content_metadata table
"""
def filter(self, ids):
"""Filter out known sha1s and return only missing ones.
"""
yield from self.idx_storage.content_metadata_missing((
{
'id': sha1,
'indexer_configuration_id': self.tool['id'],
} for sha1 in ids
))
def index(self, id, data, log_suffix='unknown revision'):
"""Index sha1s' content and store result.
Args:
id (bytes): content's identifier
data (bytes): raw content in bytes
Returns:
dict: dictionary representing a content_metadata. If the
translation wasn't successful the metadata keys will
be returned as None
"""
result = {
'id': id,
'indexer_configuration_id': self.tool['id'],
'metadata': None
}
try:
mapping_name = self.tool['tool_configuration']['context']
log_suffix += ', content_id=%s' % hashutil.hash_to_hex(id)
result['metadata'] = \
MAPPINGS[mapping_name](log_suffix).translate(data)
except Exception:
self.log.exception(
"Problem during metadata translation "
"for content %s" % hashutil.hash_to_hex(id))
if result['metadata'] is None:
return None
return result
def persist_index_computations(
self, results: List[Dict], policy_update: str
- ) -> None:
+ ) -> Dict:
"""Persist the results in storage.
Args:
results: list of content_metadata, dict with the
following keys:
- id (bytes): content's identifier (sha1)
- metadata (jsonb): detected metadata
policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
"""
- self.idx_storage.content_metadata_add(
+ return self.idx_storage.content_metadata_add(
results, conflict_update=(policy_update == 'update-dups'))
class RevisionMetadataIndexer(RevisionIndexer):
"""Revision-level indexer
This indexer is in charge of:
- filtering revisions already indexed in revision_intrinsic_metadata table
with defined computation tool
- retrieve all entry_files in root directory
- use metadata_detector for file_names containing metadata
- compute metadata translation if necessary and possible (depends on tool)
- send sha1s to content indexing if possible
- store the results for revision
"""
ADDITIONAL_CONFIG = {
'tools': ('dict', {
'name': 'swh-metadata-detector',
'version': '0.0.2',
'configuration': {
},
}),
}
def filter(self, sha1_gits):
"""Filter out known sha1s and return only missing ones.
"""
yield from self.idx_storage.revision_intrinsic_metadata_missing((
{
'id': sha1_git,
'indexer_configuration_id': self.tool['id'],
} for sha1_git in sha1_gits
))
def index(self, rev):
"""Index rev by processing it and organizing result.
use metadata_detector to iterate on filenames
- if one filename detected -> sends file to content indexer
- if multiple file detected -> translation needed at revision level
Args:
rev (dict): revision artifact from storage
Returns:
dict: dictionary representing a revision_intrinsic_metadata, with
keys:
- id (str): rev's identifier (sha1_git)
- indexer_configuration_id (bytes): tool used
- metadata: dict of retrieved metadata
"""
result = {
'id': rev['id'],
'indexer_configuration_id': self.tool['id'],
'mappings': None,
'metadata': None
}
try:
root_dir = rev['directory']
dir_ls = list(self.storage.directory_ls(root_dir, recursive=False))
if [entry['type'] for entry in dir_ls] == ['dir']:
# If the root is just a single directory, recurse into it
# eg. PyPI packages, GNU tarballs
subdir = dir_ls[0]['target']
dir_ls = self.storage.directory_ls(subdir, recursive=False)
files = [entry for entry in dir_ls if entry['type'] == 'file']
detected_files = detect_metadata(files)
(mappings, metadata) = self.translate_revision_intrinsic_metadata(
detected_files,
log_suffix='revision=%s' % hashutil.hash_to_hex(rev['id']))
result['mappings'] = mappings
result['metadata'] = metadata
except Exception as e:
self.log.exception(
'Problem when indexing rev: %r', e)
return result
def persist_index_computations(
self, results: List[Dict], policy_update: str
- ) -> None:
+ ) -> Dict:
"""Persist the results in storage.
Args:
results: list of content_mimetype, dict with the
following keys:
- id (bytes): content's identifier (sha1)
- mimetype (bytes): mimetype in bytes
- encoding (bytes): encoding in bytes
policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
"""
# TODO: add functions in storage to keep data in
# revision_intrinsic_metadata
- self.idx_storage.revision_intrinsic_metadata_add(
+ return self.idx_storage.revision_intrinsic_metadata_add(
results, conflict_update=(policy_update == 'update-dups'))
def translate_revision_intrinsic_metadata(
self, detected_files: Dict[str, List[Any]], log_suffix: str
) -> Tuple[List[Any], List[Any]]:
"""
Determine plan of action to translate metadata when containing
one or multiple detected files:
Args:
detected_files: dictionary mapping context names (e.g.,
"npm", "authors") to list of sha1
Returns:
(List[str], dict): list of mappings used and dict with
translated metadata according to the CodeMeta vocabulary
"""
used_mappings = [MAPPINGS[context].name for context in detected_files]
metadata = []
tool = {
'name': 'swh-metadata-translator',
'version': '0.0.2',
'configuration': {
},
}
# TODO: iterate on each context, on each file
# -> get raw_contents
# -> translate each content
config = {
k: self.config[k]
for k in [INDEXER_CFG_KEY, 'objstorage', 'storage']
}
config['tools'] = [tool]
for context in detected_files.keys():
cfg = deepcopy(config)
cfg['tools'][0]['configuration']['context'] = context
c_metadata_indexer = ContentMetadataIndexer(config=cfg)
# sha1s that are in content_metadata table
sha1s_in_storage = []
metadata_generator = self.idx_storage.content_metadata_get(
detected_files[context])
for c in metadata_generator:
# extracting metadata
sha1 = c['id']
sha1s_in_storage.append(sha1)
local_metadata = c['metadata']
# local metadata is aggregated
if local_metadata:
metadata.append(local_metadata)
sha1s_filtered = [item for item in detected_files[context]
if item not in sha1s_in_storage]
if sha1s_filtered:
# content indexing
try:
c_metadata_indexer.run(sha1s_filtered,
policy_update='ignore-dups',
log_suffix=log_suffix)
# on the fly possibility:
for result in c_metadata_indexer.results:
local_metadata = result['metadata']
metadata.append(local_metadata)
except Exception:
self.log.exception(
"Exception while indexing metadata on contents")
metadata = merge_documents(metadata)
return (used_mappings, metadata)
class OriginMetadataIndexer(OriginIndexer):
ADDITIONAL_CONFIG = RevisionMetadataIndexer.ADDITIONAL_CONFIG
USE_TOOLS = False
def __init__(self, config=None, **kwargs) -> None:
super().__init__(config=config, **kwargs)
self.origin_head_indexer = OriginHeadIndexer(config=config)
self.revision_metadata_indexer = RevisionMetadataIndexer(config=config)
def index_list(self, origin_urls):
head_rev_ids = []
origins_with_head = []
origins = list(call_with_batches(
self.storage.origin_get,
[{'url': url} for url in origin_urls], ORIGIN_GET_BATCH_SIZE))
for origin in origins:
if origin is None:
continue
head_result = self.origin_head_indexer.index(origin['url'])
if head_result:
origins_with_head.append(origin)
head_rev_ids.append(head_result['revision_id'])
head_revs = list(call_with_batches(
self.storage.revision_get,
head_rev_ids, REVISION_GET_BATCH_SIZE))
assert len(head_revs) == len(head_rev_ids)
results = []
for (origin, rev) in zip(origins_with_head, head_revs):
if not rev:
self.log.warning('Missing head revision of origin %r',
origin['url'])
continue
rev_metadata = self.revision_metadata_indexer.index(rev)
orig_metadata = {
'from_revision': rev_metadata['id'],
'id': origin['url'],
'metadata': rev_metadata['metadata'],
'mappings': rev_metadata['mappings'],
'indexer_configuration_id':
rev_metadata['indexer_configuration_id'],
}
results.append((orig_metadata, rev_metadata))
return results
def persist_index_computations(
self, results: List[Dict], policy_update: str
- ) -> None:
+ ) -> Dict:
conflict_update = (policy_update == 'update-dups')
# Deduplicate revisions
rev_metadata: List[Any] = []
orig_metadata: List[Any] = []
revs_to_delete: List[Any] = []
origs_to_delete: List[Any] = []
+ summary: Dict = {}
for (orig_item, rev_item) in results:
assert rev_item['metadata'] == orig_item['metadata']
if not rev_item['metadata'] or \
rev_item['metadata'].keys() <= {'@context'}:
# If we didn't find any metadata, don't store a DB record
# (and delete existing ones, if any)
if rev_item not in revs_to_delete:
revs_to_delete.append(rev_item)
if orig_item not in origs_to_delete:
origs_to_delete.append(orig_item)
else:
if rev_item not in rev_metadata:
rev_metadata.append(rev_item)
if orig_item not in orig_metadata:
orig_metadata.append(orig_item)
if rev_metadata:
- self.idx_storage.revision_intrinsic_metadata_add(
+ summary_rev = self.idx_storage.revision_intrinsic_metadata_add(
rev_metadata, conflict_update=conflict_update)
+ summary.update(summary_rev)
if orig_metadata:
- self.idx_storage.origin_intrinsic_metadata_add(
+ summary_ori = self.idx_storage.origin_intrinsic_metadata_add(
orig_metadata, conflict_update=conflict_update)
+ summary.update(summary_ori)
# revs_to_delete should always be empty unless we changed a mapping
# to detect less files or less content.
# However, origs_to_delete may be empty whenever an upstream deletes
# a metadata file.
if origs_to_delete:
- self.idx_storage.origin_intrinsic_metadata_delete(origs_to_delete)
+ summary_ori = self.idx_storage.origin_intrinsic_metadata_delete(
+ origs_to_delete)
+ summary.update(summary_ori)
if revs_to_delete:
- self.idx_storage.revision_intrinsic_metadata_delete(revs_to_delete)
+ summary_rev = self.idx_storage.revision_intrinsic_metadata_delete(
+ revs_to_delete)
+ summary.update(summary_rev)
+
+ return summary
diff --git a/swh/indexer/mimetype.py b/swh/indexer/mimetype.py
index 0d391bb..fd8b86e 100644
--- a/swh/indexer/mimetype.py
+++ b/swh/indexer/mimetype.py
@@ -1,152 +1,154 @@
# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Optional, Dict, Any, List
import magic
from .indexer import ContentIndexer, ContentRangeIndexer
if not hasattr(magic.Magic, 'from_buffer'):
raise ImportError(
'Expected "import magic" to import python-magic, but file_magic '
'was imported instead.')
def compute_mimetype_encoding(raw_content: bytes) -> Dict[str, bytes]:
"""Determine mimetype and encoding from the raw content.
Args:
raw_content: content's raw data
Returns:
dict: mimetype and encoding key and corresponding values.
"""
m = magic.Magic(mime=True, mime_encoding=True)
res = m.from_buffer(raw_content)
(mimetype, encoding) = res.split('; charset=')
return {
'mimetype': mimetype,
'encoding': encoding,
}
class MixinMimetypeIndexer:
"""Mixin mimetype indexer.
See :class:`MimetypeIndexer` and :class:`MimetypeRangeIndexer`
"""
tool: Dict[str, Any]
idx_storage: Any
ADDITIONAL_CONFIG = {
'tools': ('dict', {
'name': 'file',
'version': '1:5.30-1+deb9u1',
'configuration': {
"type": "library",
"debian-package": "python3-magic"
},
}),
'write_batch_size': ('int', 1000),
}
CONFIG_BASE_FILENAME = 'indexer/mimetype' # type: Optional[str]
- def index(self, id: bytes, data: bytes) -> Dict[str, Any]:
+ def index(self, id: bytes, data: Optional[bytes] = None,
+ **kwargs) -> Dict[str, Any]:
"""Index sha1s' content and store result.
Args:
id: content's identifier
data: raw content in bytes
Returns:
dict: content's mimetype; dict keys being
- id: content's identifier (sha1)
- mimetype: mimetype in bytes
- encoding: encoding in bytes
"""
+ assert data is not None
properties = compute_mimetype_encoding(data)
properties.update({
'id': id,
'indexer_configuration_id': self.tool['id'],
})
return properties
def persist_index_computations(
- self, results: List[Dict], policy_update: List[str]
- ) -> None:
+ self, results: List[Dict], policy_update: str
+ ) -> Dict:
"""Persist the results in storage.
Args:
results: list of content's mimetype dicts
(see :meth:`.index`)
policy_update: either 'update-dups' or 'ignore-dups' to
respectively update duplicates or ignore them
"""
- self.idx_storage.content_mimetype_add(
+ return self.idx_storage.content_mimetype_add(
results, conflict_update=(policy_update == 'update-dups'))
class MimetypeIndexer(MixinMimetypeIndexer, ContentIndexer):
"""Mimetype Indexer working on list of content identifiers.
It:
- (optionally) filters out content already indexed (cf.
:meth:`.filter`)
- reads content from objstorage per the content's id (sha1)
- computes {mimetype, encoding} from that content
- stores result in storage
"""
def filter(self, ids):
"""Filter out known sha1s and return only missing ones.
"""
yield from self.idx_storage.content_mimetype_missing((
{
'id': sha1,
'indexer_configuration_id': self.tool['id'],
} for sha1 in ids
))
class MimetypeRangeIndexer(MixinMimetypeIndexer, ContentRangeIndexer):
"""Mimetype Range Indexer working on range of content identifiers.
It:
- (optionally) filters out content already indexed (cf
:meth:`.indexed_contents_in_range`)
- reads content from objstorage per the content's id (sha1)
- computes {mimetype, encoding} from that content
- stores result in storage
"""
def indexed_contents_in_range(
self, start: bytes, end: bytes
) -> Dict[str, Optional[bytes]]:
"""Retrieve indexed content id within range [start, end].
Args:
start: Starting bound from range identifier
end: End range identifier
Returns:
dict: a dict with keys:
- ids: iterable of content ids within the range.
- next: The next range of sha1 starts at
this sha1 if any
"""
return self.idx_storage.content_mimetype_get_range(
start, end, self.tool['id'])
diff --git a/swh/indexer/origin_head.py b/swh/indexer/origin_head.py
index 707d643..abfbb02 100644
--- a/swh/indexer/origin_head.py
+++ b/swh/indexer/origin_head.py
@@ -1,158 +1,158 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import List, Tuple, Any, Dict, Union
import re
import click
import logging
from swh.indexer.indexer import OriginIndexer
class OriginHeadIndexer(OriginIndexer):
"""Origin-level indexer.
This indexer is in charge of looking up the revision that acts as the
"head" of an origin.
In git, this is usually the commit pointed to by the 'master' branch."""
USE_TOOLS = False
def persist_index_computations(
self, results: Any, policy_update: str
- ) -> None:
+ ) -> Dict:
"""Do nothing. The indexer's results are not persistent, they
should only be piped to another indexer."""
- pass
+ return {}
# Dispatch
def index(self, origin_url):
latest_visit = self.storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'], require_snapshot=True)
if latest_visit is None:
return None
latest_snapshot = self.storage.snapshot_get(latest_visit['snapshot'])
method = getattr(
self, '_try_get_%s_head' % latest_visit['type'],
self._try_get_head_generic)
rev_id = method(latest_snapshot)
if rev_id is not None:
return {
'origin_url': origin_url,
'revision_id': rev_id,
}
# could not find a head revision
return None
# Tarballs
_archive_filename_re = re.compile(
rb'^'
rb'(?P<pkgname>.*)[-_]'
rb'(?P<version>[0-9]+(\.[0-9])*)'
rb'(?P<preversion>[-+][a-zA-Z0-9.~]+?)?'
rb'(?P<extension>(\.[a-zA-Z0-9]+)+)'
rb'$')
@classmethod
def _parse_version(
cls: Any, filename: str
) -> Tuple[Union[float, int], ...]:
"""Extracts the release version from an archive filename,
to get an ordering whose maximum is likely to be the last
version of the software
>>> OriginHeadIndexer._parse_version(b'foo')
(-inf,)
>>> OriginHeadIndexer._parse_version(b'foo.tar.gz')
(-inf,)
>>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1.tar.gz')
(0, 0, 1, 0)
>>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1-beta2.tar.gz')
(0, 0, 1, -1, 'beta2')
>>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1+foobar.tar.gz')
(0, 0, 1, 1, 'foobar')
"""
res = cls._archive_filename_re.match(filename)
if res is None:
return (float('-infinity'),)
version = [int(n) for n in res.group('version').decode().split('.')]
if res.group('preversion') is None:
version.append(0)
else:
preversion = res.group('preversion').decode()
if preversion.startswith('-'):
version.append(-1)
version.append(preversion[1:])
elif preversion.startswith('+'):
version.append(1)
version.append(preversion[1:])
else:
assert False, res.group('preversion')
return tuple(version)
def _try_get_ftp_head(self, snapshot: Dict[str, Any]) -> Any:
archive_names = list(snapshot['branches'])
max_archive_name = max(archive_names, key=self._parse_version)
r = self._try_resolve_target(snapshot['branches'], max_archive_name)
return r
# Generic
def _try_get_head_generic(
self, snapshot: Dict[str, Any]
) -> Any:
# Works on 'deposit', 'pypi', and VCSs.
try:
branches = snapshot['branches']
except KeyError:
return None
else:
return (
self._try_resolve_target(branches, b'HEAD') or
self._try_resolve_target(branches, b'master')
)
def _try_resolve_target(self, branches: Dict, target_name: bytes) -> Any:
try:
target = branches[target_name]
if target is None:
return None
while target['target_type'] == 'alias':
target = branches[target['target']]
if target is None:
return None
if target['target_type'] == 'revision':
return target['target']
elif target['target_type'] == 'content':
return None # TODO
elif target['target_type'] == 'directory':
return None # TODO
elif target['target_type'] == 'release':
return None # TODO
else:
assert False
except KeyError:
return None
@click.command()
@click.option('--origins', '-i',
help='Origins to lookup, in the "type+url" format',
multiple=True)
def main(origins: List[str]) -> None:
rev_metadata_indexer = OriginHeadIndexer()
rev_metadata_indexer.run(origins)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
diff --git a/swh/indexer/rehash.py b/swh/indexer/rehash.py
index 038b63e..bec65ec 100644
--- a/swh/indexer/rehash.py
+++ b/swh/indexer/rehash.py
@@ -1,177 +1,190 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import logging
import itertools
from collections import defaultdict
from typing import Dict, Any, Tuple, List, Generator
from swh.core import utils
from swh.core.config import SWHConfig
from swh.model import hashutil
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
from swh.storage import get_storage
class RecomputeChecksums(SWHConfig):
"""Class in charge of (re)computing content's hashes.
Hashes to compute are defined across 2 configuration options:
compute_checksums ([str])
list of hash algorithms that
py:func:`swh.model.hashutil.MultiHash.from_data` function should
be able to deal with. For variable-length checksums, a desired
checksum length should also be provided. Their format is
<algorithm's name>:<variable-length> e.g: blake2:512
recompute_checksums (bool)
a boolean to notify that we also want to recompute potential existing
hashes specified in compute_checksums. Default to False.
"""
DEFAULT_CONFIG = {
# The storage to read from or update metadata to
'storage': ('dict', {
'cls': 'remote',
'args': {
'url': 'http://localhost:5002/'
},
}),
# The objstorage to read contents' data from
'objstorage': ('dict', {
'cls': 'pathslicing',
'args': {
'root': '/srv/softwareheritage/objects',
'slicing': '0:2/2:4/4:6',
},
}),
# the set of checksums that should be computed.
# Examples: 'sha1_git', 'blake2b512', 'blake2s256'
'compute_checksums': (
'list[str]', []),
# whether checksums that already exist in the DB should be
# recomputed/updated or left untouched
'recompute_checksums': ('bool', False),
# Number of contents to retrieve blobs at the same time
'batch_size_retrieve_content': ('int', 10),
# Number of contents to update at the same time
'batch_size_update': ('int', 100),
}
CONFIG_BASE_FILENAME = 'indexer/rehash'
def __init__(self) -> None:
self.config = self.parse_config_file()
self.storage = get_storage(**self.config['storage'])
self.objstorage = get_objstorage(**self.config['objstorage'])
self.compute_checksums = self.config['compute_checksums']
self.recompute_checksums = self.config[
'recompute_checksums']
self.batch_size_retrieve_content = self.config[
'batch_size_retrieve_content']
self.batch_size_update = self.config[
'batch_size_update']
self.log = logging.getLogger('swh.indexer.rehash')
if not self.compute_checksums:
raise ValueError('Checksums list should not be empty.')
def _read_content_ids(
self, contents: List[Dict[str, Any]]
) -> Generator[bytes, Any, None]:
"""Read the content identifiers from the contents.
"""
for c in contents:
h = c['sha1']
if isinstance(h, str):
h = hashutil.hash_to_bytes(h)
yield h
def get_new_contents_metadata(
self, all_contents: List[Dict[str, Any]]
) -> Generator[Tuple[Dict[str, Any], List[Any]], Any, None]:
"""Retrieve raw contents and compute new checksums on the
contents. Unknown or corrupted contents are skipped.
Args:
all_contents: List of contents as dictionary with
the necessary primary keys
Yields:
tuple: tuple of (content to update, list of checksums computed)
"""
content_ids = self._read_content_ids(all_contents)
for contents in utils.grouper(content_ids,
self.batch_size_retrieve_content):
contents_iter = itertools.tee(contents, 2)
try:
content_metadata = self.storage.content_get_metadata(
[s for s in contents_iter[0]])
except Exception:
self.log.exception(
'Problem when reading contents metadata.')
continue
for content in content_metadata:
# Recompute checksums provided in compute_checksums options
if self.recompute_checksums:
checksums_to_compute = list(self.compute_checksums)
else:
# Compute checksums provided in compute_checksums
# options not already defined for that content
checksums_to_compute = [h for h in self.compute_checksums
if not content.get(h)]
if not checksums_to_compute: # Nothing to recompute
continue
try:
raw_content = self.objstorage.get(content['sha1'])
except ObjNotFoundError:
self.log.warning('Content %s not found in objstorage!' %
content['sha1'])
continue
content_hashes = hashutil.MultiHash.from_data(
raw_content, hash_names=checksums_to_compute).digest()
content.update(content_hashes)
yield content, checksums_to_compute
- def run(self, contents: List[Dict[str, Any]]) -> None:
+ def run(self, contents: List[Dict[str, Any]]) -> Dict:
"""Given a list of content:
- (re)compute a given set of checksums on contents available in our
object storage
- update those contents with the new metadata
- Args:
- contents: contents as dictionary with necessary keys.
- key present in such dictionary should be the ones defined in
- the 'primary_key' option.
+ Args:
+ contents: contents as dictionary with necessary keys.
+ key present in such dictionary should be the ones defined in
+ the 'primary_key' option.
+
+ Returns:
+ A summary dict with keys 'status', the task's status, and
+ 'count', the number of updated contents.
"""
+ status = 'uneventful'
+ count = 0
for data in utils.grouper(
self.get_new_contents_metadata(contents),
self.batch_size_update):
groups: Dict[str, List[Any]] = defaultdict(list)
for content, keys_to_update in data:
keys = ','.join(keys_to_update)
groups[keys].append(content)
for keys_to_update, contents in groups.items():
keys = keys_to_update.split(',')
try:
self.storage.content_update(contents,
keys=keys)
+ count += len(contents)
+ status = 'eventful'
except Exception:
self.log.exception('Problem during update.')
continue
+
+ return {
+ 'status': status,
+ 'count': count,
+ }
diff --git a/swh/indexer/tasks.py b/swh/indexer/tasks.py
index dc47146..0e4331f 100644
--- a/swh/indexer/tasks.py
+++ b/swh/indexer/tasks.py
@@ -1,57 +1,50 @@
-# Copyright (C) 2016-2019 The Software Heritage developers
+# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from celery import current_app as app
from .mimetype import MimetypeIndexer, MimetypeRangeIndexer
from .ctags import CtagsIndexer
from .fossology_license import (
FossologyLicenseIndexer, FossologyLicenseRangeIndexer
)
from .rehash import RecomputeChecksums
from .metadata import OriginMetadataIndexer
@app.task(name=__name__ + '.OriginMetadata')
def origin_metadata(*args, **kwargs):
- results = OriginMetadataIndexer().run(*args, **kwargs)
- return getattr(results, 'results', results)
+ return OriginMetadataIndexer().run(*args, **kwargs)
@app.task(name=__name__ + '.Ctags')
def ctags(*args, **kwargs):
- results = CtagsIndexer().run(*args, **kwargs)
- return getattr(results, 'results', results)
+ return CtagsIndexer().run(*args, **kwargs)
@app.task(name=__name__ + '.ContentFossologyLicense')
def fossology_license(*args, **kwargs):
- results = FossologyLicenseIndexer().run(*args, **kwargs)
- return getattr(results, 'results', results)
+ return FossologyLicenseIndexer().run(*args, **kwargs)
@app.task(name=__name__ + '.RecomputeChecksums')
def recompute_checksums(*args, **kwargs):
- results = RecomputeChecksums().run(*args, **kwargs)
- return getattr(results, 'results', results)
+ return RecomputeChecksums().run(*args, **kwargs)
@app.task(name=__name__ + '.ContentMimetype')
def mimetype(*args, **kwargs):
- results = MimetypeIndexer().run(*args, **kwargs)
- return {'status': 'eventful' if results else 'uneventful'}
+ return MimetypeIndexer().run(*args, **kwargs)
@app.task(name=__name__ + '.ContentRangeMimetype')
def range_mimetype(*args, **kwargs):
- results = MimetypeRangeIndexer().run(*args, **kwargs)
- return {'status': 'eventful' if results else 'uneventful'}
+ return MimetypeRangeIndexer().run(*args, **kwargs)
@app.task(name=__name__ + '.ContentRangeFossologyLicense')
def range_license(*args, **kwargs):
- results = FossologyLicenseRangeIndexer().run(*args, **kwargs)
- return {'status': 'eventful' if results else 'uneventful'}
+ return FossologyLicenseRangeIndexer().run(*args, **kwargs)
diff --git a/swh/indexer/tests/test_fossology_license.py b/swh/indexer/tests/test_fossology_license.py
index 8b15b90..90bfa40 100644
--- a/swh/indexer/tests/test_fossology_license.py
+++ b/swh/indexer/tests/test_fossology_license.py
@@ -1,181 +1,181 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import unittest
import pytest
from unittest.mock import patch
from typing import Any, Dict
from swh.indexer import fossology_license
from swh.indexer.fossology_license import (
FossologyLicenseIndexer, FossologyLicenseRangeIndexer,
compute_license
)
from swh.indexer.tests.utils import (
SHA1_TO_LICENSES, CommonContentIndexerTest, CommonContentIndexerRangeTest,
BASE_TEST_CONFIG, fill_storage, fill_obj_storage, filter_dict,
)
class BasicTest(unittest.TestCase):
@patch("swh.indexer.fossology_license.subprocess")
def test_compute_license(self, mock_subprocess):
"""Computing licenses from a raw content should return results
"""
for path, intermediary_result, output in [
(b'some/path', None,
[]),
(b'some/path/2', [],
[]),
(b'other/path', ' contains license(s) GPL,AGPL',
['GPL', 'AGPL'])]:
mock_subprocess.check_output.return_value = intermediary_result
- actual_result = compute_license(path, log=None)
+ actual_result = compute_license(path)
self.assertEqual(actual_result, {
'licenses': output,
'path': path,
})
-def mock_compute_license(path, log=None):
+def mock_compute_license(path):
"""path is the content identifier
"""
if isinstance(id, bytes):
path = path.decode('utf-8')
# path is something like /tmp/tmpXXX/<sha1> so we keep only the sha1 part
path = path.split('/')[-1]
return {
'licenses': SHA1_TO_LICENSES.get(path)
}
CONFIG = {
**BASE_TEST_CONFIG,
'workdir': '/tmp',
'tools': {
'name': 'nomos',
'version': '3.1.0rc2-31-ga2cbb8c',
'configuration': {
'command_line': 'nomossa <filepath>',
},
},
} # type: Dict[str, Any]
RANGE_CONFIG = dict(list(CONFIG.items()) + [('write_batch_size', 100)])
class TestFossologyLicenseIndexer(CommonContentIndexerTest, unittest.TestCase):
"""Language indexer test scenarios:
- Known sha1s in the input list have their data indexed
- Unknown sha1 in the input list are not indexed
"""
def get_indexer_results(self, ids):
yield from self.idx_storage.content_fossology_license_get(ids)
def setUp(self):
super().setUp()
# replace actual license computation with a mock
self.orig_compute_license = fossology_license.compute_license
fossology_license.compute_license = mock_compute_license
self.indexer = FossologyLicenseIndexer(CONFIG)
self.indexer.catch_exceptions = False
self.idx_storage = self.indexer.idx_storage
fill_storage(self.indexer.storage)
fill_obj_storage(self.indexer.objstorage)
self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5'
self.id1 = '688a5ef812c53907562fe379d4b3851e69c7cb15'
self.id2 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709' # empty content
tool = {k.replace('tool_', ''): v
for (k, v) in self.indexer.tool.items()}
# then
self.expected_results = {
self.id0: {
'tool': tool,
'licenses': SHA1_TO_LICENSES[self.id0],
},
self.id1: {
'tool': tool,
'licenses': SHA1_TO_LICENSES[self.id1],
},
self.id2: {
'tool': tool,
'licenses': SHA1_TO_LICENSES[self.id2],
}
}
def tearDown(self):
super().tearDown()
fossology_license.compute_license = self.orig_compute_license
class TestFossologyLicenseRangeIndexer(
CommonContentIndexerRangeTest, unittest.TestCase):
"""Range Fossology License Indexer tests.
- new data within range are indexed
- no data outside a range are indexed
- with filtering existing indexed data prior to compute new index
- without filtering existing indexed data prior to compute new index
"""
def setUp(self):
super().setUp()
# replace actual license computation with a mock
self.orig_compute_license = fossology_license.compute_license
fossology_license.compute_license = mock_compute_license
self.indexer = FossologyLicenseRangeIndexer(config=RANGE_CONFIG)
self.indexer.catch_exceptions = False
fill_storage(self.indexer.storage)
fill_obj_storage(self.indexer.objstorage)
self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5'
self.id1 = '02fb2c89e14f7fab46701478c83779c7beb7b069'
self.id2 = '103bc087db1d26afc3a0283f38663d081e9b01e6'
tool_id = self.indexer.tool['id']
self.expected_results = {
self.id0: {
'id': self.id0,
'indexer_configuration_id': tool_id,
'licenses': SHA1_TO_LICENSES[self.id0]
},
self.id1: {
'id': self.id1,
'indexer_configuration_id': tool_id,
'licenses': SHA1_TO_LICENSES[self.id1]
},
self.id2: {
'id': self.id2,
'indexer_configuration_id': tool_id,
'licenses': SHA1_TO_LICENSES[self.id2]
}
}
def tearDown(self):
super().tearDown()
fossology_license.compute_license = self.orig_compute_license
def test_fossology_w_no_tool():
with pytest.raises(ValueError):
FossologyLicenseIndexer(config=filter_dict(CONFIG, 'tools'))
def test_fossology_range_w_no_tool():
with pytest.raises(ValueError):
FossologyLicenseRangeIndexer(config=filter_dict(RANGE_CONFIG, 'tools'))
diff --git a/swh/indexer/tests/utils.py b/swh/indexer/tests/utils.py
index fee5c52..084460d 100644
--- a/swh/indexer/tests/utils.py
+++ b/swh/indexer/tests/utils.py
@@ -1,766 +1,765 @@
# Copyright (C) 2017-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import abc
import datetime
import functools
import random
from typing import Dict, Any
import unittest
from hypothesis import strategies
from swh.model import hashutil
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.indexer.storage import INDEXER_CFG_KEY
# Base configuration shared by the indexer tests: storage, objstorage and
# indexer storage all run on in-memory backends, so tests need no external
# services.
BASE_TEST_CONFIG: Dict[str, Dict[str, Any]] = {
    'storage': {
        'cls': 'pipeline',
        'steps': [
            {'cls': 'validate'},  # validate objects before storing in memory
            {'cls': 'memory'},
        ]
    },
    'objstorage': {
        'cls': 'memory',
        'args': {
        },
    },
    INDEXER_CFG_KEY: {
        'cls': 'memory',
        'args': {
        },
    },
}
# Origins visited by the tests, one (type, url) pair per origin.  Each url
# is referenced again by one entry of SNAPSHOTS below.
ORIGIN_VISITS = [
    {
        'type': 'git',
        'url': 'https://github.com/SoftwareHeritage/swh-storage'},
    {
        'type': 'ftp',
        'url': 'rsync://ftp.gnu.org/gnu/3dldf'},
    {
        'type': 'deposit',
        'url': 'https://forge.softwareheritage.org/source/jesuisgpl/'},
    {
        'type': 'pypi',
        'url': 'https://pypi.org/project/limnoria/'},
    {
        'type': 'svn',
        'url': 'http://0-512-md.googlecode.com/svn/'},
    {
        'type': 'git',
        'url': 'https://github.com/librariesio/yarn-parser'},
    {
        'type': 'git',
        'url': 'https://github.com/librariesio/yarn-parser.git'},
]
# One snapshot per origin of ORIGIN_VISITS, in the same order.  Entries
# without an 'id' key get a random snapshot id in fill_storage().
SNAPSHOTS = [
    {
        'origin': 'https://github.com/SoftwareHeritage/swh-storage',
        'branches': {
            b'refs/heads/add-revision-origin-cache': {
                'target': b'L[\xce\x1c\x88\x8eF\t\xf1"\x19\x1e\xfb\xc0'
                          b's\xe7/\xe9l\x1e',
                'target_type': 'revision'},
            b'refs/head/master': {
                'target': b'8K\x12\x00d\x03\xcc\xe4]bS\xe3\x8f{\xd7}'
                          b'\xac\xefrm',
                'target_type': 'revision'},
            b'HEAD': {
                # alias branches point at another branch name, not a hash
                'target': b'refs/head/master',
                'target_type': 'alias'},
            b'refs/tags/v0.0.103': {
                'target': b'\xb6"Im{\xfdLb\xb0\x94N\xea\x96m\x13x\x88+'
                          b'\x0f\xdd',
                'target_type': 'release'},
        }},
    {
        'origin': 'rsync://ftp.gnu.org/gnu/3dldf',
        'branches': {
            b'3DLDF-1.1.4.tar.gz': {
                'target': b'dJ\xfb\x1c\x91\xf4\x82B%]6\xa2\x90|\xd3\xfc'
                          b'"G\x99\x11',
                'target_type': 'revision'},
            b'3DLDF-2.0.2.tar.gz': {
                'target': b'\xb6\x0e\xe7\x9e9\xac\xaa\x19\x9e='
                          b'\xd1\xc5\x00\\\xc6\xfc\xe0\xa6\xb4V',
                'target_type': 'revision'},
            b'3DLDF-2.0.3-examples.tar.gz': {
                'target': b'!H\x19\xc0\xee\x82-\x12F1\xbd\x97'
                          b'\xfe\xadZ\x80\x80\xc1\x83\xff',
                'target_type': 'revision'},
            b'3DLDF-2.0.3.tar.gz': {
                'target': b'\x8e\xa9\x8e/\xea}\x9feF\xf4\x9f\xfd\xee'
                          b'\xcc\x1a\xb4`\x8c\x8by',
                'target_type': 'revision'},
            b'3DLDF-2.0.tar.gz': {
                'target': b'F6*\xff(?\x19a\xef\xb6\xc2\x1fv$S\xe3G'
                          b'\xd3\xd1m',
                'target_type': 'revision'}
        }},
    {
        'origin': 'https://forge.softwareheritage.org/source/jesuisgpl/',
        'branches': {
            b'master': {
                'target': b'\xe7n\xa4\x9c\x9f\xfb\xb7\xf76\x11\x08{'
                          b'\xa6\xe9\x99\xb1\x9e]q\xeb',
                'target_type': 'revision'}
        },
        'id': b"h\xc0\xd2a\x04\xd4~'\x8d\xd6\xbe\x07\xeda\xfa\xfbV"
              b"\x1d\r "},
    {
        'origin': 'https://pypi.org/project/limnoria/',
        'branches': {
            b'HEAD': {
                'target': b'releases/2018.09.09',
                'target_type': 'alias'},
            b'releases/2018.09.01': {
                'target': b'<\xee1(\xe8\x8d_\xc1\xc9\xa6rT\xf1\x1d'
                          b'\xbb\xdfF\xfdw\xcf',
                'target_type': 'revision'},
            b'releases/2018.09.09': {
                'target': b'\x83\xb9\xb6\xc7\x05\xb1%\xd0\xfem\xd8k'
                          b'A\x10\x9d\xc5\xfa2\xf8t',
                'target_type': 'revision'}},
        'id': b'{\xda\x8e\x84\x7fX\xff\x92\x80^\x93V\x18\xa3\xfay'
              b'\x12\x9e\xd6\xb3'},
    {
        'origin': 'http://0-512-md.googlecode.com/svn/',
        'branches': {
            b'master': {
                'target': b'\xe4?r\xe1,\x88\xab\xec\xe7\x9a\x87\xb8'
                          b'\xc9\xad#.\x1bw=\x18',
                'target_type': 'revision'}},
        'id': b'\xa1\xa2\x8c\n\xb3\x87\xa8\xf9\xe0a\x8c\xb7'
              b'\x05\xea\xb8\x1f\xc4H\xf4s'},
    # both yarn-parser origins point at the single revision in REVISIONS
    {
        'origin': 'https://github.com/librariesio/yarn-parser',
        'branches': {
            b'HEAD': {
                'target': hash_to_bytes(
                    '8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
                'target_type': 'revision'}}},
    {
        'origin': 'https://github.com/librariesio/yarn-parser.git',
        'branches': {
            b'HEAD': {
                'target': hash_to_bytes(
                    '8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
                'target_type': 'revision'}}},
]
# The single test revision; both yarn-parser snapshots above target it, and
# its 'directory' matches DIRECTORY_ID below.
REVISIONS = [{
    'id': hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
    'message': 'Improve search functionality',
    'author': {
        'name': b'Andrew Nesbitt',
        'fullname': b'Andrew Nesbitt <andrewnez@gmail.com>',
        'email': b'andrewnez@gmail.com'
    },
    'committer': {
        'name': b'Andrew Nesbitt',
        'fullname': b'Andrew Nesbitt <andrewnez@gmail.com>',
        'email': b'andrewnez@gmail.com'
    },
    'committer_date': {
        'negative_utc': None,
        'offset': 120,  # minutes east of UTC
        'timestamp': {
            'microseconds': 0,
            'seconds': 1380883849
        }
    },
    'type': 'git',
    'synthetic': False,
    'date': {
        'negative_utc': False,
        'timestamp': {
            'seconds': 1487596456,
            'microseconds': 0
        },
        'offset': 0
    },
    'directory': b'10'  # == DIRECTORY_ID
}]
# Root directory of the test revision.  The b'cde' target's content (a
# package.json) is stored in OBJ_STORAGE_DATA under its hex-encoded id.
DIRECTORY_ID = b'10'
DIRECTORY_ENTRIES = [
    {
        'name': b'index.js',
        'type': 'file',
        'target': b'abc',
        'perms': 33188,  # 0o100644: regular file
    },
    {
        'name': b'package.json',
        'type': 'file',
        'target': b'cde',
        'perms': 33188,
    },
    {
        'name': b'.github',
        'type': 'dir',
        'target': b'11',
        'perms': 16384,  # 0o40000: directory
    }
]
# License lists keyed by hex content sha1; the fossology license tests use
# these directly as expected results.  The last key is the sha1 of b''.
SHA1_TO_LICENSES = {
    '01c9379dfc33803963d07c1ccc748d3fe4c96bb5': ['GPL'],
    '02fb2c89e14f7fab46701478c83779c7beb7b069': ['Apache2.0'],
    '103bc087db1d26afc3a0283f38663d081e9b01e6': ['MIT'],
    '688a5ef812c53907562fe379d4b3851e69c7cb15': ['AGPL'],
    'da39a3ee5e6b4b0d3255bfef95601890afd80709': [],  # empty content
}
# Fake ctags entries keyed by hex content sha1.
SHA1_TO_CTAGS = {
    '01c9379dfc33803963d07c1ccc748d3fe4c96bb5': [{
        'name': 'foo',
        'kind': 'str',
        'line': 10,
        'lang': 'bar',
    }],
    'd4c647f0fc257591cc9ba1722484229780d1c607': [{
        'name': 'let',
        'kind': 'int',
        'line': 100,
        'lang': 'haskell',
    }],
    '688a5ef812c53907562fe379d4b3851e69c7cb15': [{
        'name': 'symbol',
        'kind': 'float',
        'line': 99,
        'lang': 'python',
    }],
}
OBJ_STORAGE_DATA = {
'01c9379dfc33803963d07c1ccc748d3fe4c96bb5': b'this is some text',
'688a5ef812c53907562fe379d4b3851e69c7cb15': b'another text',
'8986af901dd2043044ce8f0d8fc039153641cf17': b'yet another text',
'02fb2c89e14f7fab46701478c83779c7beb7b069': b"""
import unittest
import logging
from swh.indexer.mimetype import MimetypeIndexer
from swh.indexer.tests.test_utils import MockObjStorage
class MockStorage():
def content_mimetype_add(self, mimetypes):
self.state = mimetypes
self.conflict_update = conflict_update
def indexer_configuration_add(self, tools):
return [{
'id': 10,
}]
""",
'103bc087db1d26afc3a0283f38663d081e9b01e6': b"""
#ifndef __AVL__
#define __AVL__
typedef struct _avl_tree avl_tree;
typedef struct _data_t {
int content;
} data_t;
""",
'93666f74f1cf635c8c8ac118879da6ec5623c410': b"""
(should 'pygments (recognize 'lisp 'easily))
""",
'26a9f72a7c87cc9205725cfd879f514ff4f3d8d5': b"""
{
"name": "test_metadata",
"version": "0.0.1",
"description": "Simple package.json test for indexer",
"repository": {
"type": "git",
"url": "https://github.com/moranegg/metadata_test"
}
}
""",
'd4c647f0fc257591cc9ba1722484229780d1c607': b"""
{
"version": "5.0.3",
"name": "npm",
"description": "a package manager for JavaScript",
"keywords": [
"install",
"modules",
"package manager",
"package.json"
],
"preferGlobal": true,
"config": {
"publishtest": false
},
"homepage": "https://docs.npmjs.com/",
"author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me)",
"repository": {
"type": "git",
"url": "https://github.com/npm/npm"
},
"bugs": {
"url": "https://github.com/npm/npm/issues"
},
"dependencies": {
"JSONStream": "~1.3.1",
"abbrev": "~1.1.0",
"ansi-regex": "~2.1.1",
"ansicolors": "~0.3.2",
"ansistyles": "~0.1.3"
},
"devDependencies": {
"tacks": "~1.2.6",
"tap": "~10.3.2"
},
"license": "Artistic-2.0"
}
""",
'a7ab314d8a11d2c93e3dcf528ca294e7b431c449': b"""
""",
'da39a3ee5e6b4b0d3255bfef95601890afd80709': b'',
# 626364
hash_to_hex(b'bcd'): b'unimportant content for bcd',
# 636465
hash_to_hex(b'cde'): b"""
{
"name": "yarn-parser",
"version": "1.0.0",
"description": "Tiny web service for parsing yarn.lock files",
"main": "index.js",
"scripts": {
"start": "node index.js",
"test": "mocha"
},
"engines": {
"node": "9.8.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/librariesio/yarn-parser.git"
},
"keywords": [
"yarn",
"parse",
"lock",
"dependencies"
],
"author": "Andrew Nesbitt",
"license": "AGPL-3.0",
"bugs": {
"url": "https://github.com/librariesio/yarn-parser/issues"
},
"homepage": "https://github.com/librariesio/yarn-parser#readme",
"dependencies": {
"@yarnpkg/lockfile": "^1.0.0",
"body-parser": "^1.15.2",
"express": "^4.14.0"
},
"devDependencies": {
"chai": "^4.1.2",
"mocha": "^5.2.0",
"request": "^2.87.0",
"test": "^0.6.0"
}
}
"""
}
# Expected metadata for the yarn-parser package.json above — presumably the
# codemeta translation produced by the metadata indexer (verify against the
# metadata tests).
YARN_PARSER_METADATA = {
    '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
    'url':
        'https://github.com/librariesio/yarn-parser#readme',
    # NOTE(review): the doubled 'git+' prefix mirrors the translator's
    # output for a 'git+https://...' repository url — confirm intended
    'codeRepository':
        'git+git+https://github.com/librariesio/yarn-parser.git',
    'author': [{
        'type': 'Person',
        'name': 'Andrew Nesbitt'
    }],
    'license': 'https://spdx.org/licenses/AGPL-3.0',
    'version': '1.0.0',
    'description':
        "Tiny web service for parsing yarn.lock files",
    'issueTracker':
        'https://github.com/librariesio/yarn-parser/issues',
    'name': 'yarn-parser',
    'keywords': ['yarn', 'parse', 'lock', 'dependencies'],
    'type': 'SoftwareSourceCode',
}
# Mixes arbitrary characters with a fixed set of metadata-ish key names so
# generated documents regularly hit the keys the translators care about.
json_dict_keys = strategies.one_of(
    strategies.characters(),
    strategies.just('type'),
    strategies.just('url'),
    strategies.just('name'),
    strategies.just('email'),
    strategies.just('@id'),
    strategies.just('@context'),
    strategies.just('repository'),
    strategies.just('license'),
    strategies.just('repositories'),
    strategies.just('licenses'),
)
"""Hypothesis strategy that generates strings, with an emphasis on those
that are often used as dictionary keys in metadata files."""
# Scalars at the leaves; non-empty lists and dicts (keyed by json_dict_keys)
# at every recursive level.
generic_json_document = strategies.recursive(
    strategies.none() | strategies.booleans() | strategies.floats() |
    strategies.characters(),
    lambda children: (
        strategies.lists(children, min_size=1) |
        strategies.dictionaries(json_dict_keys, children, min_size=1)
    )
)
"""Hypothesis strategy that generates possible values for values of JSON
metadata files."""
def json_document_strategy(keys=None):
    """Generates an hypothesis strategy that generates metadata files
    for a JSON-based format that uses the given keys."""
    key_strategy = (
        strategies.characters() if keys is None
        else strategies.one_of(map(strategies.just, keys))
    )
    return strategies.dictionaries(
        key_strategy, generic_json_document, min_size=1)
def _tree_to_xml(root, xmlns, data):
def encode(s):
"Skips unpaired surrogates generated by json_document_strategy"
return s.encode('utf8', 'replace')
def to_xml(data, indent=b' '):
if data is None:
return b''
elif isinstance(data, (bool, str, int, float)):
return indent + encode(str(data))
elif isinstance(data, list):
return b'\n'.join(to_xml(v, indent=indent) for v in data)
elif isinstance(data, dict):
lines = []
for (key, value) in data.items():
lines.append(indent + encode('<{}>'.format(key)))
lines.append(to_xml(value, indent=indent+b' '))
lines.append(indent + encode('</{}>'.format(key)))
return b'\n'.join(lines)
else:
raise TypeError(data)
return b'\n'.join([
'<{} xmlns="{}">'.format(root, xmlns).encode(),
to_xml(data),
'</{}>'.format(root).encode(),
])
class TreeToXmlTest(unittest.TestCase):
    """Checks _tree_to_xml against hand-written serializations."""

    def test_leaves(self):
        # scalar leaves render as a single indented line inside the root
        cases = [
            (None, b'<root xmlns="http://example.com">\n\n</root>'),
            (True, b'<root xmlns="http://example.com">\n True\n</root>'),
            ('abc', b'<root xmlns="http://example.com">\n abc\n</root>'),
            (42, b'<root xmlns="http://example.com">\n 42\n</root>'),
            (3.14, b'<root xmlns="http://example.com">\n 3.14\n</root>'),
        ]
        for value, expected in cases:
            self.assertEqual(
                _tree_to_xml('root', 'http://example.com', value),
                expected)

    def test_dict(self):
        # either key order is acceptable
        self.assertIn(
            _tree_to_xml('root', 'http://example.com', {
                'foo': 'bar',
                'baz': 'qux'
            }),
            [
                b'<root xmlns="http://example.com">\n'
                b' <foo>\n  bar\n </foo>\n'
                b' <baz>\n  qux\n </baz>\n'
                b'</root>',
                b'<root xmlns="http://example.com">\n'
                b' <baz>\n  qux\n </baz>\n'
                b' <foo>\n  bar\n </foo>\n'
                b'</root>'
            ]
        )

    def test_list(self):
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', [
                {'foo': 'bar'},
                {'foo': 'baz'},
            ]),
            b'<root xmlns="http://example.com">\n'
            b' <foo>\n  bar\n </foo>\n'
            b' <foo>\n  baz\n </foo>\n'
            b'</root>'
        )
def xml_document_strategy(keys, root, xmlns):
    """Generates an hypothesis strategy that generates metadata files
    for an XML format that uses the given keys."""
    # freeze root/xmlns; the strategy only supplies the data tree
    build_xml = functools.partial(_tree_to_xml, root, xmlns)
    return strategies.builds(build_xml, json_document_strategy(keys))
def filter_dict(d, keys):
    """Return a copy of mapping *d* with the given key(s) removed.

    Args:
        d: the source mapping (left untouched)
        keys: a single key, or a list/tuple of keys, to drop

    Returns:
        a new dict without the listed keys
    """
    if not isinstance(keys, (list, tuple)):
        keys = (keys, )
    # dict comprehension instead of dict(generator): same result, clearer
    return {k: v for k, v in d.items() if k not in keys}
def fill_obj_storage(obj_storage):
    """Add some content in an object storage."""
    # OBJ_STORAGE_DATA keys are hex sha1 strings; the object storage
    # expects binary ids
    for (obj_id, content) in OBJ_STORAGE_DATA.items():
        obj_storage.add(content, obj_id=hash_to_bytes(obj_id))
def fill_storage(storage):
    """Load the test origins, visits, snapshots, revision, contents and
    directory into *storage*."""
    origin_types = {}
    for visit_info in ORIGIN_VISITS:
        storage.origin_add_one({'url': visit_info['url']})
        origin_types[visit_info['url']] = visit_info['type']

    for snapshot in SNAPSHOTS:
        url = snapshot['origin']
        visit = storage.origin_visit_add(
            origin=url,
            date=datetime.datetime.now(),
            type=origin_types[url])
        # entries without a fixed id get a random 32-byte one
        snapshot_id = snapshot.get('id')
        if not snapshot_id:
            snapshot_id = bytes(random.randint(0, 255) for _ in range(32))
        storage.snapshot_add([{
            'id': snapshot_id,
            'branches': snapshot['branches'],
        }])
        storage.origin_visit_update(
            url, visit['visit'], status='full', snapshot=snapshot_id)

    storage.revision_add(REVISIONS)

    contents = []
    for sha1_hex, data in OBJ_STORAGE_DATA.items():
        digests = hashutil.MultiHash.from_data(data).digest()
        contents.append({
            'data': data,
            'length': len(data),
            'status': 'visible',
            'sha1': hash_to_bytes(sha1_hex),
            # test data reuses the sha1 as sha1_git
            'sha1_git': hash_to_bytes(sha1_hex),
            'sha256': digests['sha256'],
            'blake2s256': digests['blake2s256'],
        })
    storage.content_add(contents)

    storage.directory_add([{
        'id': DIRECTORY_ID,
        'entries': DIRECTORY_ENTRIES,
    }])
class CommonContentIndexerTest(metaclass=abc.ABCMeta):
    """Shared checks for content indexers: run the indexer on known and
    unknown sha1s and compare its storage state to expected_results."""

    legacy_get_format = False
    """True if and only if the tested indexer uses the legacy format.
    see: https://forge.softwareheritage.org/T1433
    """

    def get_indexer_results(self, ids):
        """Override this for indexers that don't have a mock storage."""
        return self.indexer.idx_storage.state

    def assert_legacy_results_ok(self, sha1s, expected_results=None):
        # XXX old format, remove this when all endpoints are
        # updated to the new one
        # see: https://forge.softwareheritage.org/T1433
        ids = [s if isinstance(s, bytes) else hash_to_bytes(s)
               for s in sha1s]
        actual = list(self.get_indexer_results(ids))
        expected = (self.expected_results if expected_results is None
                    else expected_results)
        self.assertEqual(len(expected), len(actual), (expected, actual))
        for row in actual:
            row_id = row['id']
            want = expected[hashutil.hash_to_hex(row_id)].copy()
            want['id'] = row_id
            self.assertEqual(row, want)

    def assert_results_ok(self, sha1s, expected_results=None):
        if self.legacy_get_format:
            self.assert_legacy_results_ok(sha1s, expected_results)
            return
        ids = [s if isinstance(s, bytes) else hash_to_bytes(s)
               for s in sha1s]
        actual = list(self.get_indexer_results(ids))
        expected = (self.expected_results if expected_results is None
                    else expected_results)
        self.assertEqual(len(expected), len(actual), (expected, actual))
        for row in actual:
            # new format: each result is a one-entry {id: [data]} mapping
            row_id, row_data = next(iter(row.items()))
            want = expected[hashutil.hash_to_hex(row_id)].copy()
            self.assertEqual(row_data, [want])

    def test_index(self):
        """Known sha1 have their data indexed
        """
        sha1s = [self.id0, self.id1, self.id2]
        # first pass creates the rows; second pass must be idempotent
        for policy in ('update-dups', 'ignore-dups'):
            self.indexer.run(sha1s, policy_update=policy)
            self.assert_results_ok(sha1s)

    def test_index_one_unknown_sha1(self):
        """Unknown sha1 are not indexed"""
        sha1s = [self.id1,
                 '799a5ef812c53907562fe379d4b3851e69c7cb15',  # unknown
                 '800a5ef812c53907562fe379d4b3851e69c7cb15']  # unknown
        self.indexer.run(sha1s, policy_update='update-dups')
        # only the known id shows up in the results
        expected = {
            sha1: res for sha1, res in self.expected_results.items()
            if sha1 in sha1s
        }
        self.assert_results_ok(sha1s, expected)
class CommonContentIndexerRangeTest:
    """Allows to factorize tests on range indexer.

    Fixed here: the block contained unresolved diff hunks (old and new
    lines side by side); this is the post-patch version.  The deprecated
    ``assertEquals`` alias is replaced by ``assertEqual``.
    """
    def setUp(self):
        # hex sha1s of every stored content, in lexicographic order
        self.contents = sorted(OBJ_STORAGE_DATA)

    def assert_results_ok(self, start, end, actual_results,
                          expected_results=None):
        """Check each indexed row matches the expected data, lies within
        [start, end] and was produced by the configured tool."""
        if expected_results is None:
            expected_results = self.expected_results
        actual_results = list(actual_results)
        for indexed_data in actual_results:
            _id = indexed_data['id']
            assert isinstance(_id, bytes)
            # compare a copy with a hex id, for readable failure output
            indexed_data = indexed_data.copy()
            indexed_data['id'] = hash_to_hex(indexed_data['id'])
            self.assertEqual(indexed_data, expected_results[hash_to_hex(_id)])
            self.assertTrue(start <= _id <= end)
            _tool_id = indexed_data['indexer_configuration_id']
            self.assertEqual(_tool_id, self.indexer.tool['id'])

    def test__index_contents(self):
        """Indexing contents without existing data results in indexed data
        """
        _start, _end = [self.contents[0], self.contents[2]]  # output hex ids
        start, end = map(hashutil.hash_to_bytes, (_start, _end))
        # given
        actual_results = list(self.indexer._index_contents(
            start, end, indexed={}))
        self.assert_results_ok(start, end, actual_results)

    def test__index_contents_with_indexed_data(self):
        """Indexing contents with existing data results in less indexed data
        """
        _start, _end = [self.contents[0], self.contents[2]]  # output hex ids
        start, end = map(hashutil.hash_to_bytes, (_start, _end))
        data_indexed = [self.id0, self.id2]
        # given
        actual_results = self.indexer._index_contents(
            start, end, indexed=set(map(hash_to_bytes, data_indexed)))
        # craft the expected results: drop the already-indexed ids
        expected_results = self.expected_results.copy()
        for already_indexed_key in data_indexed:
            expected_results.pop(already_indexed_key)
        self.assert_results_ok(
            start, end, actual_results, expected_results)

    def test_generate_content_get(self):
        """Optimal indexing should result in indexed data
        """
        _start, _end = [self.contents[0], self.contents[2]]  # output hex ids
        start, end = map(hashutil.hash_to_bytes, (_start, _end))
        # given
        actual_results = self.indexer.run(start, end)
        # then
        self.assertTrue(actual_results)

    def test_generate_content_get_input_as_bytes(self):
        """Optimal indexing should result in indexed data
        Input are in bytes here.
        """
        _start, _end = [self.contents[0], self.contents[2]]  # output hex ids
        start, end = map(hashutil.hash_to_bytes, (_start, _end))
        # given
        actual_results = self.indexer.run(
            start, end, skip_existing=False)
        # no already indexed data so same result as prior test
        # then
        self.assertEqual(actual_results, {'status': 'uneventful'})

    def test_generate_content_get_no_result(self):
        """No result indexed returns False"""
        _start, _end = ['0000000000000000000000000000000000000000',
                        '0000000000000000000000000000000000000001']
        start, end = map(hashutil.hash_to_bytes, (_start, _end))
        # given
        actual_results = self.indexer.run(start, end, incremental=False)
        # then
        self.assertEqual(actual_results, {'status': 'uneventful'})
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Jul 4 2025, 8:07 AM (10 w, 1 d ago)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3289143
Attached To
rDCIDX Metadata indexer
Event Timeline
Log In to Comment