Page Menu
Home
Software Heritage
Search
Configure Global Search
Log In
Files
F9342950
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Flag For Later
Size
396 KB
Subscribers
None
View Options
This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/swh/indexer/cli.py b/swh/indexer/cli.py
index bdf7530..c4551e6 100644
--- a/swh/indexer/cli.py
+++ b/swh/indexer/cli.py
@@ -1,315 +1,320 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Iterator
# WARNING: do not import unnecessary things here to keep cli startup time under
# control
import click
from swh.core.cli import CONTEXT_SETTINGS, AliasedGroup
from swh.core.cli import swh as swh_cli_group
@swh_cli_group.group(
name="indexer", context_settings=CONTEXT_SETTINGS, cls=AliasedGroup
)
@click.option(
"--config-file",
"-C",
default=None,
- type=click.Path(exists=True, dir_okay=False,),
+ type=click.Path(
+ exists=True,
+ dir_okay=False,
+ ),
help="Configuration file.",
)
@click.pass_context
def indexer_cli_group(ctx, config_file):
"""Software Heritage Indexer tools.
The Indexer is used to mine the content of the archive and extract derived
information from archive source code artifacts.
"""
from swh.core import config
ctx.ensure_object(dict)
conf = config.read(config_file)
ctx.obj["config"] = conf
def _get_api(getter, config, config_key, url):
if url:
config[config_key] = {"cls": "remote", "url": url}
elif config_key not in config:
raise click.ClickException("Missing configuration for {}".format(config_key))
return getter(**config[config_key])
@indexer_cli_group.group("mapping")
def mapping():
"""Manage Software Heritage Indexer mappings."""
pass
@mapping.command("list")
def mapping_list():
"""Prints the list of known mappings."""
from swh.indexer import metadata_dictionary
mapping_names = [mapping.name for mapping in metadata_dictionary.MAPPINGS.values()]
mapping_names.sort()
for mapping_name in mapping_names:
click.echo(mapping_name)
@mapping.command("list-terms")
@click.option(
"--exclude-mapping", multiple=True, help="Exclude the given mapping from the output"
)
@click.option(
"--concise",
is_flag=True,
default=False,
help="Don't print the list of mappings supporting each term.",
)
def mapping_list_terms(concise, exclude_mapping):
"""Prints the list of known CodeMeta terms, and which mappings
support them."""
from swh.indexer import metadata_dictionary
properties = metadata_dictionary.list_terms()
for (property_name, supported_mappings) in sorted(properties.items()):
supported_mappings = {m.name for m in supported_mappings}
supported_mappings -= set(exclude_mapping)
if supported_mappings:
if concise:
click.echo(property_name)
else:
click.echo("{}:".format(property_name))
click.echo("\t" + ", ".join(sorted(supported_mappings)))
@mapping.command("translate")
@click.argument("mapping-name")
@click.argument("file", type=click.File("rb"))
def mapping_translate(mapping_name, file):
"""Prints the list of known mappings."""
import json
from swh.indexer import metadata_dictionary
mapping_cls = [
cls for cls in metadata_dictionary.MAPPINGS.values() if cls.name == mapping_name
]
if not mapping_cls:
raise click.ClickException("Unknown mapping {}".format(mapping_name))
assert len(mapping_cls) == 1
mapping_cls = mapping_cls[0]
mapping = mapping_cls()
codemeta_doc = mapping.translate(file.read())
click.echo(json.dumps(codemeta_doc, indent=4))
@indexer_cli_group.group("schedule")
@click.option("--scheduler-url", "-s", default=None, help="URL of the scheduler API")
@click.option(
"--indexer-storage-url", "-i", default=None, help="URL of the indexer storage API"
)
@click.option(
"--storage-url", "-g", default=None, help="URL of the (graph) storage API"
)
@click.option(
"--dry-run/--no-dry-run",
is_flag=True,
default=False,
help="List only what would be scheduled.",
)
@click.pass_context
def schedule(ctx, scheduler_url, storage_url, indexer_storage_url, dry_run):
"""Manipulate Software Heritage Indexer tasks.
Via SWH Scheduler's API."""
from swh.indexer.storage import get_indexer_storage
from swh.scheduler import get_scheduler
from swh.storage import get_storage
ctx.obj["indexer_storage"] = _get_api(
get_indexer_storage, ctx.obj["config"], "indexer_storage", indexer_storage_url
)
ctx.obj["storage"] = _get_api(
get_storage, ctx.obj["config"], "storage", storage_url
)
ctx.obj["scheduler"] = _get_api(
get_scheduler, ctx.obj["config"], "scheduler", scheduler_url
)
if dry_run:
ctx.obj["scheduler"] = None
def list_origins_by_producer(idx_storage, mappings, tool_ids) -> Iterator[str]:
next_page_token = ""
limit = 10000
while next_page_token is not None:
result = idx_storage.origin_intrinsic_metadata_search_by_producer(
page_token=next_page_token,
limit=limit,
ids_only=True,
mappings=mappings or None,
tool_ids=tool_ids or None,
)
next_page_token = result.next_page_token
yield from result.results
@schedule.command("reindex_origin_metadata")
@click.option(
"--batch-size",
"-b",
"origin_batch_size",
default=10,
show_default=True,
type=int,
help="Number of origins per task",
)
@click.option(
"--tool-id",
"-t",
"tool_ids",
type=int,
multiple=True,
help="Restrict search of old metadata to this/these tool ids.",
)
@click.option(
"--mapping",
"-m",
"mappings",
multiple=True,
help="Mapping(s) that should be re-scheduled (eg. 'npm', 'gemspec', 'maven')",
)
@click.option(
"--task-type",
default="index-origin-metadata",
show_default=True,
help="Name of the task type to schedule.",
)
@click.pass_context
def schedule_origin_metadata_reindex(
ctx, origin_batch_size, tool_ids, mappings, task_type
):
"""Schedules indexing tasks for origins that were already indexed."""
from swh.scheduler.cli_utils import schedule_origin_batches
idx_storage = ctx.obj["indexer_storage"]
scheduler = ctx.obj["scheduler"]
origins = list_origins_by_producer(idx_storage, mappings, tool_ids)
kwargs = {"retries_left": 1}
schedule_origin_batches(scheduler, task_type, origins, origin_batch_size, kwargs)
@indexer_cli_group.command("journal-client")
@click.option("--scheduler-url", "-s", default=None, help="URL of the scheduler API")
@click.option(
"--origin-metadata-task-type",
default="index-origin-metadata",
help="Name of the task running the origin metadata indexer.",
)
@click.option(
"--broker", "brokers", type=str, multiple=True, help="Kafka broker to connect to."
)
@click.option(
"--prefix", type=str, default=None, help="Prefix of Kafka topic names to read from."
)
@click.option("--group-id", type=str, help="Consumer/group id for reading from Kafka.")
@click.option(
"--stop-after-objects",
"-m",
default=None,
type=int,
help="Maximum number of objects to replay. Default is to run forever.",
)
@click.pass_context
def journal_client(
ctx,
scheduler_url,
origin_metadata_task_type,
brokers,
prefix,
group_id,
stop_after_objects,
):
"""Listens for new objects from the SWH Journal, and schedules tasks
to run relevant indexers (currently, only origin-intrinsic-metadata)
on these new objects."""
import functools
from swh.indexer.journal_client import process_journal_objects
from swh.journal.client import get_journal_client
from swh.scheduler import get_scheduler
cfg = ctx.obj["config"]
journal_cfg = cfg.get("journal", {})
scheduler = _get_api(get_scheduler, cfg, "scheduler", scheduler_url)
brokers = brokers or journal_cfg.get("brokers")
if not brokers:
raise ValueError("The brokers configuration is mandatory.")
prefix = prefix or journal_cfg.get("prefix")
group_id = group_id or journal_cfg.get("group_id")
origin_metadata_task_type = origin_metadata_task_type or journal_cfg.get(
"origin_metadata_task_type"
)
stop_after_objects = stop_after_objects or journal_cfg.get("stop_after_objects")
client = get_journal_client(
cls="kafka",
brokers=brokers,
prefix=prefix,
group_id=group_id,
object_types=["origin_visit_status"],
stop_after_objects=stop_after_objects,
)
worker_fn = functools.partial(
process_journal_objects,
scheduler=scheduler,
- task_names={"origin_metadata": origin_metadata_task_type,},
+ task_names={
+ "origin_metadata": origin_metadata_task_type,
+ },
)
try:
client.process(worker_fn)
except KeyboardInterrupt:
ctx.exit(0)
else:
print("Done.")
finally:
client.close()
@indexer_cli_group.command("rpc-serve")
@click.argument("config-path", required=True)
@click.option("--host", default="0.0.0.0", help="Host to run the server")
@click.option("--port", default=5007, type=click.INT, help="Binding port of the server")
@click.option(
"--debug/--nodebug",
default=True,
help="Indicates if the server should run in debug mode",
)
def rpc_server(config_path, host, port, debug):
"""Starts a Software Heritage Indexer RPC HTTP server."""
from swh.indexer.storage.api.server import app, load_and_check_config
api_cfg = load_and_check_config(config_path, type="any")
app.config.update(api_cfg)
app.run(host, port=int(port), debug=bool(debug))
def main():
return indexer_cli_group(auto_envvar_prefix="SWH_INDEXER")
if __name__ == "__main__":
main()
diff --git a/swh/indexer/ctags.py b/swh/indexer/ctags.py
index e024477..d56204e 100644
--- a/swh/indexer/ctags.py
+++ b/swh/indexer/ctags.py
@@ -1,147 +1,153 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import json
import subprocess
from typing import Any, Dict, Iterator, List, Optional
from swh.core.config import merge_configs
from swh.indexer.storage import Sha1
from swh.indexer.storage.model import ContentCtagsRow
from swh.model import hashutil
from .indexer import ContentIndexer, write_to_temp
# Options used to compute tags
__FLAGS = [
"--fields=+lnz", # +l: language
# +n: line number of tag definition
# +z: include the symbol's kind (function, variable, ...)
"--sort=no", # sort output on tag name
"--links=no", # do not follow symlinks
"--output-format=json", # outputs in json
]
def compute_language(content, log=None):
raise NotImplementedError(
"Language detection was unreliable, so it is currently disabled. "
"See https://forge.softwareheritage.org/D1455"
)
def run_ctags(path, lang=None, ctags_command="ctags") -> Iterator[Dict[str, Any]]:
"""Run ctags on file path with optional language.
Args:
path: path to the file
lang: language for that path (optional)
Yields:
dict: ctags' output
"""
optional = []
if lang:
optional = ["--language-force=%s" % lang]
cmd = [ctags_command] + __FLAGS + optional + [path]
output = subprocess.check_output(cmd, universal_newlines=True)
for symbol in output.split("\n"):
if not symbol:
continue
js_symbol = json.loads(symbol)
yield {
"name": js_symbol["name"],
"kind": js_symbol["kind"],
"line": js_symbol["line"],
"lang": js_symbol["language"],
}
DEFAULT_CONFIG: Dict[str, Any] = {
"workdir": "/tmp/swh/indexer.ctags",
"tools": {
"name": "universal-ctags",
"version": "~git7859817b",
"configuration": {
"command_line": """ctags --fields=+lnz --sort=no --links=no """
"""--output-format=json <filepath>"""
},
},
"languages": {},
}
class CtagsIndexer(ContentIndexer[ContentCtagsRow]):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = merge_configs(DEFAULT_CONFIG, self.config)
self.working_directory = self.config["workdir"]
self.language_map = self.config["languages"]
def filter(self, ids):
- """Filter out known sha1s and return only missing ones.
-
- """
+ """Filter out known sha1s and return only missing ones."""
yield from self.idx_storage.content_ctags_missing(
- ({"id": sha1, "indexer_configuration_id": self.tool["id"],} for sha1 in ids)
+ (
+ {
+ "id": sha1,
+ "indexer_configuration_id": self.tool["id"],
+ }
+ for sha1 in ids
+ )
)
def index(
self, id: Sha1, data: Optional[bytes] = None, **kwargs
) -> List[ContentCtagsRow]:
"""Index sha1s' content and store result.
Args:
id (bytes): content's identifier
data (bytes): raw content in bytes
Returns:
dict: a dict representing a content_mimetype with keys:
- **id** (bytes): content's identifier (sha1)
- **ctags** ([dict]): ctags list of symbols
"""
assert isinstance(id, bytes)
assert data is not None
lang = compute_language(data, log=self.log)["lang"]
if not lang:
return []
ctags_lang = self.language_map.get(lang)
if not ctags_lang:
return []
ctags = []
filename = hashutil.hash_to_hex(id)
with write_to_temp(
filename=filename, data=data, working_directory=self.working_directory
) as content_path:
for ctag_kwargs in run_ctags(content_path, lang=ctags_lang):
ctags.append(
ContentCtagsRow(
- id=id, indexer_configuration_id=self.tool["id"], **ctag_kwargs,
+ id=id,
+ indexer_configuration_id=self.tool["id"],
+ **ctag_kwargs,
)
)
return ctags
def persist_index_computations(
self, results: List[ContentCtagsRow]
) -> Dict[str, int]:
"""Persist the results in storage.
Args:
results: list of ctags returned by index()
"""
return self.idx_storage.content_ctags_add(results)
diff --git a/swh/indexer/fossology_license.py b/swh/indexer/fossology_license.py
index ab8b8b3..76440a2 100644
--- a/swh/indexer/fossology_license.py
+++ b/swh/indexer/fossology_license.py
@@ -1,184 +1,192 @@
# Copyright (C) 2016-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import logging
import subprocess
from typing import Any, Dict, Iterable, List, Optional
from swh.core.api.classes import stream_results
from swh.core.config import merge_configs
from swh.indexer.storage.interface import IndexerStorageInterface, Sha1
from swh.indexer.storage.model import ContentLicenseRow
from swh.model import hashutil
from .indexer import ContentIndexer, ContentPartitionIndexer, write_to_temp
logger = logging.getLogger(__name__)
def compute_license(path) -> Dict:
"""Determine license from file at path.
Args:
path: filepath to determine the license
Returns:
dict: A dict with the following keys:
- licenses ([str]): associated detected licenses to path
- path (bytes): content filepath
"""
try:
properties = subprocess.check_output(["nomossa", path], universal_newlines=True)
if properties:
res = properties.rstrip().split(" contains license(s) ")
licenses = res[1].split(",")
else:
licenses = []
return {
"licenses": licenses,
"path": path,
}
except subprocess.CalledProcessError:
from os import path as __path
logger.exception(
"Problem during license detection for sha1 %s" % __path.basename(path)
)
return {
"licenses": [],
"path": path,
}
DEFAULT_CONFIG: Dict[str, Any] = {
"workdir": "/tmp/swh/indexer.fossology.license",
"tools": {
"name": "nomos",
"version": "3.1.0rc2-31-ga2cbb8c",
- "configuration": {"command_line": "nomossa <filepath>",},
+ "configuration": {
+ "command_line": "nomossa <filepath>",
+ },
},
"write_batch_size": 1000,
}
class MixinFossologyLicenseIndexer:
"""Mixin fossology license indexer.
See :class:`FossologyLicenseIndexer` and
:class:`FossologyLicensePartitionIndexer`
"""
tool: Any
idx_storage: IndexerStorageInterface
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = merge_configs(DEFAULT_CONFIG, self.config)
self.working_directory = self.config["workdir"]
def index(
self, id: Sha1, data: Optional[bytes] = None, **kwargs
) -> List[ContentLicenseRow]:
"""Index sha1s' content and store result.
Args:
id (bytes): content's identifier
raw_content (bytes): associated raw content to content id
Returns:
dict: A dict, representing a content_license, with keys:
- id (bytes): content's identifier (sha1)
- license (bytes): license in bytes
- path (bytes): path
- indexer_configuration_id (int): tool used to compute the output
"""
assert data is not None
with write_to_temp(
filename=hashutil.hash_to_hex(id), # use the id as pathname
data=data,
working_directory=self.working_directory,
) as content_path:
properties = compute_license(path=content_path)
return [
ContentLicenseRow(
- id=id, indexer_configuration_id=self.tool["id"], license=license,
+ id=id,
+ indexer_configuration_id=self.tool["id"],
+ license=license,
)
for license in properties["licenses"]
]
def persist_index_computations(
self, results: List[ContentLicenseRow]
) -> Dict[str, int]:
"""Persist the results in storage.
Args:
results: list of content_license dict with the
following keys:
- id (bytes): content's identifier (sha1)
- license (bytes): license in bytes
- path (bytes): path
"""
return self.idx_storage.content_fossology_license_add(results)
class FossologyLicenseIndexer(
MixinFossologyLicenseIndexer, ContentIndexer[ContentLicenseRow]
):
"""Indexer in charge of:
- filtering out content already indexed
- reading content from objstorage per the content's id (sha1)
- computing {license, encoding} from that content
- store result in storage
"""
def filter(self, ids):
- """Filter out known sha1s and return only missing ones.
-
- """
+ """Filter out known sha1s and return only missing ones."""
yield from self.idx_storage.content_fossology_license_missing(
- ({"id": sha1, "indexer_configuration_id": self.tool["id"],} for sha1 in ids)
+ (
+ {
+ "id": sha1,
+ "indexer_configuration_id": self.tool["id"],
+ }
+ for sha1 in ids
+ )
)
class FossologyLicensePartitionIndexer(
MixinFossologyLicenseIndexer, ContentPartitionIndexer[ContentLicenseRow]
):
"""FossologyLicense Range Indexer working on range/partition of content identifiers.
- filters out the non textual content
- (optionally) filters out content already indexed (cf
:meth:`.indexed_contents_in_partition`)
- reads content from objstorage per the content's id (sha1)
- computes {mimetype, encoding} from that content
- stores result in storage
"""
def indexed_contents_in_partition(
self, partition_id: int, nb_partitions: int, page_token: Optional[str] = None
) -> Iterable[Sha1]:
"""Retrieve indexed content id within the partition id
Args:
partition_id: Index of the partition to fetch
nb_partitions: Total number of partitions to split into
page_token: opaque token used for pagination
"""
return stream_results(
self.idx_storage.content_fossology_license_get_partition,
self.tool["id"],
partition_id,
nb_partitions,
)
diff --git a/swh/indexer/indexer.py b/swh/indexer/indexer.py
index 85c0d72..58ebce4 100644
--- a/swh/indexer/indexer.py
+++ b/swh/indexer/indexer.py
@@ -1,615 +1,611 @@
# Copyright (C) 2016-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import abc
from contextlib import contextmanager
import logging
import os
import shutil
import tempfile
from typing import (
Any,
Dict,
Generic,
Iterable,
Iterator,
List,
Optional,
Set,
TypeVar,
Union,
)
import warnings
from swh.core import utils
from swh.core.config import load_from_envvar, merge_configs
from swh.indexer.storage import INDEXER_CFG_KEY, Sha1, get_indexer_storage
from swh.indexer.storage.interface import IndexerStorageInterface
from swh.model import hashutil
from swh.model.model import Revision, Sha1Git
from swh.objstorage.exc import ObjNotFoundError
from swh.objstorage.factory import get_objstorage
from swh.scheduler import CONFIG as SWH_CONFIG
from swh.storage import get_storage
from swh.storage.interface import StorageInterface
@contextmanager
def write_to_temp(filename: str, data: bytes, working_directory: str) -> Iterator[str]:
"""Write the sha1's content in a temporary file.
Args:
filename: one of sha1's many filenames
data: the sha1's content to write in temporary
file
working_directory: the directory into which the
file is written
Returns:
The path to the temporary file created. That file is
filled in with the raw content's data.
"""
os.makedirs(working_directory, exist_ok=True)
temp_dir = tempfile.mkdtemp(dir=working_directory)
content_path = os.path.join(temp_dir, filename)
with open(content_path, "wb") as f:
f.write(data)
yield content_path
shutil.rmtree(temp_dir)
DEFAULT_CONFIG = {
INDEXER_CFG_KEY: {"cls": "memory"},
"storage": {"cls": "memory"},
"objstorage": {"cls": "memory"},
}
TId = TypeVar("TId")
"""type of the ids of index()ed objects."""
TData = TypeVar("TData")
"""type of the objects passed to index()."""
TResult = TypeVar("TResult")
"""return type of index()"""
class BaseIndexer(Generic[TId, TData, TResult], metaclass=abc.ABCMeta):
"""Base class for indexers to inherit from.
The main entry point is the :func:`run` function which is in
charge of triggering the computations on the batch dict/ids
received.
Indexers can:
- filter out ids whose data has already been indexed.
- retrieve ids data from storage or objstorage
- index this data depending on the object and store the result in
storage.
To implement a new object type indexer, inherit from the
BaseIndexer and implement indexing:
:meth:`~BaseIndexer.run`:
object_ids are different depending on object. For example: sha1 for
content, sha1_git for revision, directory, release, and id for origin
To implement a new concrete indexer, inherit from the object level
classes: :class:`ContentIndexer`, :class:`RevisionIndexer`,
:class:`OriginIndexer`.
Then you need to implement the following functions:
:meth:`~BaseIndexer.filter`:
filter out data already indexed (in storage).
:meth:`~BaseIndexer.index_object`:
compute index on id with data (retrieved from the storage or the
objstorage by the id key) and return the resulting index computation.
:meth:`~BaseIndexer.persist_index_computations`:
persist the results of multiple index computations in the storage.
The new indexer implementation can also override the following functions:
:meth:`~BaseIndexer.prepare`:
Configuration preparation for the indexer. When overriding, this must
call the `super().prepare()` instruction.
:meth:`~BaseIndexer.check`:
Configuration check for the indexer. When overriding, this must call the
`super().check()` instruction.
:meth:`~BaseIndexer.register_tools`:
This should return a dict of the tool(s) to use when indexing or
filtering.
"""
results: List[TResult]
USE_TOOLS = True
catch_exceptions = True
"""Prevents exceptions in `index()` from raising too high. Set to False
in tests to properly catch all exceptions."""
scheduler: Any
storage: StorageInterface
objstorage: Any
idx_storage: IndexerStorageInterface
def __init__(self, config=None, **kw) -> None:
- """Prepare and check that the indexer is ready to run.
-
- """
+ """Prepare and check that the indexer is ready to run."""
super().__init__()
if config is not None:
self.config = config
elif SWH_CONFIG:
self.config = SWH_CONFIG.copy()
else:
self.config = load_from_envvar()
self.config = merge_configs(DEFAULT_CONFIG, self.config)
self.prepare()
self.check()
self.log.debug("%s: config=%s", self, self.config)
def prepare(self) -> None:
"""Prepare the indexer's needed runtime configuration.
- Without this step, the indexer cannot possibly run.
+ Without this step, the indexer cannot possibly run.
"""
config_storage = self.config.get("storage")
if config_storage:
self.storage = get_storage(**config_storage)
self.objstorage = get_objstorage(**self.config["objstorage"])
idx_storage = self.config[INDEXER_CFG_KEY]
self.idx_storage = get_indexer_storage(**idx_storage)
_log = logging.getLogger("requests.packages.urllib3.connectionpool")
_log.setLevel(logging.WARN)
self.log = logging.getLogger("swh.indexer")
if self.USE_TOOLS:
self.tools = list(self.register_tools(self.config.get("tools", [])))
self.results = []
@property
def tool(self) -> Dict:
return self.tools[0]
def check(self) -> None:
"""Check the indexer's configuration is ok before proceeding.
- If ok, does nothing. If not raise error.
+ If ok, does nothing. If not raise error.
"""
if self.USE_TOOLS and not self.tools:
raise ValueError("Tools %s is unknown, cannot continue" % self.tools)
def _prepare_tool(self, tool: Dict[str, Any]) -> Dict[str, Any]:
- """Prepare the tool dict to be compliant with the storage api.
-
- """
+ """Prepare the tool dict to be compliant with the storage api."""
return {"tool_%s" % key: value for key, value in tool.items()}
def register_tools(
self, tools: Union[Dict[str, Any], List[Dict[str, Any]]]
) -> List[Dict[str, Any]]:
"""Permit to register tools to the storage.
Add a sensible default which can be overridden if not
sufficient. (For now, all indexers use only one tool)
Expects the self.config['tools'] property to be set with
one or more tools.
Args:
tools: Either a dict or a list of dict.
Returns:
list: List of dicts with additional id key.
Raises:
ValueError: if not a list nor a dict.
"""
if isinstance(tools, list):
tools = list(map(self._prepare_tool, tools))
elif isinstance(tools, dict):
tools = [self._prepare_tool(tools)]
else:
raise ValueError("Configuration tool(s) must be a dict or list!")
if tools:
return self.idx_storage.indexer_configuration_add(tools)
else:
return []
def index(self, id: TId, data: Optional[TData], **kwargs) -> List[TResult]:
"""Index computation for the id and associated raw data.
Args:
id: identifier or Dict object
data: id's data from storage or objstorage depending on
object type
Returns:
dict: a dict that makes sense for the
:meth:`.persist_index_computations` method.
"""
raise NotImplementedError()
def filter(self, ids: List[TId]) -> Iterator[TId]:
"""Filter missing ids for that particular indexer.
Args:
ids: list of ids
Yields:
iterator of missing ids
"""
yield from ids
@abc.abstractmethod
def persist_index_computations(self, results: List[TResult]) -> Dict[str, int]:
"""Persist the computation resulting from the index.
Args:
results: List of results. One result is the
result of the index function.
Returns:
a summary dict of what has been inserted in the storage
"""
return {}
class ContentIndexer(BaseIndexer[Sha1, bytes, TResult], Generic[TResult]):
"""A content indexer working on a list of ids directly.
To work on indexer partition, use the :class:`ContentPartitionIndexer`
instead.
Note: :class:`ContentIndexer` is not an instantiable object. To
use it, one should inherit from this class and override the
methods mentioned in the :class:`BaseIndexer` class.
"""
def run(self, ids: List[Sha1], **kwargs) -> Dict:
"""Given a list of ids:
- retrieve the content from the storage
- execute the indexing computations
- store the results
Args:
ids (Iterable[Union[bytes, str]]): sha1's identifier list
**kwargs: passed to the `index` method
Returns:
A summary Dict of the task's status
"""
if "policy_update" in kwargs:
warnings.warn(
"'policy_update' argument is deprecated and ignored.",
DeprecationWarning,
)
del kwargs["policy_update"]
sha1s = [
hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_ for id_ in ids
]
results = []
summary: Dict = {"status": "uneventful"}
try:
for sha1 in sha1s:
try:
raw_content = self.objstorage.get(sha1)
except ObjNotFoundError:
self.log.warning(
"Content %s not found in objstorage"
% hashutil.hash_to_hex(sha1)
)
continue
res = self.index(sha1, raw_content, **kwargs)
if res: # If no results, skip it
results.extend(res)
summary["status"] = "eventful"
summary = self.persist_index_computations(results)
self.results = results
except Exception:
if not self.catch_exceptions:
raise
self.log.exception("Problem when reading contents metadata.")
summary["status"] = "failed"
return summary
class ContentPartitionIndexer(BaseIndexer[Sha1, bytes, TResult], Generic[TResult]):
"""A content partition indexer.
This expects as input a partition_id and a nb_partitions. This will then index the
contents within that partition.
To work on a list of ids, use the :class:`ContentIndexer` instead.
Note: :class:`ContentPartitionIndexer` is not an instantiable
object. To use it, one should inherit from this class and override
the methods mentioned in the :class:`BaseIndexer` class.
"""
@abc.abstractmethod
def indexed_contents_in_partition(
self, partition_id: int, nb_partitions: int
) -> Iterable[Sha1]:
"""Retrieve indexed contents within range [start, end].
Args:
partition_id: Index of the partition to fetch
nb_partitions: Total number of partitions to split into
page_token: opaque token used for pagination
"""
pass
def _list_contents_to_index(
self, partition_id: int, nb_partitions: int, indexed: Set[Sha1]
) -> Iterable[Sha1]:
"""Compute from storage the new contents to index in the partition_id . The already
indexed contents are skipped.
Args:
partition_id: Index of the partition to fetch data from
nb_partitions: Total number of partition
indexed: Set of content already indexed.
Yields:
Sha1 id (bytes) of contents to index
"""
if not isinstance(partition_id, int) or not isinstance(nb_partitions, int):
raise TypeError(
f"identifiers must be int, not {partition_id!r} and {nb_partitions!r}."
)
next_page_token = None
while True:
result = self.storage.content_get_partition(
partition_id, nb_partitions, page_token=next_page_token
)
contents = result.results
for c in contents:
_id = hashutil.hash_to_bytes(c.sha1)
if _id in indexed:
continue
yield _id
next_page_token = result.next_page_token
if next_page_token is None:
break
def _index_contents(
self, partition_id: int, nb_partitions: int, indexed: Set[Sha1], **kwargs: Any
) -> Iterator[TResult]:
"""Index the contents within the partition_id.
Args:
start: Starting bound from range identifier
end: End range identifier
indexed: Set of content already indexed.
Yields:
indexing result as dict to persist in the indexer backend
"""
for sha1 in self._list_contents_to_index(partition_id, nb_partitions, indexed):
try:
raw_content = self.objstorage.get(sha1)
except ObjNotFoundError:
self.log.warning(f"Content {sha1.hex()} not found in objstorage")
continue
yield from self.index(sha1, raw_content, **kwargs)
def _index_with_skipping_already_done(
self, partition_id: int, nb_partitions: int
) -> Iterator[TResult]:
"""Index not already indexed contents within the partition partition_id
Args:
partition_id: Index of the partition to fetch
nb_partitions: Total number of partitions to split into
Yields:
indexing result as dict to persist in the indexer backend
"""
already_indexed_contents = set(
self.indexed_contents_in_partition(partition_id, nb_partitions)
)
return self._index_contents(
partition_id, nb_partitions, already_indexed_contents
)
def run(
self,
partition_id: int,
nb_partitions: int,
skip_existing: bool = True,
**kwargs,
) -> Dict:
"""Given a partition of content ids, index the contents within.
Either the indexer is incremental (filter out existing computed data) or it
computes everything from scratch.
Args:
partition_id: Index of the partition to fetch
nb_partitions: Total number of partitions to split into
skip_existing: Skip existing indexed data
(default) or not
**kwargs: passed to the `index` method
Returns:
dict with the indexing task status
"""
summary: Dict[str, Any] = {"status": "uneventful"}
count = 0
try:
if skip_existing:
gen = self._index_with_skipping_already_done(
partition_id, nb_partitions
)
else:
gen = self._index_contents(partition_id, nb_partitions, indexed=set([]))
count_object_added_key: Optional[str] = None
for contents in utils.grouper(gen, n=self.config["write_batch_size"]):
res = self.persist_index_computations(list(contents))
if not count_object_added_key:
count_object_added_key = list(res.keys())[0]
count += res[count_object_added_key]
if count > 0:
summary["status"] = "eventful"
except Exception:
if not self.catch_exceptions:
raise
self.log.exception("Problem when computing metadata.")
summary["status"] = "failed"
if count > 0 and count_object_added_key:
summary[count_object_added_key] = count
return summary
class OriginIndexer(BaseIndexer[str, None, TResult], Generic[TResult]):
    """Origin-level indexer built on :class:`BaseIndexer`.

    Not instantiable as-is: inherit from this class and override the
    methods mentioned in the :class:`BaseIndexer` class.
    """

    def run(self, origin_urls: List[str], **kwargs) -> Dict:
        """Index the given origin urls and persist the results.

        Args:
            origin_urls: list of origin urls.
            **kwargs: passed to the `index` method

        Returns:
            dict with the indexing task status
        """
        if "policy_update" in kwargs:
            del kwargs["policy_update"]
            warnings.warn(
                "'policy_update' argument is deprecated and ignored.",
                DeprecationWarning,
            )
        summary: Dict[str, Any] = {"status": "uneventful"}
        try:
            rows = self.index_list(origin_urls, **kwargs)
        except Exception:
            if not self.catch_exceptions:
                raise
            summary["status"] = "failed"
            return summary
        persisted = self.persist_index_computations(rows)
        self.results = rows
        if persisted:
            # Any positive counter means at least one row was written.
            if any(added > 0 for added in persisted.values()):
                summary["status"] = "eventful"
            summary.update(persisted)
        return summary

    def index_list(self, origin_urls: List[str], **kwargs) -> List[TResult]:
        """Index each origin url in turn; log then re-raise on the first
        failure."""
        rows: List[TResult] = []
        for url in origin_urls:
            try:
                rows.extend(self.index(url, **kwargs))
            except Exception:
                self.log.exception("Problem when processing origin %s", url)
                raise
        return rows
class RevisionIndexer(BaseIndexer[Sha1Git, Revision, TResult], Generic[TResult]):
    """An object type indexer, inherits from the :class:`BaseIndexer` and
    implements Revision indexing using the run method

    Note: the :class:`RevisionIndexer` is not an instantiable object.
    To use it in another context one should inherit from this class
    and override the methods mentioned in the :class:`BaseIndexer`
    class.
    """

    def run(self, ids: List[Sha1Git], **kwargs) -> Dict:
        """Given a list of sha1_gits:
        - retrieve revisions from storage
        - execute the indexing computations
        - store the results

        Args:
            ids: sha1_git's identifier list

        Returns:
            dict with the indexing task status
        """
        if "policy_update" in kwargs:
            warnings.warn(
                "'policy_update' argument is deprecated and ignored.",
                DeprecationWarning,
            )
            del kwargs["policy_update"]
        summary: Dict[str, Any] = {"status": "uneventful"}
        results = []
        # Accept both hex strings and raw bytes as revision identifiers.
        revision_ids = [
            hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_ for id_ in ids
        ]
        for (rev_id, rev) in zip(revision_ids, self.storage.revision_get(revision_ids)):
            if not rev:
                # TODO: call self.index() with rev=None?
                self.log.warning(
                    "Revision %s not found in storage", hashutil.hash_to_hex(rev_id)
                )
                continue
            try:
                results.extend(self.index(rev_id, rev))
            except Exception:
                if not self.catch_exceptions:
                    raise
                # NOTE(review): the first failing revision aborts the whole
                # batch and discards results indexed so far -- confirm this
                # is intended rather than continuing with the remaining ids.
                self.log.exception("Problem when processing revision")
                summary["status"] = "failed"
                return summary
        summary_persist = self.persist_index_computations(results)
        if summary_persist:
            # Any positive counter means at least one row was written.
            for value in summary_persist.values():
                if value > 0:
                    summary["status"] = "eventful"
            summary.update(summary_persist)
        self.results = results
        return summary
diff --git a/swh/indexer/metadata.py b/swh/indexer/metadata.py
index 8b207a6..9a4e6cb 100644
--- a/swh/indexer/metadata.py
+++ b/swh/indexer/metadata.py
@@ -1,379 +1,392 @@
# Copyright (C) 2017-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
)
from swh.core.config import merge_configs
from swh.core.utils import grouper
from swh.indexer.codemeta import merge_documents
from swh.indexer.indexer import ContentIndexer, OriginIndexer, RevisionIndexer
from swh.indexer.metadata_detector import detect_metadata
from swh.indexer.metadata_dictionary import MAPPINGS
from swh.indexer.origin_head import OriginHeadIndexer
from swh.indexer.storage import INDEXER_CFG_KEY, Sha1
from swh.indexer.storage.model import (
ContentMetadataRow,
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
from swh.model import hashutil
from swh.model.model import Revision, Sha1Git
REVISION_GET_BATCH_SIZE = 10
ORIGIN_GET_BATCH_SIZE = 10
T1 = TypeVar("T1")
T2 = TypeVar("T2")
def call_with_batches(
- f: Callable[[List[T1]], Iterable[T2]], args: List[T1], batch_size: int,
+ f: Callable[[List[T1]], Iterable[T2]],
+ args: List[T1],
+ batch_size: int,
) -> Iterator[T2]:
- """Calls a function with batches of args, and concatenates the results.
- """
+ """Calls a function with batches of args, and concatenates the results."""
groups = grouper(args, batch_size)
for group in groups:
yield from f(list(group))
class ContentMetadataIndexer(ContentIndexer[ContentMetadataRow]):
"""Content-level indexer
This indexer is in charge of:
- filtering out content already indexed in content_metadata
- reading content from objstorage with the content's id sha1
- computing metadata by given context
- using the metadata_dictionary as the 'swh-metadata-translator' tool
- store result in content_metadata table
"""
def filter(self, ids):
- """Filter out known sha1s and return only missing ones.
- """
+ """Filter out known sha1s and return only missing ones."""
yield from self.idx_storage.content_metadata_missing(
- ({"id": sha1, "indexer_configuration_id": self.tool["id"],} for sha1 in ids)
+ (
+ {
+ "id": sha1,
+ "indexer_configuration_id": self.tool["id"],
+ }
+ for sha1 in ids
+ )
)
def index(
self,
id: Sha1,
data: Optional[bytes] = None,
log_suffix="unknown revision",
**kwargs,
) -> List[ContentMetadataRow]:
"""Index sha1s' content and store result.
Args:
id: content's identifier
data: raw content in bytes
Returns:
dict: dictionary representing a content_metadata. If the
translation wasn't successful the metadata keys will
be returned as None
"""
assert isinstance(id, bytes)
assert data is not None
metadata = None
try:
mapping_name = self.tool["tool_configuration"]["context"]
log_suffix += ", content_id=%s" % hashutil.hash_to_hex(id)
metadata = MAPPINGS[mapping_name](log_suffix).translate(data)
except Exception:
self.log.exception(
"Problem during metadata translation "
"for content %s" % hashutil.hash_to_hex(id)
)
if metadata is None:
return []
return [
ContentMetadataRow(
- id=id, indexer_configuration_id=self.tool["id"], metadata=metadata,
+ id=id,
+ indexer_configuration_id=self.tool["id"],
+ metadata=metadata,
)
]
def persist_index_computations(
self, results: List[ContentMetadataRow]
) -> Dict[str, int]:
"""Persist the results in storage.
Args:
results: list of content_metadata, dict with the
following keys:
- id (bytes): content's identifier (sha1)
- metadata (jsonb): detected metadata
"""
return self.idx_storage.content_metadata_add(results)
# Default tool description for the revision-level metadata indexer; merged
# into the instance configuration by RevisionMetadataIndexer.__init__.
DEFAULT_CONFIG: Dict[str, Any] = {
    "tools": {
        "name": "swh-metadata-detector",
        "version": "0.0.2",
        "configuration": {},
    },
}
class RevisionMetadataIndexer(RevisionIndexer[RevisionIntrinsicMetadataRow]):
"""Revision-level indexer
This indexer is in charge of:
- filtering revisions already indexed in revision_intrinsic_metadata table
with defined computation tool
- retrieve all entry_files in root directory
- use metadata_detector for file_names containing metadata
- compute metadata translation if necessary and possible (depends on tool)
- send sha1s to content indexing if possible
- store the results for revision
"""
    def __init__(self, *args, **kwargs):
        # Overlay the user-provided configuration on top of DEFAULT_CONFIG.
        super().__init__(*args, **kwargs)
        self.config = merge_configs(DEFAULT_CONFIG, self.config)
def filter(self, sha1_gits):
- """Filter out known sha1s and return only missing ones.
-
- """
+ """Filter out known sha1s and return only missing ones."""
yield from self.idx_storage.revision_intrinsic_metadata_missing(
(
- {"id": sha1_git, "indexer_configuration_id": self.tool["id"],}
+ {
+ "id": sha1_git,
+ "indexer_configuration_id": self.tool["id"],
+ }
for sha1_git in sha1_gits
)
)
def index(
self, id: Sha1Git, data: Optional[Revision], **kwargs
) -> List[RevisionIntrinsicMetadataRow]:
"""Index rev by processing it and organizing result.
use metadata_detector to iterate on filenames
- if one filename detected -> sends file to content indexer
- if multiple file detected -> translation needed at revision level
Args:
id: sha1_git of the revision
data: revision model object from storage
Returns:
dict: dictionary representing a revision_intrinsic_metadata, with
keys:
- id (str): rev's identifier (sha1_git)
- indexer_configuration_id (bytes): tool used
- metadata: dict of retrieved metadata
"""
rev = data
assert isinstance(rev, Revision)
try:
root_dir = rev.directory
dir_ls = list(self.storage.directory_ls(root_dir, recursive=False))
if [entry["type"] for entry in dir_ls] == ["dir"]:
# If the root is just a single directory, recurse into it
# eg. PyPI packages, GNU tarballs
subdir = dir_ls[0]["target"]
dir_ls = list(self.storage.directory_ls(subdir, recursive=False))
files = [entry for entry in dir_ls if entry["type"] == "file"]
detected_files = detect_metadata(files)
(mappings, metadata) = self.translate_revision_intrinsic_metadata(
- detected_files, log_suffix="revision=%s" % hashutil.hash_to_hex(rev.id),
+ detected_files,
+ log_suffix="revision=%s" % hashutil.hash_to_hex(rev.id),
)
except Exception as e:
self.log.exception("Problem when indexing rev: %r", e)
return [
RevisionIntrinsicMetadataRow(
id=rev.id,
indexer_configuration_id=self.tool["id"],
mappings=mappings,
metadata=metadata,
)
]
    def persist_index_computations(
        self, results: List[RevisionIntrinsicMetadataRow]
    ) -> Dict[str, int]:
        """Persist the results in storage.

        Args:
            results: list of revision intrinsic metadata rows to store

        Returns:
            mapping of counter names to number of rows added
        """
        # TODO: add functions in storage to keep data in
        # revision_intrinsic_metadata
        return self.idx_storage.revision_intrinsic_metadata_add(results)
def translate_revision_intrinsic_metadata(
self, detected_files: Dict[str, List[Any]], log_suffix: str
) -> Tuple[List[Any], Any]:
"""
Determine plan of action to translate metadata when containing
one or multiple detected files:
Args:
detected_files: dictionary mapping context names (e.g.,
"npm", "authors") to list of sha1
Returns:
(List[str], dict): list of mappings used and dict with
translated metadata according to the CodeMeta vocabulary
"""
used_mappings = [MAPPINGS[context].name for context in detected_files]
metadata = []
tool = {
"name": "swh-metadata-translator",
"version": "0.0.2",
"configuration": {},
}
# TODO: iterate on each context, on each file
# -> get raw_contents
# -> translate each content
config = {k: self.config[k] for k in [INDEXER_CFG_KEY, "objstorage", "storage"]}
config["tools"] = [tool]
for context in detected_files.keys():
cfg = deepcopy(config)
cfg["tools"][0]["configuration"]["context"] = context
c_metadata_indexer = ContentMetadataIndexer(config=cfg)
# sha1s that are in content_metadata table
sha1s_in_storage = []
metadata_generator = self.idx_storage.content_metadata_get(
detected_files[context]
)
for c in metadata_generator:
# extracting metadata
sha1 = c.id
sha1s_in_storage.append(sha1)
local_metadata = c.metadata
# local metadata is aggregated
if local_metadata:
metadata.append(local_metadata)
sha1s_filtered = [
item for item in detected_files[context] if item not in sha1s_in_storage
]
if sha1s_filtered:
# content indexing
try:
c_metadata_indexer.run(
- sha1s_filtered, log_suffix=log_suffix,
+ sha1s_filtered,
+ log_suffix=log_suffix,
)
# on the fly possibility:
for result in c_metadata_indexer.results:
local_metadata = result.metadata
metadata.append(local_metadata)
except Exception:
self.log.exception("Exception while indexing metadata on contents")
metadata = merge_documents(metadata)
return (used_mappings, metadata)
class OriginMetadataIndexer(
OriginIndexer[Tuple[OriginIntrinsicMetadataRow, RevisionIntrinsicMetadataRow]]
):
USE_TOOLS = False
    def __init__(self, config=None, **kwargs) -> None:
        super().__init__(config=config, **kwargs)
        # Both sub-indexers share the same configuration as this indexer.
        self.origin_head_indexer = OriginHeadIndexer(config=config)
        self.revision_metadata_indexer = RevisionMetadataIndexer(config=config)
def index_list(
self, origin_urls: List[str], **kwargs
) -> List[Tuple[OriginIntrinsicMetadataRow, RevisionIntrinsicMetadataRow]]:
head_rev_ids = []
origins_with_head = []
origins = list(
call_with_batches(
- self.storage.origin_get, origin_urls, ORIGIN_GET_BATCH_SIZE,
+ self.storage.origin_get,
+ origin_urls,
+ ORIGIN_GET_BATCH_SIZE,
)
)
for origin in origins:
if origin is None:
continue
head_results = self.origin_head_indexer.index(origin.url)
if head_results:
(head_result,) = head_results
origins_with_head.append(origin)
head_rev_ids.append(head_result["revision_id"])
head_revs = list(
call_with_batches(
self.storage.revision_get, head_rev_ids, REVISION_GET_BATCH_SIZE
)
)
assert len(head_revs) == len(head_rev_ids)
results = []
for (origin, rev) in zip(origins_with_head, head_revs):
if not rev:
self.log.warning("Missing head revision of origin %r", origin.url)
continue
for rev_metadata in self.revision_metadata_indexer.index(rev.id, rev):
# There is at most one rev_metadata
orig_metadata = OriginIntrinsicMetadataRow(
from_revision=rev_metadata.id,
id=origin.url,
metadata=rev_metadata.metadata,
mappings=rev_metadata.mappings,
indexer_configuration_id=rev_metadata.indexer_configuration_id,
)
results.append((orig_metadata, rev_metadata))
return results
def persist_index_computations(
self,
results: List[Tuple[OriginIntrinsicMetadataRow, RevisionIntrinsicMetadataRow]],
) -> Dict[str, int]:
# Deduplicate revisions
rev_metadata: List[RevisionIntrinsicMetadataRow] = []
orig_metadata: List[OriginIntrinsicMetadataRow] = []
summary: Dict = {}
for (orig_item, rev_item) in results:
assert rev_item.metadata == orig_item.metadata
if rev_item.metadata and not (rev_item.metadata.keys() <= {"@context"}):
# Only store non-empty metadata sets
if rev_item not in rev_metadata:
rev_metadata.append(rev_item)
if orig_item not in orig_metadata:
orig_metadata.append(orig_item)
if rev_metadata:
summary_rev = self.idx_storage.revision_intrinsic_metadata_add(rev_metadata)
summary.update(summary_rev)
if orig_metadata:
summary_ori = self.idx_storage.origin_intrinsic_metadata_add(orig_metadata)
summary.update(summary_ori)
return summary
diff --git a/swh/indexer/mimetype.py b/swh/indexer/mimetype.py
index aa5fb4e..57c0972 100644
--- a/swh/indexer/mimetype.py
+++ b/swh/indexer/mimetype.py
@@ -1,163 +1,169 @@
# Copyright (C) 2016-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Any, Dict, Iterable, List, Optional
import magic
from swh.core.api.classes import stream_results
from swh.core.config import merge_configs
from swh.indexer.storage.interface import IndexerStorageInterface, Sha1
from swh.indexer.storage.model import ContentMimetypeRow
from .indexer import ContentIndexer, ContentPartitionIndexer
# Fail fast if the wrong "magic" package is installed: file_magic exposes a
# different API than python-magic (which provides Magic.from_buffer).
if not hasattr(magic.Magic, "from_buffer"):
    raise ImportError(
        'Expected "import magic" to import python-magic, but file_magic '
        "was imported instead."
    )
def compute_mimetype_encoding(raw_content: bytes) -> Dict[str, str]:
    """Detect the mimetype and character encoding of raw content.

    Args:
        raw_content: content's raw data

    Returns:
        dict with "mimetype" and "encoding" keys; "encoding" is the empty
        string when libmagic reports no single charset marker.
    """
    detector = magic.Magic(mime=True, mime_encoding=True)
    detected = detector.from_buffer(raw_content)
    try:
        mimetype, encoding = detected.split("; charset=")
    except ValueError:
        # No (or more than one) charset marker: keep the full answer as the
        # mimetype and leave the encoding empty.
        mimetype, encoding = detected, ""
    return {"mimetype": mimetype, "encoding": encoding}
# Default tool description and batching for the mimetype indexers; merged
# into the instance configuration by MixinMimetypeIndexer.__init__.
DEFAULT_CONFIG: Dict[str, Any] = {
    "tools": {
        "name": "file",
        "version": "1:5.30-1+deb9u1",
        "configuration": {"type": "library", "debian-package": "python3-magic"},
    },
    "write_batch_size": 1000,
}
class MixinMimetypeIndexer:
    """Shared implementation for the mimetype indexers.

    See :class:`MimetypeIndexer` and :class:`MimetypePartitionIndexer`
    """

    tool: Any
    idx_storage: IndexerStorageInterface

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Overlay user configuration on top of the module defaults.
        self.config = merge_configs(DEFAULT_CONFIG, self.config)

    def index(
        self, id: Sha1, data: Optional[bytes] = None, **kwargs
    ) -> List[ContentMimetypeRow]:
        """Detect the mimetype and encoding of one content.

        Args:
            id: content's identifier
            data: raw content in bytes

        Returns:
            single-element list holding the content's mimetype row
            (id, tool id, mimetype, encoding)
        """
        assert data is not None
        detected = compute_mimetype_encoding(data)
        row = ContentMimetypeRow(
            id=id,
            indexer_configuration_id=self.tool["id"],
            mimetype=detected["mimetype"],
            encoding=detected["encoding"],
        )
        return [row]

    def persist_index_computations(
        self, results: List[ContentMimetypeRow]
    ) -> Dict[str, int]:
        """Store the computed mimetype rows in the indexer storage.

        Args:
            results: list of content's mimetype rows
                (see :meth:`.index`)
        """
        return self.idx_storage.content_mimetype_add(results)
class MimetypeIndexer(MixinMimetypeIndexer, ContentIndexer[ContentMimetypeRow]):
"""Mimetype Indexer working on list of content identifiers.
It:
- (optionally) filters out content already indexed (cf.
:meth:`.filter`)
- reads content from objstorage per the content's id (sha1)
- computes {mimetype, encoding} from that content
- stores result in storage
"""
def filter(self, ids):
- """Filter out known sha1s and return only missing ones.
-
- """
+ """Filter out known sha1s and return only missing ones."""
yield from self.idx_storage.content_mimetype_missing(
- ({"id": sha1, "indexer_configuration_id": self.tool["id"],} for sha1 in ids)
+ (
+ {
+ "id": sha1,
+ "indexer_configuration_id": self.tool["id"],
+ }
+ for sha1 in ids
+ )
)
class MimetypePartitionIndexer(
MixinMimetypeIndexer, ContentPartitionIndexer[ContentMimetypeRow]
):
"""Mimetype Range Indexer working on range of content identifiers.
It:
- (optionally) filters out content already indexed (cf
:meth:`.indexed_contents_in_partition`)
- reads content from objstorage per the content's id (sha1)
- computes {mimetype, encoding} from that content
- stores result in storage
"""
def indexed_contents_in_partition(
- self, partition_id: int, nb_partitions: int,
+ self,
+ partition_id: int,
+ nb_partitions: int,
) -> Iterable[Sha1]:
"""Retrieve indexed content ids within partition_id.
Args:
partition_id: Index of the partition to fetch
nb_partitions: Total number of partitions to split into
page_token: opaque token used for pagination
"""
return stream_results(
self.idx_storage.content_mimetype_get_partition,
self.tool["id"],
partition_id,
nb_partitions,
)
diff --git a/swh/indexer/origin_head.py b/swh/indexer/origin_head.py
index fec42c8..1b955dd 100644
--- a/swh/indexer/origin_head.py
+++ b/swh/indexer/origin_head.py
@@ -1,154 +1,159 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import logging
import re
from typing import Any, Dict, List, Tuple, Union
import click
from swh.indexer.indexer import OriginIndexer
from swh.model.model import SnapshotBranch, TargetType
from swh.storage.algos.origin import origin_get_latest_visit_status
from swh.storage.algos.snapshot import snapshot_get_all_branches
class OriginHeadIndexer(OriginIndexer[Dict]):
"""Origin-level indexer.
This indexer is in charge of looking up the revision that acts as the
"head" of an origin.
In git, this is usually the commit pointed to by the 'master' branch."""
USE_TOOLS = False
    def persist_index_computations(self, results: Any) -> Dict[str, int]:
        """No-op: head-revision results are not persisted, they should only
        be piped to another indexer.

        Returns:
            an empty summary dict
        """
        return {}
# Dispatch
def index(self, id: str, data: None = None, **kwargs) -> List[Dict]:
origin_url = id
visit_status = origin_get_latest_visit_status(
self.storage, origin_url, allowed_statuses=["full"], require_snapshot=True
)
if not visit_status:
return []
assert visit_status.snapshot is not None
snapshot = snapshot_get_all_branches(self.storage, visit_status.snapshot)
if snapshot is None:
return []
method = getattr(
self, "_try_get_%s_head" % visit_status.type, self._try_get_head_generic
)
rev_id = method(snapshot.branches) # type: ignore
if rev_id is not None:
- return [{"origin_url": origin_url, "revision_id": rev_id,}]
+ return [
+ {
+ "origin_url": origin_url,
+ "revision_id": rev_id,
+ }
+ ]
# could not find a head revision
return []
# Tarballs
_archive_filename_re = re.compile(
rb"^"
rb"(?P<pkgname>.*)[-_]"
rb"(?P<version>[0-9]+(\.[0-9])*)"
rb"(?P<preversion>[-+][a-zA-Z0-9.~]+?)?"
rb"(?P<extension>(\.[a-zA-Z0-9]+)+)"
rb"$"
)
@classmethod
def _parse_version(cls: Any, filename: bytes) -> Tuple[Union[float, int], ...]:
"""Extracts the release version from an archive filename,
to get an ordering whose maximum is likely to be the last
version of the software
>>> OriginHeadIndexer._parse_version(b'foo')
(-inf,)
>>> OriginHeadIndexer._parse_version(b'foo.tar.gz')
(-inf,)
>>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1.tar.gz')
(0, 0, 1, 0)
>>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1-beta2.tar.gz')
(0, 0, 1, -1, 'beta2')
>>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1+foobar.tar.gz')
(0, 0, 1, 1, 'foobar')
"""
res = cls._archive_filename_re.match(filename)
if res is None:
return (float("-infinity"),)
version = [int(n) for n in res.group("version").decode().split(".")]
if res.group("preversion") is None:
version.append(0)
else:
preversion = res.group("preversion").decode()
if preversion.startswith("-"):
version.append(-1)
version.append(preversion[1:])
elif preversion.startswith("+"):
version.append(1)
version.append(preversion[1:])
else:
assert False, res.group("preversion")
return tuple(version)
def _try_get_ftp_head(self, branches: Dict[bytes, SnapshotBranch]) -> Any:
archive_names = list(branches)
max_archive_name = max(archive_names, key=self._parse_version)
r = self._try_resolve_target(branches, max_archive_name)
return r
# Generic
def _try_get_head_generic(self, branches: Dict[bytes, SnapshotBranch]) -> Any:
# Works on 'deposit', 'pypi', and VCSs.
return self._try_resolve_target(branches, b"HEAD") or self._try_resolve_target(
branches, b"master"
)
def _try_resolve_target(
self, branches: Dict[bytes, SnapshotBranch], branch_name: bytes
) -> Any:
try:
branch = branches[branch_name]
if branch is None:
return None
while branch.target_type == TargetType.ALIAS:
branch = branches[branch.target]
if branch is None:
return None
if branch.target_type == TargetType.REVISION:
return branch.target
elif branch.target_type == TargetType.CONTENT:
return None # TODO
elif branch.target_type == TargetType.DIRECTORY:
return None # TODO
elif branch.target_type == TargetType.RELEASE:
return None # TODO
else:
assert False, branch
except KeyError:
return None
@click.command()
@click.option(
    "--origins", "-i", help='Origins to lookup, in the "type+url" format', multiple=True
)
def main(origins: List[str]) -> None:
    """CLI helper: run the OriginHeadIndexer on the given origins."""
    rev_metadata_indexer = OriginHeadIndexer()
    rev_metadata_indexer.run(origins)
if __name__ == "__main__":
    # Script entry point: log at INFO and hand over to the click command.
    logging.basicConfig(level=logging.INFO)
    main()
diff --git a/swh/indexer/rehash.py b/swh/indexer/rehash.py
index 0db0f84..bcebffe 100644
--- a/swh/indexer/rehash.py
+++ b/swh/indexer/rehash.py
@@ -1,175 +1,173 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import defaultdict
import itertools
import logging
from typing import Any, Dict, Generator, List, Optional, Tuple
from swh.core import utils
from swh.core.config import load_from_envvar
from swh.model import hashutil
from swh.model.model import Content
from swh.objstorage.exc import ObjNotFoundError
from swh.objstorage.factory import get_objstorage
from swh.storage import get_storage
# Default configuration; overridden by the environment-provided configuration
# loaded through swh.core.config.load_from_envvar (see RecomputeChecksums).
DEFAULT_CONFIG: Dict[str, Any] = {
    "storage": {"cls": "memory"},
    "objstorage": {"cls": "memory"},
    # the set of checksums that should be computed.
    # Examples: 'sha1_git', 'blake2b512', 'blake2s256'
    "compute_checksums": [],
    # whether checksums that already exist in the DB should be
    # recomputed/updated or left untouched
    "recompute_checksums": False,
    # Number of contents to retrieve blobs at the same time
    "batch_size_retrieve_content": 10,
    # Number of contents to update at the same time
    "batch_size_update": 100,
}
class RecomputeChecksums:
"""Class in charge of (re)computing content's hashes.
Hashes to compute are defined across 2 configuration options:
compute_checksums ([str])
list of hash algorithms that
py:func:`swh.model.hashutil.MultiHash.from_data` function should
be able to deal with. For variable-length checksums, a desired
checksum length should also be provided. Their format is
<algorithm's name>:<variable-length> e.g: blake2:512
recompute_checksums (bool)
a boolean to notify that we also want to recompute potential existing
hashes specified in compute_checksums. Default to False.
"""
    def __init__(self) -> None:
        # Configuration is loaded from the environment (see
        # swh.core.config.load_from_envvar), with DEFAULT_CONFIG as fallback.
        self.config = load_from_envvar(DEFAULT_CONFIG)
        self.storage = get_storage(**self.config["storage"])
        self.objstorage = get_objstorage(**self.config["objstorage"])
        self.compute_checksums = self.config["compute_checksums"]
        self.recompute_checksums = self.config["recompute_checksums"]
        self.batch_size_retrieve_content = self.config["batch_size_retrieve_content"]
        self.batch_size_update = self.config["batch_size_update"]
        self.log = logging.getLogger("swh.indexer.rehash")
        # An empty checksum list would make the whole run a no-op.
        if not self.compute_checksums:
            raise ValueError("Checksums list should not be empty.")
def _read_content_ids(
self, contents: List[Dict[str, Any]]
) -> Generator[bytes, Any, None]:
- """Read the content identifiers from the contents.
-
- """
+ """Read the content identifiers from the contents."""
for c in contents:
h = c["sha1"]
if isinstance(h, str):
h = hashutil.hash_to_bytes(h)
yield h
def get_new_contents_metadata(
self, all_contents: List[Dict[str, Any]]
) -> Generator[Tuple[Dict[str, Any], List[Any]], Any, None]:
"""Retrieve raw contents and compute new checksums on the
contents. Unknown or corrupted contents are skipped.
Args:
all_contents: List of contents as dictionary with
the necessary primary keys
Yields:
tuple: tuple of (content to update, list of checksums computed)
"""
content_ids = self._read_content_ids(all_contents)
for contents in utils.grouper(content_ids, self.batch_size_retrieve_content):
contents_iter = itertools.tee(contents, 2)
try:
sha1s = [s for s in contents_iter[0]]
content_metadata: List[Optional[Content]] = self.storage.content_get(
sha1s
)
except Exception:
self.log.exception("Problem when reading contents metadata.")
continue
for sha1, content_model in zip(sha1s, content_metadata):
if not content_model:
continue
content: Dict = content_model.to_dict()
# Recompute checksums provided in compute_checksums options
if self.recompute_checksums:
checksums_to_compute = list(self.compute_checksums)
else:
# Compute checksums provided in compute_checksums
# options not already defined for that content
checksums_to_compute = [
h for h in self.compute_checksums if not content.get(h)
]
if not checksums_to_compute: # Nothing to recompute
continue
try:
raw_content = self.objstorage.get(sha1)
except ObjNotFoundError:
self.log.warning("Content %s not found in objstorage!", sha1)
continue
content_hashes = hashutil.MultiHash.from_data(
raw_content, hash_names=checksums_to_compute
).digest()
content.update(content_hashes)
yield content, checksums_to_compute
def run(self, contents: List[Dict[str, Any]]) -> Dict:
"""Given a list of content:
- (re)compute a given set of checksums on contents available in our
object storage
- update those contents with the new metadata
Args:
contents: contents as dictionary with necessary keys.
key present in such dictionary should be the ones defined in
the 'primary_key' option.
Returns:
A summary dict with key 'status', task' status and 'count' the
number of updated contents.
"""
status = "uneventful"
count = 0
for data in utils.grouper(
self.get_new_contents_metadata(contents), self.batch_size_update
):
groups: Dict[str, List[Any]] = defaultdict(list)
for content, keys_to_update in data:
keys_str = ",".join(keys_to_update)
groups[keys_str].append(content)
for keys_to_update, contents in groups.items():
keys: List[str] = keys_to_update.split(",")
try:
self.storage.content_update(contents, keys=keys)
count += len(contents)
status = "eventful"
except Exception:
self.log.exception("Problem during update.")
continue
return {
"status": status,
"count": count,
}
diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/__init__.py
index edb8704..da5fc27 100644
--- a/swh/indexer/storage/__init__.py
+++ b/swh/indexer/storage/__init__.py
@@ -1,729 +1,751 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import Counter
from importlib import import_module
import json
from typing import Dict, Iterable, List, Optional, Tuple, Union
import warnings
import psycopg2
import psycopg2.pool
from swh.core.db.common import db_transaction
from swh.indexer.storage.interface import IndexerStorageInterface
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import SHA1_SIZE
from swh.storage.exc import StorageDBError
from swh.storage.utils import get_partition_bounds_bytes
from . import converters
from .db import Db
from .exc import DuplicateId, IndexerStorageArgumentException
from .interface import PagedResult, Sha1
from .metrics import process_metrics, send_metric, timed
from .model import (
ContentCtagsRow,
ContentLanguageRow,
ContentLicenseRow,
ContentMetadataRow,
ContentMimetypeRow,
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
from .writer import JournalWriter
# Configuration key under which the indexer storage settings live.
INDEXER_CFG_KEY = "indexer_storage"
# Known intrinsic-metadata mapping names.
MAPPING_NAMES = ["cff", "codemeta", "gemspec", "maven", "npm", "pkg-info"]
# Maps a backend name to the dotted path of its class, relative to this
# package (resolved by get_indexer_storage below).
SERVER_IMPLEMENTATIONS: Dict[str, str] = {
    "postgresql": ".IndexerStorage",
    "remote": ".api.client.RemoteStorage",
    "memory": ".in_memory.IndexerStorage",
    # deprecated
    "local": ".IndexerStorage",
}
def get_indexer_storage(cls: str, **kwargs) -> IndexerStorageInterface:
    """Instantiate an indexer storage implementation of class `cls` with arguments
    `kwargs`.

    Args:
        cls: indexer storage class (local, remote or memory)
        kwargs: dictionary of arguments passed to the
            indexer storage class constructor

    Returns:
        an instance of swh.indexer.storage

    Raises:
        ValueError if passed an unknown storage class.

    """
    if "args" in kwargs:
        # Legacy nesting of the constructor arguments.
        warnings.warn(
            'Explicit "args" key is deprecated, use keys directly instead.',
            DeprecationWarning,
        )
        kwargs = kwargs["args"]
    class_path = SERVER_IMPLEMENTATIONS.get(cls)
    if class_path is None:
        raise ValueError(
            f"Unknown indexer storage class `{cls}`. "
            f"Supported: {', '.join(SERVER_IMPLEMENTATIONS)}"
        )
    # Resolve the dotted path relative to this package.
    module_path, class_name = class_path.rsplit(".", 1)
    module = import_module(module_path or ".", package=__package__)
    backend_class = getattr(module, class_name)
    check_config = kwargs.pop("check_config", {})
    instance = backend_class(**kwargs)
    if check_config and not instance.check_config(**check_config):
        raise EnvironmentError("Indexer storage check config failed")
    return instance
def check_id_duplicates(data):
    """
    If any two row models in `data` have the same unique key, raises
    a `ValueError`.

    Values associated to the key must be hashable.

    Args:
        data (List[dict]): List of dictionaries to be inserted

    >>> check_id_duplicates([
    ...     ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"),
    ...     ContentLanguageRow(id=b'foo', indexer_configuration_id=32, lang="python"),
    ... ])
    >>> check_id_duplicates([
    ...     ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"),
    ...     ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"),
    ... ])
    Traceback (most recent call last):
      ...
    swh.indexer.storage.exc.DuplicateId: [{'id': b'foo', 'indexer_configuration_id': 42}]
    """  # noqa
    # A row's identity is its unique_key(), normalized to a hashable tuple.
    key_counts = Counter(tuple(sorted(row.unique_key().items())) for row in data)
    duplicate_keys = [key for key, n in key_counts.items() if n > 1]
    if duplicate_keys:
        raise DuplicateId([dict(key) for key in duplicate_keys])
class IndexerStorage:
- """SWH Indexer Storage
-
- """
+ """SWH Indexer Storage"""
    def __init__(self, db, min_pool_conns=1, max_pool_conns=10, journal_writer=None):
        """
        Args:
            db: either a libpq connection string, or a psycopg2 connection
            min_pool_conns: minimum number of pooled connections (only used
                when `db` is a connection string)
            max_pool_conns: maximum number of pooled connections
            journal_writer: configuration passed to
                `swh.journal.writer.get_journal_writer`

        Raises:
            StorageDBError: if the connection or pool cannot be established
        """
        self.journal_writer = JournalWriter(self._tool_get_from_id, journal_writer)
        try:
            # An existing psycopg2 connection is wrapped as-is (no pool);
            # a connection string gets a thread-safe connection pool.
            if isinstance(db, psycopg2.extensions.connection):
                self._pool = None
                self._db = Db(db)
            else:
                self._pool = psycopg2.pool.ThreadedConnectionPool(
                    min_pool_conns, max_pool_conns, db
                )
                self._db = None
        except psycopg2.OperationalError as e:
            raise StorageDBError(e)
def get_db(self):
if self._db:
return self._db
return Db.from_pool(self._pool)
    def put_db(self, db):
        """Return `db` to the pool, unless it is the dedicated connection."""
        if db is not self._db:
            db.put_conn()
    @db_transaction()
    def get_current_version(self, *, db=None, cur=None):
        """Return the database schema version this code expects."""
        return db.current_version
    @timed
    @db_transaction()
    def check_config(self, *, check_write, db=None, cur=None):
        """Check the storage is reachable and the current user has the
        required privileges.

        Args:
            check_write: check INSERT privilege when True, SELECT otherwise.

        Returns:
            bool: whether the privilege is granted on content_mimetype.
        """
        # Check permissions on one of the tables
        if check_write:
            check = "INSERT"
        else:
            check = "SELECT"
        cur.execute(
            "select has_table_privilege(current_user, 'content_mimetype', %s)",  # noqa
            (check,),
        )
        return cur.fetchone()[0]
    @timed
    @db_transaction()
    def content_mimetype_missing(
        self, mimetypes: Iterable[Dict], db=None, cur=None
    ) -> List[Tuple[Sha1, int]]:
        """Return, for each entry of `mimetypes` with no matching row in the
        db, the first key column of the missing row (the content id)."""
        return [obj[0] for obj in db.content_mimetype_missing_from_list(mimetypes, cur)]
@timed
@db_transaction()
def get_partition(
self,
indexer_type: str,
indexer_configuration_id: int,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
with_textual_data=False,
db=None,
cur=None,
) -> PagedResult[Sha1]:
"""Retrieve ids of content with `indexer_type` within within partition partition_id
bound by limit.
Args:
**indexer_type**: Type of data content to index (mimetype, language, etc...)
**indexer_configuration_id**: The tool used to index data
**partition_id**: index of the partition to fetch
**nb_partitions**: total number of partitions to split into
**page_token**: opaque token used for pagination
**limit**: Limit result (default to 1000)
**with_textual_data** (bool): Deal with only textual content (True) or all
content (all contents by defaults, False)
Raises:
IndexerStorageArgumentException for;
- limit to None
- wrong indexer_type provided
Returns:
PagedResult of Sha1. If next_page_token is None, there is no more data to
fetch
"""
if limit is None:
raise IndexerStorageArgumentException("limit should not be None")
if indexer_type not in db.content_indexer_names:
err = f"Wrong type. Should be one of [{','.join(db.content_indexer_names)}]"
raise IndexerStorageArgumentException(err)
start, end = get_partition_bounds_bytes(partition_id, nb_partitions, SHA1_SIZE)
if page_token is not None:
start = hash_to_bytes(page_token)
if end is None:
end = b"\xff" * SHA1_SIZE
next_page_token: Optional[str] = None
ids = [
row[0]
for row in db.content_get_range(
indexer_type,
start,
end,
indexer_configuration_id,
limit=limit + 1,
with_textual_data=with_textual_data,
cur=cur,
)
]
if len(ids) >= limit:
next_page_token = hash_to_hex(ids[-1])
ids = ids[:limit]
assert len(ids) <= limit
return PagedResult(results=ids, next_page_token=next_page_token)
    @timed
    @db_transaction()
    def content_mimetype_get_partition(
        self,
        indexer_configuration_id: int,
        partition_id: int,
        nb_partitions: int,
        page_token: Optional[str] = None,
        limit: int = 1000,
        db=None,
        cur=None,
    ) -> PagedResult[Sha1]:
        """Delegate to :meth:`get_partition` for the 'mimetype' indexer."""
        return self.get_partition(
            "mimetype",
            indexer_configuration_id,
            partition_id,
            nb_partitions,
            page_token=page_token,
            limit=limit,
            db=db,
            cur=cur,
        )
@timed
@process_metrics
@db_transaction()
def content_mimetype_add(
- self, mimetypes: List[ContentMimetypeRow], db=None, cur=None,
+ self,
+ mimetypes: List[ContentMimetypeRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(mimetypes)
mimetypes.sort(key=lambda m: m.id)
self.journal_writer.write_additions("content_mimetype", mimetypes)
db.mktemp_content_mimetype(cur)
db.copy_to(
[m.to_dict() for m in mimetypes],
"tmp_content_mimetype",
["id", "mimetype", "encoding", "indexer_configuration_id"],
cur,
)
count = db.content_mimetype_add_from_temp(cur)
return {"content_mimetype:add": count}
    @timed
    @db_transaction()
    def content_mimetype_get(
        self, ids: Iterable[Sha1], db=None, cur=None
    ) -> List[ContentMimetypeRow]:
        """Fetch mimetype rows for the given content ids, converting each
        raw db row into a ContentMimetypeRow."""
        return [
            ContentMimetypeRow.from_dict(
                converters.db_to_mimetype(dict(zip(db.content_mimetype_cols, c)))
            )
            for c in db.content_mimetype_get_from_list(ids, cur)
        ]
    @timed
    @db_transaction()
    def content_language_missing(
        self, languages: Iterable[Dict], db=None, cur=None
    ) -> List[Tuple[Sha1, int]]:
        """Return, for each entry of `languages` with no matching row in the
        db, the first key column of the missing row (the content id)."""
        return [obj[0] for obj in db.content_language_missing_from_list(languages, cur)]
    @timed
    @db_transaction()
    def content_language_get(
        self, ids: Iterable[Sha1], db=None, cur=None
    ) -> List[ContentLanguageRow]:
        """Fetch language rows for the given content ids, converting each
        raw db row into a ContentLanguageRow."""
        return [
            ContentLanguageRow.from_dict(
                converters.db_to_language(dict(zip(db.content_language_cols, c)))
            )
            for c in db.content_language_get_from_list(ids, cur)
        ]
@timed
@process_metrics
@db_transaction()
def content_language_add(
- self, languages: List[ContentLanguageRow], db=None, cur=None,
+ self,
+ languages: List[ContentLanguageRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(languages)
languages.sort(key=lambda m: m.id)
self.journal_writer.write_additions("content_language", languages)
db.mktemp_content_language(cur)
# empty language is mapped to 'unknown'
db.copy_to(
(
{
"id": lang.id,
"lang": lang.lang or "unknown",
"indexer_configuration_id": lang.indexer_configuration_id,
}
for lang in languages
),
"tmp_content_language",
["id", "lang", "indexer_configuration_id"],
cur,
)
count = db.content_language_add_from_temp(cur)
return {"content_language:add": count}
    @timed
    @db_transaction()
    def content_ctags_missing(
        self, ctags: Iterable[Dict], db=None, cur=None
    ) -> List[Tuple[Sha1, int]]:
        """Return, for each entry of `ctags` with no matching row in the db,
        the first key column of the missing row (the content id)."""
        return [obj[0] for obj in db.content_ctags_missing_from_list(ctags, cur)]
    @timed
    @db_transaction()
    def content_ctags_get(
        self, ids: Iterable[Sha1], db=None, cur=None
    ) -> List[ContentCtagsRow]:
        """Fetch ctags rows for the given content ids, converting each raw
        db row into a ContentCtagsRow."""
        return [
            ContentCtagsRow.from_dict(
                converters.db_to_ctags(dict(zip(db.content_ctags_cols, c)))
            )
            for c in db.content_ctags_get_from_list(ids, cur)
        ]
@timed
@process_metrics
@db_transaction()
def content_ctags_add(
- self, ctags: List[ContentCtagsRow], db=None, cur=None,
+ self,
+ ctags: List[ContentCtagsRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(ctags)
ctags.sort(key=lambda m: m.id)
self.journal_writer.write_additions("content_ctags", ctags)
db.mktemp_content_ctags(cur)
db.copy_to(
[ctag.to_dict() for ctag in ctags],
tblname="tmp_content_ctags",
columns=["id", "name", "kind", "line", "lang", "indexer_configuration_id"],
cur=cur,
)
count = db.content_ctags_add_from_temp(cur)
return {"content_ctags:add": count}
    @timed
    @db_transaction()
    def content_ctags_search(
        self,
        expression: str,
        limit: int = 10,
        last_sha1: Optional[Sha1] = None,
        db=None,
        cur=None,
    ) -> List[ContentCtagsRow]:
        """Search ctags by symbol `expression`, returning at most `limit`
        rows; a non-None `last_sha1` resumes a previous search from that
        content id."""
        return [
            ContentCtagsRow.from_dict(
                converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj)))
            )
            for obj in db.content_ctags_search(expression, last_sha1, limit, cur=cur)
        ]
    @timed
    @db_transaction()
    def content_fossology_license_get(
        self, ids: Iterable[Sha1], db=None, cur=None
    ) -> List[ContentLicenseRow]:
        """Fetch license rows for the given content ids, converting each
        raw db row into a ContentLicenseRow."""
        return [
            ContentLicenseRow.from_dict(
                converters.db_to_fossology_license(
                    dict(zip(db.content_fossology_license_cols, c))
                )
            )
            for c in db.content_fossology_license_get_from_list(ids, cur)
        ]
@timed
@process_metrics
@db_transaction()
def content_fossology_license_add(
- self, licenses: List[ContentLicenseRow], db=None, cur=None,
+ self,
+ licenses: List[ContentLicenseRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(licenses)
licenses.sort(key=lambda m: m.id)
self.journal_writer.write_additions("content_fossology_license", licenses)
db.mktemp_content_fossology_license(cur)
db.copy_to(
[license.to_dict() for license in licenses],
tblname="tmp_content_fossology_license",
columns=["id", "license", "indexer_configuration_id"],
cur=cur,
)
count = db.content_fossology_license_add_from_temp(cur)
return {"content_fossology_license:add": count}
    @timed
    @db_transaction()
    def content_fossology_license_get_partition(
        self,
        indexer_configuration_id: int,
        partition_id: int,
        nb_partitions: int,
        page_token: Optional[str] = None,
        limit: int = 1000,
        db=None,
        cur=None,
    ) -> PagedResult[Sha1]:
        """Delegate to :meth:`get_partition` for the 'fossology_license'
        indexer, restricted to textual contents (with_textual_data=True)."""
        return self.get_partition(
            "fossology_license",
            indexer_configuration_id,
            partition_id,
            nb_partitions,
            page_token=page_token,
            limit=limit,
            with_textual_data=True,
            db=db,
            cur=cur,
        )
    @timed
    @db_transaction()
    def content_metadata_missing(
        self, metadata: Iterable[Dict], db=None, cur=None
    ) -> List[Tuple[Sha1, int]]:
        """Return, for each entry of `metadata` with no matching row in the
        db, the first key column of the missing row (the content id)."""
        return [obj[0] for obj in db.content_metadata_missing_from_list(metadata, cur)]
    @timed
    @db_transaction()
    def content_metadata_get(
        self, ids: Iterable[Sha1], db=None, cur=None
    ) -> List[ContentMetadataRow]:
        """Fetch metadata rows for the given content ids, converting each
        raw db row into a ContentMetadataRow."""
        return [
            ContentMetadataRow.from_dict(
                converters.db_to_metadata(dict(zip(db.content_metadata_cols, c)))
            )
            for c in db.content_metadata_get_from_list(ids, cur)
        ]
@timed
@process_metrics
@db_transaction()
def content_metadata_add(
- self, metadata: List[ContentMetadataRow], db=None, cur=None,
+ self,
+ metadata: List[ContentMetadataRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(metadata)
metadata.sort(key=lambda m: m.id)
self.journal_writer.write_additions("content_metadata", metadata)
db.mktemp_content_metadata(cur)
db.copy_to(
[m.to_dict() for m in metadata],
"tmp_content_metadata",
["id", "metadata", "indexer_configuration_id"],
cur,
)
count = db.content_metadata_add_from_temp(cur)
return {
"content_metadata:add": count,
}
    @timed
    @db_transaction()
    def revision_intrinsic_metadata_missing(
        self, metadata: Iterable[Dict], db=None, cur=None
    ) -> List[Tuple[Sha1, int]]:
        """Return, for each entry of `metadata` with no matching row in the
        db, the first key column of the missing row (the revision id)."""
        return [
            obj[0]
            for obj in db.revision_intrinsic_metadata_missing_from_list(metadata, cur)
        ]
    @timed
    @db_transaction()
    def revision_intrinsic_metadata_get(
        self, ids: Iterable[Sha1], db=None, cur=None
    ) -> List[RevisionIntrinsicMetadataRow]:
        """Fetch intrinsic-metadata rows for the given revision ids,
        converting each raw db row into a RevisionIntrinsicMetadataRow."""
        return [
            RevisionIntrinsicMetadataRow.from_dict(
                converters.db_to_metadata(
                    dict(zip(db.revision_intrinsic_metadata_cols, c))
                )
            )
            for c in db.revision_intrinsic_metadata_get_from_list(ids, cur)
        ]
@timed
@process_metrics
@db_transaction()
def revision_intrinsic_metadata_add(
- self, metadata: List[RevisionIntrinsicMetadataRow], db=None, cur=None,
+ self,
+ metadata: List[RevisionIntrinsicMetadataRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(metadata)
metadata.sort(key=lambda m: m.id)
self.journal_writer.write_additions("revision_intrinsic_metadata", metadata)
db.mktemp_revision_intrinsic_metadata(cur)
db.copy_to(
[m.to_dict() for m in metadata],
"tmp_revision_intrinsic_metadata",
["id", "metadata", "mappings", "indexer_configuration_id"],
cur,
)
count = db.revision_intrinsic_metadata_add_from_temp(cur)
return {
"revision_intrinsic_metadata:add": count,
}
    @timed
    @db_transaction()
    def origin_intrinsic_metadata_get(
        self, urls: Iterable[str], db=None, cur=None
    ) -> List[OriginIntrinsicMetadataRow]:
        """Fetch intrinsic-metadata rows for the given origin urls,
        converting each raw db row into an OriginIntrinsicMetadataRow."""
        return [
            OriginIntrinsicMetadataRow.from_dict(
                converters.db_to_metadata(
                    dict(zip(db.origin_intrinsic_metadata_cols, c))
                )
            )
            for c in db.origin_intrinsic_metadata_get_from_list(urls, cur)
        ]
@timed
@process_metrics
@db_transaction()
def origin_intrinsic_metadata_add(
- self, metadata: List[OriginIntrinsicMetadataRow], db=None, cur=None,
+ self,
+ metadata: List[OriginIntrinsicMetadataRow],
+ db=None,
+ cur=None,
) -> Dict[str, int]:
check_id_duplicates(metadata)
metadata.sort(key=lambda m: m.id)
self.journal_writer.write_additions("origin_intrinsic_metadata", metadata)
db.mktemp_origin_intrinsic_metadata(cur)
db.copy_to(
[m.to_dict() for m in metadata],
"tmp_origin_intrinsic_metadata",
["id", "metadata", "indexer_configuration_id", "from_revision", "mappings"],
cur,
)
count = db.origin_intrinsic_metadata_add_from_temp(cur)
return {
"origin_intrinsic_metadata:add": count,
}
    @timed
    @db_transaction()
    def origin_intrinsic_metadata_search_fulltext(
        self, conjunction: List[str], limit: int = 100, db=None, cur=None
    ) -> List[OriginIntrinsicMetadataRow]:
        """Full-text search for origin intrinsic metadata matching every
        term of `conjunction`, converting raw db rows to model rows."""
        return [
            OriginIntrinsicMetadataRow.from_dict(
                converters.db_to_metadata(
                    dict(zip(db.origin_intrinsic_metadata_cols, c))
                )
            )
            for c in db.origin_intrinsic_metadata_search_fulltext(
                conjunction, limit=limit, cur=cur
            )
        ]
@timed
@db_transaction()
def origin_intrinsic_metadata_search_by_producer(
self,
page_token: str = "",
limit: int = 100,
ids_only: bool = False,
mappings: Optional[List[str]] = None,
tool_ids: Optional[List[int]] = None,
db=None,
cur=None,
) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]:
assert isinstance(page_token, str)
# we go to limit+1 to check whether we should add next_page_token in
# the response
rows = db.origin_intrinsic_metadata_search_by_producer(
page_token, limit + 1, ids_only, mappings, tool_ids, cur
)
next_page_token = None
if ids_only:
results = [origin for (origin,) in rows]
if len(results) > limit:
results[limit:] = []
next_page_token = results[-1]
else:
results = [
OriginIntrinsicMetadataRow.from_dict(
converters.db_to_metadata(
dict(zip(db.origin_intrinsic_metadata_cols, row))
)
)
for row in rows
]
if len(results) > limit:
results[limit:] = []
next_page_token = results[-1].id
- return PagedResult(results=results, next_page_token=next_page_token,)
+ return PagedResult(
+ results=results,
+ next_page_token=next_page_token,
+ )
@timed
@db_transaction()
def origin_intrinsic_metadata_stats(self, db=None, cur=None):
mapping_names = [m for m in MAPPING_NAMES]
select_parts = []
# Count rows for each mapping
for mapping_name in mapping_names:
select_parts.append(
(
"sum(case when (mappings @> ARRAY['%s']) "
" then 1 else 0 end)"
)
% mapping_name
)
# Total
select_parts.append("sum(1)")
# Rows whose metadata has at least one key that is not '@context'
select_parts.append(
"sum(case when ('{}'::jsonb @> (metadata - '@context')) "
" then 0 else 1 end)"
)
cur.execute(
"select " + ", ".join(select_parts) + " from origin_intrinsic_metadata"
)
results = dict(zip(mapping_names + ["total", "non_empty"], cur.fetchone()))
return {
"total": results.pop("total"),
"non_empty": results.pop("non_empty"),
"per_mapping": results,
}
    @timed
    @db_transaction()
    def indexer_configuration_add(self, tools, db=None, cur=None):
        """Add the given tools to the database and return their full rows.

        Args:
            tools: iterable of dicts with keys tool_name, tool_version and
                tool_configuration.

        Returns:
            list of dicts: one per tool returned by the db, with all
            indexer_configuration columns (including the tool id).
        """
        db.mktemp_indexer_configuration(cur)
        db.copy_to(
            tools,
            "tmp_indexer_configuration",
            ["tool_name", "tool_version", "tool_configuration"],
            cur,
        )
        tools = db.indexer_configuration_add_from_temp(cur)
        results = [dict(zip(db.indexer_configuration_cols, line)) for line in tools]
        send_metric(
            "indexer_configuration:add",
            len(results),
            method_name="indexer_configuration_add",
        )
        return results
    @timed
    @db_transaction()
    def indexer_configuration_get(self, tool, db=None, cur=None):
        """Look up a tool by (name, version, configuration); return its db
        row as a dict, or None when no matching row exists."""
        tool_conf = tool["tool_configuration"]
        if isinstance(tool_conf, dict):
            # the lookup compares against the JSON text form of the config
            tool_conf = json.dumps(tool_conf)
        idx = db.indexer_configuration_get(
            tool["tool_name"], tool["tool_version"], tool_conf
        )
        if not idx:
            return None
        return dict(zip(db.indexer_configuration_cols, idx))
    @db_transaction()
    def _tool_get_from_id(self, id_, db, cur):
        """Fetch the tool with database id `id_` and reshape its columns
        into the api-level {id, name, version, configuration} dict.

        Used as the tool getter of the JournalWriter (see __init__).
        """
        tool = dict(
            zip(
                db.indexer_configuration_cols,
                db.indexer_configuration_get_from_id(id_, cur),
            )
        )
        return {
            "id": tool["id"],
            "name": tool["tool_name"],
            "version": tool["tool_version"],
            "configuration": tool["tool_configuration"],
        }
diff --git a/swh/indexer/storage/api/server.py b/swh/indexer/storage/api/server.py
index a6d6f32..adc59fe 100644
--- a/swh/indexer/storage/api/server.py
+++ b/swh/indexer/storage/api/server.py
@@ -1,120 +1,120 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import logging
import os
from typing import Any, Dict, Optional
from swh.core import config
from swh.core.api import RPCServerApp
from swh.core.api import encode_data_server as encode_data
from swh.core.api import error_handler
from swh.indexer.storage import INDEXER_CFG_KEY, get_indexer_storage
from swh.indexer.storage.exc import IndexerStorageArgumentException
from swh.indexer.storage.interface import IndexerStorageInterface
from .serializers import DECODERS, ENCODERS
def get_storage():
    """Lazily instantiate the configured indexer storage backend, caching
    it in the module-level ``storage`` global."""
    global storage
    if not storage:
        storage = get_indexer_storage(**app.config[INDEXER_CFG_KEY])
    return storage
class IndexerStorageServerApp(RPCServerApp):
    """RPC server app wired with the indexer storage's extra type
    (de)serializers."""

    extra_type_decoders = DECODERS
    extra_type_encoders = ENCODERS
app = IndexerStorageServerApp(
__name__, backend_class=IndexerStorageInterface, backend_factory=get_storage
)
storage = None
@app.errorhandler(Exception)
def my_error_handler(exception):
    """Encode any uncaught exception as an RPC error response."""
    return error_handler(exception, encode_data)
@app.errorhandler(IndexerStorageArgumentException)
def argument_error_handler(exception):
    """Encode bad-argument errors as RPC error responses with HTTP 400."""
    return error_handler(exception, encode_data, status_code=400)
@app.route("/")
def index():
    """Plain-text banner served at the API root."""
    return "SWH Indexer Storage API server"
api_cfg = None
def load_and_check_config(
    config_path: Optional[str], type: str = "local"
) -> Dict[str, Any]:
    """Check the minimal configuration is set to run the api or raise an
    error explanation.

    Args:
        config_path: Path to the configuration file to load
        type: configuration type. For 'local' type, more
              checks are done.

    Raises:
        EnvironmentError: when config_path is not provided
        FileNotFoundError: when config_path does not exist
        KeyError: when the 'indexer_storage' section is missing
        ValueError: when the 'local' backend config is incomplete

    Returns:
        configuration as a dict
    """
    if not config_path:
        raise EnvironmentError("Configuration file must be defined")
    if not os.path.exists(config_path):
        raise FileNotFoundError(f"Configuration file {config_path} does not exist")
    cfg = config.read(config_path)
    if "indexer_storage" not in cfg:
        # message fixed: it previously read "'%indexer_storage'", a stray
        # leftover from %-formatting
        raise KeyError("Missing 'indexer_storage' configuration")
    if type == "local":
        vcfg = cfg["indexer_storage"]
        cls = vcfg.get("cls")
        if cls != "local":
            raise ValueError(
                "The indexer_storage backend can only be started with a "
                "'local' configuration"
            )
        if not vcfg.get("db"):
            raise ValueError("Invalid configuration; missing 'db' config entry")
    return cfg
def make_app_from_configfile():
"""Run the WSGI app from the webserver, loading the configuration from
- a configuration file.
+ a configuration file.
- SWH_CONFIG_FILENAME environment variable defines the
- configuration path to load.
+ SWH_CONFIG_FILENAME environment variable defines the
+ configuration path to load.
"""
global api_cfg
if not api_cfg:
config_path = os.environ.get("SWH_CONFIG_FILENAME")
api_cfg = load_and_check_config(config_path)
app.config.update(api_cfg)
handler = logging.StreamHandler()
app.logger.addHandler(handler)
return app
if __name__ == "__main__":
print("Deprecated. Use swh-indexer")
diff --git a/swh/indexer/storage/converters.py b/swh/indexer/storage/converters.py
index 7141664..c74bb68 100644
--- a/swh/indexer/storage/converters.py
+++ b/swh/indexer/storage/converters.py
@@ -1,141 +1,135 @@
# Copyright (C) 2015-2017 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
def ctags_to_db(ctags):
    """Expand a grouped ctags entry into one db-ready row per symbol.

    Args:
        ctags (dict): ctags entry with the following keys:
          - id (bytes): content's identifier
          - indexer_configuration_id (int): tool id used to compute ctags
          - ctags ([dict]): symbols, each with keys name (str), kind (str),
            line (int) and lang (str)

    Yields:
        dict: one row per symbol with keys id, name, kind, line, lang and
        indexer_configuration_id
    """
    content_id = ctags["id"]
    tool_id = ctags["indexer_configuration_id"]
    for symbol in ctags["ctags"]:
        row = {"id": content_id}
        row.update((key, symbol[key]) for key in ("name", "kind", "line", "lang"))
        row["indexer_configuration_id"] = tool_id
        yield row
def db_to_ctags(ctag):
    """Convert a flat db ctags row into the api-level ctags dict.

    Args:
        ctag (dict): flat row with keys id, name, kind, line, lang and the
          tool_id/tool_name/tool_version/tool_configuration columns

    Returns:
        dict: the same symbol information with the tool_* columns folded
        into a nested 'tool' dict
    """
    tool_info = {
        "id": ctag["tool_id"],
        "name": ctag["tool_name"],
        "version": ctag["tool_version"],
        "configuration": ctag["tool_configuration"],
    }
    result = {key: ctag[key] for key in ("id", "name", "kind", "line", "lang")}
    result["tool"] = tool_info
    return result
def db_to_mimetype(mimetype):
- """Convert a ctags entry into a ready ctags output.
-
- """
+ """Convert a ctags entry into a ready ctags output."""
return {
"id": mimetype["id"],
"encoding": mimetype["encoding"],
"mimetype": mimetype["mimetype"],
"tool": {
"id": mimetype["tool_id"],
"name": mimetype["tool_name"],
"version": mimetype["tool_version"],
"configuration": mimetype["tool_configuration"],
},
}
def db_to_language(language):
- """Convert a language entry into a ready language output.
-
- """
+ """Convert a language entry into a ready language output."""
return {
"id": language["id"],
"lang": language["lang"],
"tool": {
"id": language["tool_id"],
"name": language["tool_name"],
"version": language["tool_version"],
"configuration": language["tool_configuration"],
},
}
def db_to_metadata(metadata):
- """Convert a metadata entry into a ready metadata output.
-
- """
+ """Convert a metadata entry into a ready metadata output."""
metadata["tool"] = {
"id": metadata["tool_id"],
"name": metadata["tool_name"],
"version": metadata["tool_version"],
"configuration": metadata["tool_configuration"],
}
del metadata["tool_id"], metadata["tool_configuration"]
del metadata["tool_version"], metadata["tool_name"]
return metadata
def db_to_fossology_license(license):
    """Fold the flat tool_* columns of a license db row into a nested
    'tool' dict, keeping the api-level id and license keys."""
    tool_info = {
        "id": license["tool_id"],
        "name": license["tool_name"],
        "version": license["tool_version"],
        "configuration": license["tool_configuration"],
    }
    return {
        "id": license["id"],
        "license": license["license"],
        "tool": tool_info,
    }
diff --git a/swh/indexer/storage/db.py b/swh/indexer/storage/db.py
index 6526625..e8e8d31 100644
--- a/swh/indexer/storage/db.py
+++ b/swh/indexer/storage/db.py
@@ -1,551 +1,535 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Dict, Iterable, Iterator, List
from swh.core.db import BaseDb
from swh.core.db.db_utils import execute_values_generator, stored_procedure
from swh.model import hashutil
from .interface import Sha1
class Db(BaseDb):
- """Proxy to the SWH Indexer DB, with wrappers around stored procedures
-
- """
+ """Proxy to the SWH Indexer DB, with wrappers around stored procedures"""
content_mimetype_hash_keys = ["id", "indexer_configuration_id"]
current_version = 133
    def _missing_from_list(
        self, table: str, data: Iterable[Dict], hash_keys: List[str], cur=None
    ):
        """Read from table the data with hash_keys that are missing.
        Args:
            table: Table name (e.g content_mimetype, content_language,
              etc...)
            data: iterable of dicts, each holding at least the hash_keys
            hash_keys: List of keys to read in the data dict.
        Yields:
            The data which is missing from the db.
        """
        cur = self._cursor(cur)
        keys = ", ".join(hash_keys)
        equality = " AND ".join(("t.%s = c.%s" % (key, key)) for key in hash_keys)
        # table and hash_keys are trusted identifiers supplied by this
        # module, never user input; only the VALUES payload is parameterized.
        yield from execute_values_generator(
            cur,
            """
            select %s from (values %%s) as t(%s)
            where not exists (
                select 1 from %s c
                where %s
            )
            """
            % (keys, keys, table, equality),
            (tuple(m[k] for k in hash_keys) for m in data),
        )
def content_mimetype_missing_from_list(
self, mimetypes: Iterable[Dict], cur=None
) -> Iterator[Sha1]:
- """List missing mimetypes.
-
- """
+ """List missing mimetypes."""
yield from self._missing_from_list(
"content_mimetype", mimetypes, self.content_mimetype_hash_keys, cur=cur
)
content_mimetype_cols = [
"id",
"mimetype",
"encoding",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
]
    @stored_procedure("swh_mktemp_content_mimetype")
    def mktemp_content_mimetype(self, cur=None):
        """Create the tmp_content_mimetype table; the actual call is made
        by the stored_procedure decorator."""
        pass
    def content_mimetype_add_from_temp(self, cur=None):
        """Run swh_content_mimetype_add() to flush the temporary table and
        return its scalar result (the number of rows added)."""
        cur = self._cursor(cur)
        cur.execute("select * from swh_content_mimetype_add()")
        return cur.fetchone()[0]
    def _convert_key(self, key, main_table="c"):
        """Convert keys according to specific use in the module.
        Args:
            key (str): Key expression to change according to the alias
              used in the query
            main_table (str): Alias to use for the main table. Default
              to c for content_{something}.
        Expected:
            Tables content_{something} being aliased as 'c' (something
            in {language, mimetype, ...}), table indexer_configuration
            being aliased as 'i'.
        Returns:
            str: the column expression to put in a SELECT list; keys with
            no special handling are returned unchanged.
        """
        if key == "id":
            return "%s.id" % main_table
        elif key == "tool_id":
            return "i.id as tool_id"
        elif key == "license":
            # the license name lives in the fossology_license table, keyed
            # by the main table's license_id
            return (
                """
                (
                    select name
                    from fossology_license
                    where id = %s.license_id
                )
                as licenses"""
                % main_table
            )
        return key
    def _get_from_list(self, table, ids, cols, cur=None, id_col="id"):
        """Fetches entries from the `table` such that their `id` field
        (or whatever is given to `id_col`) is in `ids`.
        Returns the columns `cols`.
        The `cur` parameter is used to connect to the database.
        """
        cur = self._cursor(cur)
        # column names go through _convert_key to pick up table aliases;
        # table/cols/id_col are trusted identifiers from this module.
        keys = map(self._convert_key, cols)
        query = """
            select {keys}
            from (values %s) as t(id)
            inner join {table} c
            on c.{id_col}=t.id
            inner join indexer_configuration i
            on c.indexer_configuration_id=i.id;
            """.format(
            keys=", ".join(keys), id_col=id_col, table=table
        )
        yield from execute_values_generator(cur, query, ((_id,) for _id in ids))
content_indexer_names = {
"mimetype": "content_mimetype",
"fossology_license": "content_fossology_license",
}
def content_get_range(
self,
content_type,
start,
end,
indexer_configuration_id,
limit=1000,
with_textual_data=False,
cur=None,
):
"""Retrieve contents with content_type, within range [start, end]
- bound by limit and associated to the given indexer
- configuration id.
+ bound by limit and associated to the given indexer
+ configuration id.
- When asking to work on textual content, that filters on the
- mimetype table with any mimetype that is not binary.
+ When asking to work on textual content, that filters on the
+ mimetype table with any mimetype that is not binary.
"""
cur = self._cursor(cur)
table = self.content_indexer_names[content_type]
if with_textual_data:
extra = """inner join content_mimetype cm
on (t.id=cm.id and cm.mimetype like 'text/%%' and
%(start)s <= cm.id and cm.id <= %(end)s)
"""
else:
extra = ""
query = f"""select t.id
from {table} t
{extra}
where t.indexer_configuration_id=%(tool_id)s
and %(start)s <= t.id and t.id <= %(end)s
order by t.indexer_configuration_id, t.id
limit %(limit)s"""
cur.execute(
query,
{
"start": start,
"end": end,
"tool_id": indexer_configuration_id,
"limit": limit,
},
)
yield from cur
    def content_mimetype_get_from_list(self, ids, cur=None):
        """Yield content_mimetype rows (joined with their tool columns) for
        the given content ids."""
        yield from self._get_from_list(
            "content_mimetype", ids, self.content_mimetype_cols, cur=cur
        )
content_language_hash_keys = ["id", "indexer_configuration_id"]
def content_language_missing_from_list(self, languages, cur=None):
- """List missing languages.
-
- """
+ """List missing languages."""
yield from self._missing_from_list(
"content_language", languages, self.content_language_hash_keys, cur=cur
)
content_language_cols = [
"id",
"lang",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
]
@stored_procedure("swh_mktemp_content_language")
def mktemp_content_language(self, cur=None):
pass
def content_language_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute("select * from swh_content_language_add()")
return cur.fetchone()[0]
def content_language_get_from_list(self, ids, cur=None):
yield from self._get_from_list(
"content_language", ids, self.content_language_cols, cur=cur
)
content_ctags_hash_keys = ["id", "indexer_configuration_id"]
def content_ctags_missing_from_list(self, ctags, cur=None):
- """List missing ctags.
-
- """
+ """List missing ctags."""
yield from self._missing_from_list(
"content_ctags", ctags, self.content_ctags_hash_keys, cur=cur
)
content_ctags_cols = [
"id",
"name",
"kind",
"line",
"lang",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
]
@stored_procedure("swh_mktemp_content_ctags")
def mktemp_content_ctags(self, cur=None):
pass
def content_ctags_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute("select * from swh_content_ctags_add()")
return cur.fetchone()[0]
    def content_ctags_get_from_list(self, ids, cur=None):
        """Yield ctags rows (joined with their tool columns) for the given
        content ids, ordered by symbol line number."""
        cur = self._cursor(cur)
        keys = map(self._convert_key, self.content_ctags_cols)
        yield from execute_values_generator(
            cur,
            """
            select %s
            from (values %%s) as t(id)
            inner join content_ctags c
            on c.id=t.id
            inner join indexer_configuration i
            on c.indexer_configuration_id=i.id
            order by line
            """
            % ", ".join(keys),
            ((_id,) for _id in ids),
        )
def content_ctags_search(self, expression, last_sha1, limit, cur=None):
cur = self._cursor(cur)
if not last_sha1:
query = """SELECT %s
FROM swh_content_ctags_search(%%s, %%s)""" % (
",".join(self.content_ctags_cols)
)
cur.execute(query, (expression, limit))
else:
if last_sha1 and isinstance(last_sha1, bytes):
last_sha1 = "\\x%s" % hashutil.hash_to_hex(last_sha1)
elif last_sha1:
last_sha1 = "\\x%s" % last_sha1
query = """SELECT %s
FROM swh_content_ctags_search(%%s, %%s, %%s)""" % (
",".join(self.content_ctags_cols)
)
cur.execute(query, (expression, limit, last_sha1))
yield from cur
content_fossology_license_cols = [
"id",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
"license",
]
@stored_procedure("swh_mktemp_content_fossology_license")
def mktemp_content_fossology_license(self, cur=None):
pass
def content_fossology_license_add_from_temp(self, cur=None):
- """Add new licenses per content.
-
- """
+ """Add new licenses per content."""
cur = self._cursor(cur)
cur.execute("select * from swh_content_fossology_license_add()")
return cur.fetchone()[0]
def content_fossology_license_get_from_list(self, ids, cur=None):
- """Retrieve licenses per id.
-
- """
+ """Retrieve licenses per id."""
cur = self._cursor(cur)
keys = map(self._convert_key, self.content_fossology_license_cols)
yield from execute_values_generator(
cur,
"""
select %s
from (values %%s) as t(id)
inner join content_fossology_license c on t.id=c.id
inner join indexer_configuration i
on i.id=c.indexer_configuration_id
"""
% ", ".join(keys),
((_id,) for _id in ids),
)
content_metadata_hash_keys = ["id", "indexer_configuration_id"]
def content_metadata_missing_from_list(self, metadata, cur=None):
- """List missing metadata.
-
- """
+ """List missing metadata."""
yield from self._missing_from_list(
"content_metadata", metadata, self.content_metadata_hash_keys, cur=cur
)
content_metadata_cols = [
"id",
"metadata",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
]
@stored_procedure("swh_mktemp_content_metadata")
def mktemp_content_metadata(self, cur=None):
pass
def content_metadata_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute("select * from swh_content_metadata_add()")
return cur.fetchone()[0]
def content_metadata_get_from_list(self, ids, cur=None):
yield from self._get_from_list(
"content_metadata", ids, self.content_metadata_cols, cur=cur
)
revision_intrinsic_metadata_hash_keys = ["id", "indexer_configuration_id"]
def revision_intrinsic_metadata_missing_from_list(self, metadata, cur=None):
- """List missing metadata.
-
- """
+ """List missing metadata."""
yield from self._missing_from_list(
"revision_intrinsic_metadata",
metadata,
self.revision_intrinsic_metadata_hash_keys,
cur=cur,
)
revision_intrinsic_metadata_cols = [
"id",
"metadata",
"mappings",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
]
@stored_procedure("swh_mktemp_revision_intrinsic_metadata")
def mktemp_revision_intrinsic_metadata(self, cur=None):
pass
def revision_intrinsic_metadata_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute("select * from swh_revision_intrinsic_metadata_add()")
return cur.fetchone()[0]
def revision_intrinsic_metadata_get_from_list(self, ids, cur=None):
yield from self._get_from_list(
"revision_intrinsic_metadata",
ids,
self.revision_intrinsic_metadata_cols,
cur=cur,
)
origin_intrinsic_metadata_cols = [
"id",
"metadata",
"from_revision",
"mappings",
"tool_id",
"tool_name",
"tool_version",
"tool_configuration",
]
origin_intrinsic_metadata_regconfig = "pg_catalog.simple"
"""The dictionary used to normalize 'metadata' and queries.
'pg_catalog.simple' provides no stopword, so it should be suitable
for proper names and non-English content.
When updating this value, make sure to add a new index on
origin_intrinsic_metadata.metadata."""
@stored_procedure("swh_mktemp_origin_intrinsic_metadata")
def mktemp_origin_intrinsic_metadata(self, cur=None):
pass
def origin_intrinsic_metadata_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute("select * from swh_origin_intrinsic_metadata_add()")
return cur.fetchone()[0]
def origin_intrinsic_metadata_get_from_list(self, ids, cur=None):
yield from self._get_from_list(
"origin_intrinsic_metadata",
ids,
self.origin_intrinsic_metadata_cols,
cur=cur,
id_col="id",
)
def origin_intrinsic_metadata_search_fulltext(self, terms, *, limit, cur):
regconfig = self.origin_intrinsic_metadata_regconfig
tsquery_template = " && ".join(
"plainto_tsquery('%s', %%s)" % regconfig for _ in terms
)
tsquery_args = [(term,) for term in terms]
keys = (
self._convert_key(col, "oim") for col in self.origin_intrinsic_metadata_cols
)
query = (
"SELECT {keys} FROM origin_intrinsic_metadata AS oim "
"INNER JOIN indexer_configuration AS i "
"ON oim.indexer_configuration_id=i.id "
"JOIN LATERAL (SELECT {tsquery_template}) AS s(tsq) ON true "
"WHERE oim.metadata_tsvector @@ tsq "
"ORDER BY ts_rank(oim.metadata_tsvector, tsq, 1) DESC "
"LIMIT %s;"
).format(keys=", ".join(keys), tsquery_template=tsquery_template)
cur.execute(query, tsquery_args + [limit])
yield from cur
def origin_intrinsic_metadata_search_by_producer(
self, last, limit, ids_only, mappings, tool_ids, cur
):
if ids_only:
keys = "oim.id"
else:
keys = ", ".join(
(
self._convert_key(col, "oim")
for col in self.origin_intrinsic_metadata_cols
)
)
query_parts = [
"SELECT %s" % keys,
"FROM origin_intrinsic_metadata AS oim",
"INNER JOIN indexer_configuration AS i",
"ON oim.indexer_configuration_id=i.id",
]
args = []
where = []
if last:
where.append("oim.id > %s")
args.append(last)
if mappings is not None:
where.append("oim.mappings && %s")
args.append(list(mappings))
if tool_ids is not None:
where.append("oim.indexer_configuration_id = ANY(%s)")
args.append(list(tool_ids))
if where:
query_parts.append("WHERE")
query_parts.append(" AND ".join(where))
if limit:
query_parts.append("LIMIT %s")
args.append(limit)
cur.execute(" ".join(query_parts), args)
yield from cur
indexer_configuration_cols = [
"id",
"tool_name",
"tool_version",
"tool_configuration",
]
@stored_procedure("swh_mktemp_indexer_configuration")
def mktemp_indexer_configuration(self, cur=None):
pass
def indexer_configuration_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute(
"SELECT %s from swh_indexer_configuration_add()"
% (",".join(self.indexer_configuration_cols),)
)
yield from cur
def indexer_configuration_get(
self, tool_name, tool_version, tool_configuration, cur=None
):
cur = self._cursor(cur)
cur.execute(
"""select %s
from indexer_configuration
where tool_name=%%s and
tool_version=%%s and
tool_configuration=%%s"""
% (",".join(self.indexer_configuration_cols)),
(tool_name, tool_version, tool_configuration),
)
return cur.fetchone()
def indexer_configuration_get_from_id(self, id_, cur=None):
cur = self._cursor(cur)
cur.execute(
"""select %s
from indexer_configuration
where id=%%s"""
% (",".join(self.indexer_configuration_cols)),
(id_,),
)
return cur.fetchone()
diff --git a/swh/indexer/storage/in_memory.py b/swh/indexer/storage/in_memory.py
index 0071ebe..9576b55 100644
--- a/swh/indexer/storage/in_memory.py
+++ b/swh/indexer/storage/in_memory.py
@@ -1,501 +1,506 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import Counter, defaultdict
import itertools
import json
import math
import operator
import re
from typing import (
Any,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from swh.core.collections import SortedList
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import SHA1_SIZE, Sha1Git
from swh.storage.utils import get_partition_bounds_bytes
from . import MAPPING_NAMES, check_id_duplicates
from .exc import IndexerStorageArgumentException
from .interface import PagedResult, Sha1
from .model import (
BaseRow,
ContentCtagsRow,
ContentLanguageRow,
ContentLicenseRow,
ContentMetadataRow,
ContentMimetypeRow,
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
from .writer import JournalWriter
SHA1_DIGEST_SIZE = 160
ToolId = int
def _transform_tool(tool):
return {
"id": tool["id"],
"name": tool["tool_name"],
"version": tool["tool_version"],
"configuration": tool["tool_configuration"],
}
def check_id_types(data: List[Dict[str, Any]]):
"""Checks all elements of the list have an 'id' whose type is 'bytes'."""
if not all(isinstance(item.get("id"), bytes) for item in data):
raise IndexerStorageArgumentException("identifiers must be bytes.")
def _key_from_dict(d):
return tuple(sorted(d.items()))
TValue = TypeVar("TValue", bound=BaseRow)
class SubStorage(Generic[TValue]):
"""Implements common missing/get/add logic for each indexer type."""
_data: Dict[Sha1, Dict[Tuple, Dict[str, Any]]]
_tools_per_id: Dict[Sha1, Set[ToolId]]
def __init__(self, row_class: Type[TValue], tools, journal_writer):
self.row_class = row_class
self._tools = tools
self._sorted_ids = SortedList[bytes, Sha1]()
self._data = defaultdict(dict)
self._journal_writer = journal_writer
self._tools_per_id = defaultdict(set)
def _key_from_dict(self, d) -> Tuple:
"""Like the global _key_from_dict, but filters out dict keys that don't
belong in the unique key."""
return _key_from_dict({k: d[k] for k in self.row_class.UNIQUE_KEY_FIELDS})
def missing(self, keys: Iterable[Dict]) -> List[Sha1]:
"""List data missing from storage.
Args:
data (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Yields:
missing sha1s
"""
results = []
for key in keys:
tool_id = key["indexer_configuration_id"]
id_ = key["id"]
if tool_id not in self._tools_per_id.get(id_, set()):
results.append(id_)
return results
def get(self, ids: Iterable[Sha1]) -> List[TValue]:
"""Retrieve data per id.
Args:
ids (iterable): sha1 checksums
Yields:
dict: dictionaries with the following keys:
- **id** (bytes)
- **tool** (dict): tool used to compute metadata
- arbitrary data (as provided to `add`)
"""
results = []
for id_ in ids:
for entry in self._data[id_].values():
entry = entry.copy()
tool_id = entry.pop("indexer_configuration_id")
results.append(
self.row_class(
- id=id_, tool=_transform_tool(self._tools[tool_id]), **entry,
+ id=id_,
+ tool=_transform_tool(self._tools[tool_id]),
+ **entry,
)
)
return results
def get_all(self) -> List[TValue]:
return self.get(self._sorted_ids)
def get_partition(
self,
indexer_configuration_id: int,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
) -> PagedResult[Sha1]:
"""Retrieve ids of content with `indexer_type` within partition partition_id
bound by limit.
Args:
**indexer_type**: Type of data content to index (mimetype, language, etc...)
**indexer_configuration_id**: The tool used to index data
**partition_id**: index of the partition to fetch
**nb_partitions**: total number of partitions to split into
**page_token**: opaque token used for pagination
**limit**: Limit result (default to 1000)
**with_textual_data** (bool): Deal with only textual content (True) or all
content (all contents by defaults, False)
Raises:
IndexerStorageArgumentException for;
- limit to None
- wrong indexer_type provided
Returns:
PagedResult of Sha1. If next_page_token is None, there is no more data to
fetch
"""
if limit is None:
raise IndexerStorageArgumentException("limit should not be None")
(start, end) = get_partition_bounds_bytes(
partition_id, nb_partitions, SHA1_SIZE
)
if page_token:
start = hash_to_bytes(page_token)
if end is None:
end = b"\xff" * SHA1_SIZE
next_page_token: Optional[str] = None
ids: List[Sha1] = []
sha1s = (sha1 for sha1 in self._sorted_ids.iter_from(start))
for counter, sha1 in enumerate(sha1s):
if sha1 > end:
break
if counter >= limit:
next_page_token = hash_to_hex(sha1)
break
ids.append(sha1)
assert len(ids) <= limit
return PagedResult(results=ids, next_page_token=next_page_token)
def add(self, data: Iterable[TValue]) -> int:
"""Add data not present in storage.
Args:
data (iterable): dictionaries with keys:
- **id**: sha1
- **indexer_configuration_id**: tool used to compute the
results
- arbitrary data
"""
data = list(data)
check_id_duplicates(data)
object_type = self.row_class.object_type # type: ignore
self._journal_writer.write_additions(object_type, data)
count = 0
for obj in data:
item = obj.to_dict()
id_ = item.pop("id")
tool_id = item["indexer_configuration_id"]
key = _key_from_dict(obj.unique_key())
self._data[id_][key] = item
self._tools_per_id[id_].add(tool_id)
count += 1
if id_ not in self._sorted_ids:
self._sorted_ids.add(id_)
return count
class IndexerStorage:
"""In-memory SWH indexer storage."""
def __init__(self, journal_writer=None):
self._tools = {}
def tool_getter(id_):
tool = self._tools[id_]
return {
"id": tool["id"],
"name": tool["tool_name"],
"version": tool["tool_version"],
"configuration": tool["tool_configuration"],
}
self.journal_writer = JournalWriter(tool_getter, journal_writer)
args = (self._tools, self.journal_writer)
self._mimetypes = SubStorage(ContentMimetypeRow, *args)
self._languages = SubStorage(ContentLanguageRow, *args)
self._content_ctags = SubStorage(ContentCtagsRow, *args)
self._licenses = SubStorage(ContentLicenseRow, *args)
self._content_metadata = SubStorage(ContentMetadataRow, *args)
self._revision_intrinsic_metadata = SubStorage(
RevisionIntrinsicMetadataRow, *args
)
self._origin_intrinsic_metadata = SubStorage(OriginIntrinsicMetadataRow, *args)
def check_config(self, *, check_write):
return True
def content_mimetype_missing(
self, mimetypes: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
return self._mimetypes.missing(mimetypes)
def content_mimetype_get_partition(
self,
indexer_configuration_id: int,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
) -> PagedResult[Sha1]:
return self._mimetypes.get_partition(
indexer_configuration_id, partition_id, nb_partitions, page_token, limit
)
def content_mimetype_add(
self, mimetypes: List[ContentMimetypeRow]
) -> Dict[str, int]:
added = self._mimetypes.add(mimetypes)
return {"content_mimetype:add": added}
def content_mimetype_get(self, ids: Iterable[Sha1]) -> List[ContentMimetypeRow]:
return self._mimetypes.get(ids)
def content_language_missing(
self, languages: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
return self._languages.missing(languages)
def content_language_get(self, ids: Iterable[Sha1]) -> List[ContentLanguageRow]:
return self._languages.get(ids)
def content_language_add(
self, languages: List[ContentLanguageRow]
) -> Dict[str, int]:
added = self._languages.add(languages)
return {"content_language:add": added}
def content_ctags_missing(self, ctags: Iterable[Dict]) -> List[Tuple[Sha1, int]]:
return self._content_ctags.missing(ctags)
def content_ctags_get(self, ids: Iterable[Sha1]) -> List[ContentCtagsRow]:
return self._content_ctags.get(ids)
def content_ctags_add(self, ctags: List[ContentCtagsRow]) -> Dict[str, int]:
added = self._content_ctags.add(ctags)
return {"content_ctags:add": added}
def content_ctags_search(
self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None
) -> List[ContentCtagsRow]:
nb_matches = 0
items_per_id: Dict[Tuple[Sha1Git, ToolId], List[ContentCtagsRow]] = {}
for item in sorted(self._content_ctags.get_all()):
if item.id <= (last_sha1 or bytes(0 for _ in range(SHA1_DIGEST_SIZE))):
continue
items_per_id.setdefault(
(item.id, item.indexer_configuration_id), []
).append(item)
results = []
for items in items_per_id.values():
for item in items:
if item.name != expression:
continue
nb_matches += 1
if nb_matches > limit:
break
results.append(item)
return results
def content_fossology_license_get(
self, ids: Iterable[Sha1]
) -> List[ContentLicenseRow]:
return self._licenses.get(ids)
def content_fossology_license_add(
self, licenses: List[ContentLicenseRow]
) -> Dict[str, int]:
added = self._licenses.add(licenses)
return {"content_fossology_license:add": added}
def content_fossology_license_get_partition(
self,
indexer_configuration_id: int,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
) -> PagedResult[Sha1]:
return self._licenses.get_partition(
indexer_configuration_id, partition_id, nb_partitions, page_token, limit
)
def content_metadata_missing(
self, metadata: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
return self._content_metadata.missing(metadata)
def content_metadata_get(self, ids: Iterable[Sha1]) -> List[ContentMetadataRow]:
return self._content_metadata.get(ids)
def content_metadata_add(
self, metadata: List[ContentMetadataRow]
) -> Dict[str, int]:
added = self._content_metadata.add(metadata)
return {"content_metadata:add": added}
def revision_intrinsic_metadata_missing(
self, metadata: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
return self._revision_intrinsic_metadata.missing(metadata)
def revision_intrinsic_metadata_get(
self, ids: Iterable[Sha1]
) -> List[RevisionIntrinsicMetadataRow]:
return self._revision_intrinsic_metadata.get(ids)
def revision_intrinsic_metadata_add(
self, metadata: List[RevisionIntrinsicMetadataRow]
) -> Dict[str, int]:
added = self._revision_intrinsic_metadata.add(metadata)
return {"revision_intrinsic_metadata:add": added}
def origin_intrinsic_metadata_get(
self, urls: Iterable[str]
) -> List[OriginIntrinsicMetadataRow]:
return self._origin_intrinsic_metadata.get(urls)
def origin_intrinsic_metadata_add(
self, metadata: List[OriginIntrinsicMetadataRow]
) -> Dict[str, int]:
added = self._origin_intrinsic_metadata.add(metadata)
return {"origin_intrinsic_metadata:add": added}
def origin_intrinsic_metadata_search_fulltext(
self, conjunction: List[str], limit: int = 100
) -> List[OriginIntrinsicMetadataRow]:
# A very crude fulltext search implementation, but that's enough
# to work on English metadata
tokens_re = re.compile("[a-zA-Z0-9]+")
search_tokens = list(itertools.chain(*map(tokens_re.findall, conjunction)))
def rank(data):
# Tokenize the metadata
text = json.dumps(data.metadata)
text_tokens = tokens_re.findall(text)
text_token_occurences = Counter(text_tokens)
# Count the number of occurrences of search tokens in the text
score = 0
for search_token in search_tokens:
if text_token_occurences[search_token] == 0:
# Search token is not in the text.
return 0
score += text_token_occurences[search_token]
# Normalize according to the text's length
return score / math.log(len(text_tokens))
results = [
(rank(data), data) for data in self._origin_intrinsic_metadata.get_all()
]
results = [(rank_, data) for (rank_, data) in results if rank_ > 0]
results.sort(
key=operator.itemgetter(0), reverse=True # Don't try to order 'data'
)
return [result for (rank_, result) in results[:limit]]
def origin_intrinsic_metadata_search_by_producer(
self,
page_token: str = "",
limit: int = 100,
ids_only: bool = False,
mappings: Optional[List[str]] = None,
tool_ids: Optional[List[int]] = None,
) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]:
assert isinstance(page_token, str)
nb_results = 0
if mappings is not None:
mapping_set = frozenset(mappings)
if tool_ids is not None:
tool_id_set = frozenset(tool_ids)
rows = []
# we go to limit+1 to check whether we should add next_page_token in
# the response
for entry in self._origin_intrinsic_metadata.get_all():
if entry.id <= page_token:
continue
if nb_results >= (limit + 1):
break
if mappings and mapping_set.isdisjoint(entry.mappings):
continue
if tool_ids and entry.tool["id"] not in tool_id_set:
continue
rows.append(entry)
nb_results += 1
if len(rows) > limit:
rows = rows[:limit]
next_page_token = rows[-1].id
else:
next_page_token = None
if ids_only:
rows = [row.id for row in rows]
- return PagedResult(results=rows, next_page_token=next_page_token,)
+ return PagedResult(
+ results=rows,
+ next_page_token=next_page_token,
+ )
def origin_intrinsic_metadata_stats(self):
mapping_count = {m: 0 for m in MAPPING_NAMES}
total = non_empty = 0
for data in self._origin_intrinsic_metadata.get_all():
total += 1
if set(data.metadata) - {"@context"}:
non_empty += 1
for mapping in data.mappings:
mapping_count[mapping] += 1
return {"per_mapping": mapping_count, "total": total, "non_empty": non_empty}
def indexer_configuration_add(self, tools):
inserted = []
for tool in tools:
tool = tool.copy()
id_ = self._tool_key(tool)
tool["id"] = id_
self._tools[id_] = tool
inserted.append(tool)
return inserted
def indexer_configuration_get(self, tool):
return self._tools.get(self._tool_key(tool))
def _tool_key(self, tool):
return hash(
(
tool["tool_name"],
tool["tool_version"],
json.dumps(tool["tool_configuration"], sort_keys=True),
)
)
diff --git a/swh/indexer/storage/interface.py b/swh/indexer/storage/interface.py
index 7a7dc2f..6a58615 100644
--- a/swh/indexer/storage/interface.py
+++ b/swh/indexer/storage/interface.py
@@ -1,519 +1,520 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Dict, Iterable, List, Optional, Tuple, TypeVar, Union
from typing_extensions import Protocol, runtime_checkable
from swh.core.api import remote_api_endpoint
from swh.core.api.classes import PagedResult as CorePagedResult
from swh.indexer.storage.model import (
ContentCtagsRow,
ContentLanguageRow,
ContentLicenseRow,
ContentMetadataRow,
ContentMimetypeRow,
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
TResult = TypeVar("TResult")
PagedResult = CorePagedResult[TResult, str]
Sha1 = bytes
@runtime_checkable
class IndexerStorageInterface(Protocol):
@remote_api_endpoint("check_config")
def check_config(self, *, check_write):
"""Check that the storage is configured and ready to go."""
...
@remote_api_endpoint("content_mimetype/missing")
def content_mimetype_missing(
self, mimetypes: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
"""Generate mimetypes missing from storage.
Args:
mimetypes (iterable): iterable of dict with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute the
results
Returns:
list of tuple (id, indexer_configuration_id) missing
"""
...
@remote_api_endpoint("content_mimetype/range")
def content_mimetype_get_partition(
self,
indexer_configuration_id: int,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
) -> PagedResult[Sha1]:
"""Retrieve mimetypes within partition partition_id bound by limit.
Args:
**indexer_configuration_id**: The tool used to index data
**partition_id**: index of the partition to fetch
**nb_partitions**: total number of partitions to split into
**page_token**: opaque token used for pagination
**limit**: Limit result (default to 1000)
Raises:
IndexerStorageArgumentException for;
- limit to None
- wrong indexer_type provided
Returns:
PagedResult of Sha1. If next_page_token is None, there is no more data
to fetch
"""
...
@remote_api_endpoint("content_mimetype/add")
def content_mimetype_add(
self, mimetypes: List[ContentMimetypeRow]
) -> Dict[str, int]:
"""Add mimetypes not present in storage.
Args:
mimetypes: mimetype rows to be added, with their `tool` attribute set to
None.
overwrite (``True``) or skip duplicates (``False``, the
default)
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("content_mimetype")
def content_mimetype_get(self, ids: Iterable[Sha1]) -> List[ContentMimetypeRow]:
"""Retrieve full content mimetype per ids.
Args:
ids: sha1 identifiers
Returns:
mimetype row objects
"""
...
@remote_api_endpoint("content_language/missing")
def content_language_missing(
self, languages: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
"""List languages missing from storage.
Args:
languages (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Returns:
list of tuple (id, indexer_configuration_id) missing
"""
...
@remote_api_endpoint("content_language")
def content_language_get(self, ids: Iterable[Sha1]) -> List[ContentLanguageRow]:
"""Retrieve full content language per ids.
Args:
ids (iterable): sha1 identifier
Returns:
language row objects
"""
...
@remote_api_endpoint("content_language/add")
def content_language_add(
self, languages: List[ContentLanguageRow]
) -> Dict[str, int]:
"""Add languages not present in storage.
Args:
languages: language row objects
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("content/ctags/missing")
def content_ctags_missing(self, ctags: Iterable[Dict]) -> List[Tuple[Sha1, int]]:
"""List ctags missing from storage.
Args:
ctags (iterable): dicts with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Returns:
list of missing id for the tuple (id,
indexer_configuration_id)
"""
...
@remote_api_endpoint("content/ctags")
def content_ctags_get(self, ids: Iterable[Sha1]) -> List[ContentCtagsRow]:
"""Retrieve ctags per id.
Args:
ids (iterable): sha1 checksums
Returns:
list of language rows
"""
...
@remote_api_endpoint("content/ctags/add")
def content_ctags_add(self, ctags: List[ContentCtagsRow]) -> Dict[str, int]:
"""Add ctags not present in storage
Args:
ctags (iterable): dictionaries with keys:
- **id** (bytes): sha1
- **ctags** ([list): List of dictionary with keys: name, kind,
line, lang
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("content/ctags/search")
def content_ctags_search(
self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None
) -> List[ContentCtagsRow]:
"""Search through content's raw ctags symbols.
Args:
expression (str): Expression to search for
limit (int): Number of rows to return (default to 10).
last_sha1 (str): Offset from which retrieving data (default to '').
Returns:
rows of ctags including id, name, lang, kind, line, etc...
"""
...
@remote_api_endpoint("content/fossology_license")
def content_fossology_license_get(
self, ids: Iterable[Sha1]
) -> List[ContentLicenseRow]:
"""Retrieve licenses per id.
Args:
ids: sha1 identifiers
Yields:
license rows; possibly more than one per (sha1, tool_id) if there
are multiple licenses.
"""
...
@remote_api_endpoint("content/fossology_license/add")
def content_fossology_license_add(
self, licenses: List[ContentLicenseRow]
) -> Dict[str, int]:
"""Add licenses not present in storage.
Args:
license: license rows to be added, with their `tool` attribute set to
None.
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("content/fossology_license/range")
def content_fossology_license_get_partition(
self,
indexer_configuration_id: int,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
) -> PagedResult[Sha1]:
"""Retrieve licenses within the partition partition_id bound by limit.
Args:
**indexer_configuration_id**: The tool used to index data
**partition_id**: index of the partition to fetch
**nb_partitions**: total number of partitions to split into
**page_token**: opaque token used for pagination
**limit**: Limit result (default to 1000)
Raises:
IndexerStorageArgumentException for;
- limit to None
- wrong indexer_type provided
Returns: PagedResult of Sha1. If next_page_token is None, there is no more data
to fetch
"""
...
@remote_api_endpoint("content_metadata/missing")
def content_metadata_missing(
self, metadata: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
"""List metadata missing from storage.
Args:
metadata (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Yields:
missing sha1s
"""
...
@remote_api_endpoint("content_metadata")
def content_metadata_get(self, ids: Iterable[Sha1]) -> List[ContentMetadataRow]:
"""Retrieve metadata per id.
Args:
ids (iterable): sha1 checksums
Yields:
dictionaries with the following keys:
id (bytes)
metadata (str): associated metadata
tool (dict): tool used to compute metadata
"""
...
@remote_api_endpoint("content_metadata/add")
def content_metadata_add(
self, metadata: List[ContentMetadataRow]
) -> Dict[str, int]:
"""Add metadata not present in storage.
Args:
metadata (iterable): dictionaries with keys:
- **id**: sha1
- **metadata**: arbitrary dict
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("revision_intrinsic_metadata/missing")
def revision_intrinsic_metadata_missing(
self, metadata: Iterable[Dict]
) -> List[Tuple[Sha1, int]]:
"""List metadata missing from storage.
Args:
metadata (iterable): dictionaries with keys:
- **id** (bytes): sha1_git revision identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Returns:
missing ids
"""
...
@remote_api_endpoint("revision_intrinsic_metadata")
def revision_intrinsic_metadata_get(
self, ids: Iterable[Sha1]
) -> List[RevisionIntrinsicMetadataRow]:
"""Retrieve revision metadata per id.
Args:
ids (iterable): sha1 checksums
Returns:
ContentMetadataRow objects
"""
...
@remote_api_endpoint("revision_intrinsic_metadata/add")
def revision_intrinsic_metadata_add(
- self, metadata: List[RevisionIntrinsicMetadataRow],
+ self,
+ metadata: List[RevisionIntrinsicMetadataRow],
) -> Dict[str, int]:
"""Add metadata not present in storage.
Args:
metadata: ContentMetadataRow objects
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("origin_intrinsic_metadata")
def origin_intrinsic_metadata_get(
self, urls: Iterable[str]
) -> List[OriginIntrinsicMetadataRow]:
"""Retrieve origin metadata per id.
Args:
urls (iterable): origin URLs
Returns: list of OriginIntrinsicMetadataRow
"""
...
@remote_api_endpoint("origin_intrinsic_metadata/add")
def origin_intrinsic_metadata_add(
self, metadata: List[OriginIntrinsicMetadataRow]
) -> Dict[str, int]:
"""Add origin metadata not present in storage.
Args:
metadata: list of OriginIntrinsicMetadataRow objects
Returns:
Dict summary of number of rows added
"""
...
@remote_api_endpoint("origin_intrinsic_metadata/search/fulltext")
def origin_intrinsic_metadata_search_fulltext(
self, conjunction: List[str], limit: int = 100
) -> List[OriginIntrinsicMetadataRow]:
"""Returns the list of origins whose metadata contain all the terms.
Args:
conjunction: List of terms to be searched for.
limit: The maximum number of results to return
Returns:
list of OriginIntrinsicMetadataRow
"""
...
@remote_api_endpoint("origin_intrinsic_metadata/search/by_producer")
def origin_intrinsic_metadata_search_by_producer(
self,
page_token: str = "",
limit: int = 100,
ids_only: bool = False,
mappings: Optional[List[str]] = None,
tool_ids: Optional[List[int]] = None,
) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]:
"""Returns the list of origins whose metadata contain all the terms.
Args:
page_token (str): Opaque token used for pagination.
limit (int): The maximum number of results to return
ids_only (bool): Determines whether only origin urls are
returned or the content as well
mappings (List[str]): Returns origins whose intrinsic metadata
were generated using at least one of these mappings.
Returns:
OriginIntrinsicMetadataRow objects
"""
...
@remote_api_endpoint("origin_intrinsic_metadata/stats")
def origin_intrinsic_metadata_stats(self):
"""Returns counts of indexed metadata per origins, broken down
into metadata types.
Returns:
dict: dictionary with keys:
- total (int): total number of origins that were indexed
(possibly yielding an empty metadata dictionary)
- non_empty (int): total number of origins that we extracted
a non-empty metadata dictionary from
- per_mapping (dict): a dictionary with mapping names as
keys and number of origins whose indexing used this
mapping. Note that indexing a given origin may use
0, 1, or many mappings.
"""
...
@remote_api_endpoint("indexer_configuration/add")
def indexer_configuration_add(self, tools):
"""Add new tools to the storage.
Args:
tools ([dict]): List of dictionary representing tool to
insert in the db. Dictionary with the following keys:
- **tool_name** (str): tool's name
- **tool_version** (str): tool's version
- **tool_configuration** (dict): tool's configuration
(free form dict)
Returns:
List of dict inserted in the db (holding the id key as
well). The order of the list is not guaranteed to match
the order of the initial list.
"""
...
@remote_api_endpoint("indexer_configuration/data")
def indexer_configuration_get(self, tool):
"""Retrieve tool information.
Args:
tool (dict): Dictionary representing a tool with the
following keys:
- **tool_name** (str): tool's name
- **tool_version** (str): tool's version
- **tool_configuration** (dict): tool's configuration
(free form dict)
Returns:
The same dictionary with an `id` key, None otherwise.
"""
...
diff --git a/swh/indexer/storage/metrics.py b/swh/indexer/storage/metrics.py
index 55634d2..e5aa4fd 100644
--- a/swh/indexer/storage/metrics.py
+++ b/swh/indexer/storage/metrics.py
@@ -1,83 +1,79 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from functools import wraps
import logging
from swh.core.statsd import statsd
OPERATIONS_METRIC = "swh_indexer_storage_operations_total"
OPERATIONS_UNIT_METRIC = "swh_indexer_storage_operations_{unit}_total"
DURATION_METRIC = "swh_indexer_storage_request_duration_seconds"
def timed(f):
- """Time that function!
-
- """
+ """Time that function!"""
@wraps(f)
def d(*a, **kw):
with statsd.timed(DURATION_METRIC, tags={"endpoint": f.__name__}):
return f(*a, **kw)
return d
def send_metric(metric, count, method_name):
"""Send statsd metric with count for method `method_name`
If count is 0, the metric is discarded. If the metric is not
parseable, the metric is discarded with a log message.
Args:
metric (str): Metric's name (e.g content:add, content:add:bytes)
count (int): Associated value for the metric
method_name (str): Method's name
Returns:
Bool to explicit if metric has been set or not
"""
if count == 0:
return False
metric_type = metric.split(":")
_length = len(metric_type)
if _length == 2:
object_type, operation = metric_type
metric_name = OPERATIONS_METRIC
elif _length == 3:
object_type, operation, unit = metric_type
metric_name = OPERATIONS_UNIT_METRIC.format(unit=unit)
else:
logging.warning("Skipping unknown metric {%s: %s}" % (metric, count))
return False
statsd.increment(
metric_name,
count,
tags={
"endpoint": method_name,
"object_type": object_type,
"operation": operation,
},
)
return True
def process_metrics(f):
- """Increment object counters for the decorated function.
-
- """
+ """Increment object counters for the decorated function."""
@wraps(f)
def d(*a, **kw):
r = f(*a, **kw)
for metric, count in r.items():
send_metric(metric=metric, count=count, method_name=f.__name__)
return r
return d
diff --git a/swh/indexer/tests/conftest.py b/swh/indexer/tests/conftest.py
index d42c3e3..d5f0031 100644
--- a/swh/indexer/tests/conftest.py
+++ b/swh/indexer/tests/conftest.py
@@ -1,134 +1,134 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import timedelta
from functools import partial
import os
from typing import List, Tuple
from unittest.mock import patch
import pytest
from pytest_postgresql import factories
import yaml
from swh.core.db.pytest_plugin import initialize_database_for_module, postgresql_fact
from swh.indexer.storage import get_indexer_storage
from swh.indexer.storage.db import Db as IndexerDb
from swh.objstorage.factory import get_objstorage
from swh.storage import get_storage
from .utils import fill_obj_storage, fill_storage
TASK_NAMES: List[Tuple[str, str]] = [
# (scheduler-task-type, task-class-test-name)
("index-revision-metadata", "revision_intrinsic_metadata"),
("index-origin-metadata", "origin_intrinsic_metadata"),
]
idx_postgresql_proc = factories.postgresql_proc(
dbname="indexer_storage",
load=[
partial(
initialize_database_for_module,
modname="indexer",
version=IndexerDb.current_version,
)
],
)
idx_storage_postgresql = postgresql_fact("idx_postgresql_proc")
@pytest.fixture
def indexer_scheduler(swh_scheduler):
# Insert the expected task types within the scheduler
for task_name, task_class_name in TASK_NAMES:
swh_scheduler.create_task_type(
{
"type": task_name,
"description": f"The {task_class_name} indexer testing task",
"backend_name": f"swh.indexer.tests.tasks.{task_class_name}",
"default_interval": timedelta(days=1),
"min_interval": timedelta(hours=6),
"max_interval": timedelta(days=12),
"num_retries": 3,
}
)
return swh_scheduler
@pytest.fixture
def idx_storage_backend_config(idx_storage_postgresql):
"""Basic pg storage configuration with no journal collaborator for the indexer
storage (to avoid pulling optional dependency on clients of this fixture)
"""
return {
"cls": "local",
"db": idx_storage_postgresql.dsn,
}
@pytest.fixture
def swh_indexer_config(
swh_storage_backend_config, idx_storage_backend_config, swh_scheduler_config
):
return {
"storage": swh_storage_backend_config,
"objstorage": {"cls": "memory"},
"indexer_storage": idx_storage_backend_config,
"scheduler": {"cls": "local", **swh_scheduler_config},
"tools": {
"name": "file",
"version": "1:5.30-1+deb9u1",
"configuration": {"type": "library", "debian-package": "python3-magic"},
},
"compute_checksums": ["blake2b512"], # for rehash indexer
}
@pytest.fixture
def idx_storage(swh_indexer_config):
"""An instance of in-memory indexer storage that gets injected into all
indexers classes.
"""
idx_storage_config = swh_indexer_config["indexer_storage"]
return get_indexer_storage(**idx_storage_config)
@pytest.fixture
def storage(swh_indexer_config):
"""An instance of in-memory storage that gets injected into all indexers
- classes.
+ classes.
"""
storage = get_storage(**swh_indexer_config["storage"])
fill_storage(storage)
return storage
@pytest.fixture
def obj_storage(swh_indexer_config):
"""An instance of in-memory objstorage that gets injected into all indexers
classes.
"""
objstorage = get_objstorage(**swh_indexer_config["objstorage"])
fill_obj_storage(objstorage)
with patch.dict(
"swh.objstorage.factory._STORAGE_CLASSES", {"memory": lambda: objstorage}
):
yield objstorage
@pytest.fixture
def swh_config(swh_indexer_config, monkeypatch, tmp_path):
conffile = os.path.join(str(tmp_path), "indexer.yml")
with open(conffile, "w") as f:
f.write(yaml.dump(swh_indexer_config))
monkeypatch.setenv("SWH_CONFIG_FILENAME", conffile)
return conffile
diff --git a/swh/indexer/tests/storage/conftest.py b/swh/indexer/tests/storage/conftest.py
index c301e92..60cf1be 100644
--- a/swh/indexer/tests/storage/conftest.py
+++ b/swh/indexer/tests/storage/conftest.py
@@ -1,78 +1,80 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from os.path import join
import pytest
from swh.indexer.storage import get_indexer_storage
from swh.indexer.storage.model import ContentLicenseRow, ContentMimetypeRow
from swh.indexer.tests.conftest import idx_storage_postgresql
from swh.model.hashutil import hash_to_bytes
from . import SQL_DIR
from .generate_data_test import FOSSOLOGY_LICENSES, MIMETYPE_OBJECTS, TOOLS
DUMP_FILES = join(SQL_DIR, "*.sql")
class DataObj(dict):
def __getattr__(self, key):
return self.__getitem__(key)
def __setattr__(self, key, value):
return self.__setitem__(key, value)
@pytest.fixture
def swh_indexer_storage_with_data(swh_indexer_storage):
data = DataObj()
tools = {
tool["tool_name"]: {
"id": tool["id"],
"name": tool["tool_name"],
"version": tool["tool_version"],
"configuration": tool["tool_configuration"],
}
for tool in swh_indexer_storage.indexer_configuration_add(TOOLS)
}
data.tools = tools
data.sha1_1 = hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4689")
data.sha1_2 = hash_to_bytes("61c2b3a30496d329e21af70dd2d7e097046d07b7")
data.revision_id_1 = hash_to_bytes("7026b7c1a2af56521e951c01ed20f255fa054238")
data.revision_id_2 = hash_to_bytes("7026b7c1a2af56521e9587659012345678904321")
data.revision_id_3 = hash_to_bytes("7026b7c1a2af56521e9587659012345678904320")
data.origin_url_1 = "file:///dev/0/zero" # 44434341
data.origin_url_2 = "file:///dev/1/one" # 44434342
data.origin_url_3 = "file:///dev/2/two" # 54974445
data.mimetypes = [
ContentMimetypeRow(indexer_configuration_id=tools["file"]["id"], **mimetype_obj)
for mimetype_obj in MIMETYPE_OBJECTS
]
swh_indexer_storage.content_mimetype_add(data.mimetypes)
data.fossology_licenses = [
ContentLicenseRow(
id=fossology_obj["id"],
indexer_configuration_id=tools["nomos"]["id"],
license=license,
)
for fossology_obj in FOSSOLOGY_LICENSES
for license in fossology_obj["licenses"]
]
swh_indexer_storage._test_data = data
return (swh_indexer_storage, data)
swh_indexer_storage_postgresql = idx_storage_postgresql
@pytest.fixture
def swh_indexer_storage(swh_indexer_storage_postgresql):
return get_indexer_storage(
"local",
db=swh_indexer_storage_postgresql.dsn,
- journal_writer={"cls": "memory",},
+ journal_writer={
+ "cls": "memory",
+ },
)
diff --git a/swh/indexer/tests/storage/generate_data_test.py b/swh/indexer/tests/storage/generate_data_test.py
index 3d6e4e0..9fa73fb 100644
--- a/swh/indexer/tests/storage/generate_data_test.py
+++ b/swh/indexer/tests/storage/generate_data_test.py
@@ -1,202 +1,212 @@
# Copyright (C) 2018-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from uuid import uuid1
from hypothesis.strategies import composite, one_of, sampled_from, sets, tuples, uuids
from swh.model.hashutil import MultiHash
MIMETYPES = [
b"application/json",
b"application/octet-stream",
b"application/xml",
b"text/plain",
]
ENCODINGS = [
b"iso8859-1",
b"iso8859-15",
b"latin1",
b"utf-8",
]
def gen_mimetype():
- """Generate one mimetype strategy.
-
- """
+ """Generate one mimetype strategy."""
return one_of(sampled_from(MIMETYPES))
def gen_encoding():
- """Generate one encoding strategy.
-
- """
+ """Generate one encoding strategy."""
return one_of(sampled_from(ENCODINGS))
def _init_content(uuid):
- """Given a uuid, initialize a content
-
- """
+ """Given a uuid, initialize a content"""
return {
"id": MultiHash.from_data(uuid.bytes, {"sha1"}).digest()["sha1"],
"indexer_configuration_id": 1,
}
@composite
def gen_content_mimetypes(draw, *, min_size=0, max_size=100):
"""Generate valid and consistent content_mimetypes.
Context: Test purposes
Args:
**draw** (callable): Used by hypothesis to generate data
**min_size** (int): Minimal number of elements to generate
(default: 0)
**max_size** (int): Maximal number of elements to generate
(default: 100)
Returns:
List of content_mimetypes as expected by the
content_mimetype_add api endpoint.
"""
_ids = draw(
sets(
tuples(uuids(), gen_mimetype(), gen_encoding()),
min_size=min_size,
max_size=max_size,
)
)
content_mimetypes = []
for uuid, mimetype, encoding in _ids:
content_mimetypes.append(
- {**_init_content(uuid), "mimetype": mimetype, "encoding": encoding,}
+ {
+ **_init_content(uuid),
+ "mimetype": mimetype,
+ "encoding": encoding,
+ }
)
return content_mimetypes
TOOLS = [
{
"tool_name": "universal-ctags",
"tool_version": "~git7859817b",
"tool_configuration": {
"command_line": "ctags --fields=+lnz --sort=no --links=no "
"--output-format=json <filepath>"
},
},
{
"tool_name": "swh-metadata-translator",
"tool_version": "0.0.1",
"tool_configuration": {"type": "local", "context": "NpmMapping"},
},
{
"tool_name": "swh-metadata-detector",
"tool_version": "0.0.1",
"tool_configuration": {
"type": "local",
"context": ["NpmMapping", "CodemetaMapping"],
},
},
{
"tool_name": "swh-metadata-detector2",
"tool_version": "0.0.1",
"tool_configuration": {
"type": "local",
"context": ["NpmMapping", "CodemetaMapping"],
},
},
{
"tool_name": "file",
"tool_version": "5.22",
"tool_configuration": {"command_line": "file --mime <filepath>"},
},
{
"tool_name": "pygments",
"tool_version": "2.0.1+dfsg-1.1+deb8u1",
"tool_configuration": {"type": "library", "debian-package": "python3-pygments"},
},
{
"tool_name": "pygments2",
"tool_version": "2.0.1+dfsg-1.1+deb8u1",
"tool_configuration": {
"type": "library",
"debian-package": "python3-pygments",
"max_content_size": 10240,
},
},
{
"tool_name": "nomos",
"tool_version": "3.1.0rc2-31-ga2cbb8c",
"tool_configuration": {"command_line": "nomossa <filepath>"},
},
]
MIMETYPE_OBJECTS = [
{
"id": MultiHash.from_data(uuid1().bytes, {"sha1"}).digest()["sha1"],
"mimetype": mt,
"encoding": enc,
# 'indexer_configuration_id' will be added after TOOLS get registered
}
for mt in MIMETYPES
for enc in ENCODINGS
]
LICENSES = [
b"3DFX",
b"BSD",
b"GPL",
b"Apache2",
b"MIT",
]
FOSSOLOGY_LICENSES = [
{
"id": MultiHash.from_data(uuid1().bytes, {"sha1"}).digest()["sha1"],
- "licenses": [LICENSES[i % len(LICENSES)],],
+ "licenses": [
+ LICENSES[i % len(LICENSES)],
+ ],
# 'indexer_configuration_id' will be added after TOOLS get registered
}
for i in range(10)
]
def gen_license():
return one_of(sampled_from(LICENSES))
@composite
def gen_content_fossology_licenses(draw, *, min_size=0, max_size=100):
"""Generate valid and consistent content_fossology_licenses.
Context: Test purposes
Args:
**draw** (callable): Used by hypothesis to generate data
**min_size** (int): Minimal number of elements to generate
(default: 0)
**max_size** (int): Maximal number of elements to generate
(default: 100)
Returns:
List of content_fossology_licenses as expected by the
content_fossology_license_add api endpoint.
"""
_ids = draw(
- sets(tuples(uuids(), gen_license(),), min_size=min_size, max_size=max_size)
+ sets(
+ tuples(
+ uuids(),
+ gen_license(),
+ ),
+ min_size=min_size,
+ max_size=max_size,
+ )
)
content_licenses = []
for uuid, license in _ids:
content_licenses.append(
- {**_init_content(uuid), "licenses": [license],}
+ {
+ **_init_content(uuid),
+ "licenses": [license],
+ }
)
return content_licenses
diff --git a/swh/indexer/tests/storage/test_api_client.py b/swh/indexer/tests/storage/test_api_client.py
index 993596d..250b6d8 100644
--- a/swh/indexer/tests/storage/test_api_client.py
+++ b/swh/indexer/tests/storage/test_api_client.py
@@ -1,54 +1,56 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import pytest
from swh.indexer.storage import get_indexer_storage
from swh.indexer.storage.api.client import RemoteStorage
import swh.indexer.storage.api.server as server
from .test_storage import * # noqa
@pytest.fixture
def app_server(swh_indexer_storage_postgresql):
server.storage = get_indexer_storage(
"local",
db=swh_indexer_storage_postgresql.dsn,
- journal_writer={"cls": "memory",},
+ journal_writer={
+ "cls": "memory",
+ },
)
yield server
@pytest.fixture
def app(app_server):
return app_server.app
@pytest.fixture
def swh_rpc_client_class():
# these are needed for the swh_indexer_storage_with_data fixture
assert hasattr(RemoteStorage, "indexer_configuration_add")
assert hasattr(RemoteStorage, "content_mimetype_add")
return RemoteStorage
@pytest.fixture
def swh_indexer_storage(swh_rpc_client, app_server):
# This version of the swh_storage fixture uses the swh_rpc_client fixture
# to instantiate a RemoteStorage (see swh_rpc_client_class above) that
# proxies, via the swh.core RPC mechanism, the local (in memory) storage
# configured in the app fixture above.
#
# Also note that, for the sake of
# making it easier to write tests, the in-memory journal writer of the
# in-memory backend storage is attached to the RemoteStorage as its
# journal_writer attribute.
storage = swh_rpc_client
journal_writer = getattr(storage, "journal_writer", None)
storage.journal_writer = app_server.storage.journal_writer
yield storage
storage.journal_writer = journal_writer
diff --git a/swh/indexer/tests/storage/test_converters.py b/swh/indexer/tests/storage/test_converters.py
index 4f605c1..4119293 100644
--- a/swh/indexer/tests/storage/test_converters.py
+++ b/swh/indexer/tests/storage/test_converters.py
@@ -1,176 +1,191 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from swh.indexer.storage import converters
def test_ctags_to_db() -> None:
input_ctag = {
"id": b"some-id",
"indexer_configuration_id": 100,
"ctags": [
- {"name": "some-name", "kind": "some-kind", "line": 10, "lang": "Yaml",},
- {"name": "main", "kind": "function", "line": 12, "lang": "Yaml",},
+ {
+ "name": "some-name",
+ "kind": "some-kind",
+ "line": 10,
+ "lang": "Yaml",
+ },
+ {
+ "name": "main",
+ "kind": "function",
+ "line": 12,
+ "lang": "Yaml",
+ },
],
}
expected_ctags = [
{
"id": b"some-id",
"name": "some-name",
"kind": "some-kind",
"line": 10,
"lang": "Yaml",
"indexer_configuration_id": 100,
},
{
"id": b"some-id",
"name": "main",
"kind": "function",
"line": 12,
"lang": "Yaml",
"indexer_configuration_id": 100,
},
]
# when
actual_ctags = list(converters.ctags_to_db(input_ctag))
# then
assert actual_ctags == expected_ctags
def test_db_to_ctags() -> None:
input_ctags = {
"id": b"some-id",
"name": "some-name",
"kind": "some-kind",
"line": 10,
"lang": "Yaml",
"tool_id": 200,
"tool_name": "some-toolname",
"tool_version": "some-toolversion",
"tool_configuration": {},
}
expected_ctags = {
"id": b"some-id",
"name": "some-name",
"kind": "some-kind",
"line": 10,
"lang": "Yaml",
"tool": {
"id": 200,
"name": "some-toolname",
"version": "some-toolversion",
"configuration": {},
},
}
# when
actual_ctags = converters.db_to_ctags(input_ctags)
# then
assert actual_ctags == expected_ctags
def test_db_to_mimetype() -> None:
input_mimetype = {
"id": b"some-id",
"tool_id": 10,
"tool_name": "some-toolname",
"tool_version": "some-toolversion",
"tool_configuration": {},
"encoding": b"ascii",
"mimetype": b"text/plain",
}
expected_mimetype = {
"id": b"some-id",
"encoding": b"ascii",
"mimetype": b"text/plain",
"tool": {
"id": 10,
"name": "some-toolname",
"version": "some-toolversion",
"configuration": {},
},
}
actual_mimetype = converters.db_to_mimetype(input_mimetype)
assert actual_mimetype == expected_mimetype
def test_db_to_language() -> None:
input_language = {
"id": b"some-id",
"tool_id": 20,
"tool_name": "some-toolname",
"tool_version": "some-toolversion",
"tool_configuration": {},
"lang": b"css",
}
expected_language = {
"id": b"some-id",
"lang": b"css",
"tool": {
"id": 20,
"name": "some-toolname",
"version": "some-toolversion",
"configuration": {},
},
}
actual_language = converters.db_to_language(input_language)
assert actual_language == expected_language
def test_db_to_fossology_license() -> None:
input_license = {
"id": b"some-id",
"tool_id": 20,
"tool_name": "nomossa",
"tool_version": "5.22",
"tool_configuration": {},
"license": "GPL2.0",
}
expected_license = {
"id": b"some-id",
"license": "GPL2.0",
- "tool": {"id": 20, "name": "nomossa", "version": "5.22", "configuration": {},},
+ "tool": {
+ "id": 20,
+ "name": "nomossa",
+ "version": "5.22",
+ "configuration": {},
+ },
}
actual_license = converters.db_to_fossology_license(input_license)
assert actual_license == expected_license
def test_db_to_metadata() -> None:
input_metadata = {
"id": b"some-id",
"tool_id": 20,
"tool_name": "some-toolname",
"tool_version": "some-toolversion",
"tool_configuration": {},
"metadata": b"metadata",
}
expected_metadata = {
"id": b"some-id",
"metadata": b"metadata",
"tool": {
"id": 20,
"name": "some-toolname",
"version": "some-toolversion",
"configuration": {},
},
}
actual_metadata = converters.db_to_metadata(input_metadata)
assert actual_metadata == expected_metadata
diff --git a/swh/indexer/tests/storage/test_in_memory.py b/swh/indexer/tests/storage/test_in_memory.py
index 854d4fd..810ebb3 100644
--- a/swh/indexer/tests/storage/test_in_memory.py
+++ b/swh/indexer/tests/storage/test_in_memory.py
@@ -1,15 +1,20 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import pytest
from swh.indexer.storage import get_indexer_storage
from .test_storage import * # noqa
@pytest.fixture
def swh_indexer_storage():
- return get_indexer_storage("memory", journal_writer={"cls": "memory",})
+ return get_indexer_storage(
+ "memory",
+ journal_writer={
+ "cls": "memory",
+ },
+ )
diff --git a/swh/indexer/tests/storage/test_metrics.py b/swh/indexer/tests/storage/test_metrics.py
index 482a256..804c2b8 100644
--- a/swh/indexer/tests/storage/test_metrics.py
+++ b/swh/indexer/tests/storage/test_metrics.py
@@ -1,58 +1,62 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from unittest.mock import patch
from swh.indexer.storage.metrics import (
OPERATIONS_METRIC,
OPERATIONS_UNIT_METRIC,
send_metric,
)
def test_send_metric_unknown_unit() -> None:
r = send_metric("content", count=10, method_name="content_add")
assert r is False
r = send_metric("sthg:add:bytes:extra", count=10, method_name="sthg_add")
assert r is False
def test_send_metric_no_value() -> None:
r = send_metric("content_mimetype:add", count=0, method_name="content_mimetype_add")
assert r is False
@patch("swh.indexer.storage.metrics.statsd.increment")
def test_send_metric_no_unit(mock_statsd) -> None:
r = send_metric(
"content_mimetype:add", count=10, method_name="content_mimetype_add"
)
mock_statsd.assert_called_with(
OPERATIONS_METRIC,
10,
tags={
"endpoint": "content_mimetype_add",
"object_type": "content_mimetype",
"operation": "add",
},
)
assert r
@patch("swh.indexer.storage.metrics.statsd.increment")
def test_send_metric_unit(mock_statsd) -> None:
unit_ = "bytes"
r = send_metric("c:add:%s" % unit_, count=100, method_name="c_add")
expected_metric = OPERATIONS_UNIT_METRIC.format(unit=unit_)
mock_statsd.assert_called_with(
expected_metric,
100,
- tags={"endpoint": "c_add", "object_type": "c", "operation": "add",},
+ tags={
+ "endpoint": "c_add",
+ "object_type": "c",
+ "operation": "add",
+ },
)
assert r
diff --git a/swh/indexer/tests/storage/test_server.py b/swh/indexer/tests/storage/test_server.py
index 91cf037..87aea2d 100644
--- a/swh/indexer/tests/storage/test_server.py
+++ b/swh/indexer/tests/storage/test_server.py
@@ -1,99 +1,104 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import pytest
import yaml
from swh.indexer.storage.api.server import load_and_check_config
def prepare_config_file(tmpdir, content, name="config.yml") -> str:
"""Prepare configuration file in `$tmpdir/name` with content `content`.
Args:
tmpdir (LocalPath): root directory
content (str/dict): Content of the file either as string or as a dict.
If a dict, converts the dict into a yaml string.
name (str): configuration filename
Returns
path (str) of the configuration file prepared.
"""
config_path = tmpdir / name
if isinstance(content, dict): # convert if needed
content = yaml.dump(content)
config_path.write_text(content, encoding="utf-8")
# pytest on python3.5 does not support LocalPath manipulation, so
# convert path to string
return str(config_path)
@pytest.mark.parametrize("config_path", [None, ""])
def test_load_and_check_config_no_configuration(config_path) -> None:
"""Irrelevant configuration file path raises"""
with pytest.raises(EnvironmentError, match="Configuration file must be defined"):
load_and_check_config(config_path)
def test_load_and_check_inexistent_config_path() -> None:
"""Inexistent configuration file raises"""
config_path = "/indexer/inexistent/config.yml"
expected_error = f"Configuration file {config_path} does not exist"
with pytest.raises(FileNotFoundError, match=expected_error):
load_and_check_config(config_path)
def test_load_and_check_config_wrong_configuration(tmpdir) -> None:
"""Wrong configuration raises"""
config_path = prepare_config_file(tmpdir, "something: useless")
with pytest.raises(KeyError, match="Missing '%indexer_storage' configuration"):
load_and_check_config(config_path)
@pytest.mark.parametrize("class_storage", ["remote", "memory"])
def test_load_and_check_config_remote_config_local_type_raise(
class_storage, tmpdir
) -> None:
"""Any other configuration than 'local' (the default) is rejected"""
assert class_storage != "local"
incompatible_config = {"indexer_storage": {"cls": class_storage}}
config_path = prepare_config_file(tmpdir, incompatible_config)
expected_error = (
"The indexer_storage backend can only be started with a 'local' "
"configuration"
)
with pytest.raises(ValueError, match=expected_error):
load_and_check_config(config_path)
with pytest.raises(ValueError, match=expected_error):
load_and_check_config(config_path, type="local")
def test_load_and_check_config_remote_config_fine(tmpdir) -> None:
"""'Remote configuration is fine (when changing the default type)"""
config = {"indexer_storage": {"cls": "remote"}}
config_path = prepare_config_file(tmpdir, config)
cfg = load_and_check_config(config_path, type="any")
assert cfg == config
def test_load_and_check_config_local_incomplete_configuration(tmpdir) -> None:
"""Incomplete 'local' configuration should raise"""
config = {"indexer_storage": {"cls": "local"}}
expected_error = "Invalid configuration; missing 'db' config entry"
config_path = prepare_config_file(tmpdir, config)
with pytest.raises(ValueError, match=expected_error):
load_and_check_config(config_path)
def test_load_and_check_config_local_config_fine(tmpdir) -> None:
"""'Complete 'local' configuration is fine"""
- config = {"indexer_storage": {"cls": "local", "db": "db",}}
+ config = {
+ "indexer_storage": {
+ "cls": "local",
+ "db": "db",
+ }
+ }
config_path = prepare_config_file(tmpdir, config)
cfg = load_and_check_config(config_path, type="local")
assert cfg == config
diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py
index 3067dc2..75f509a 100644
--- a/swh/indexer/tests/storage/test_storage.py
+++ b/swh/indexer/tests/storage/test_storage.py
@@ -1,1723 +1,1840 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import math
import threading
from typing import Any, Dict, List, Tuple, Type
import attr
import pytest
from swh.indexer.storage.exc import DuplicateId, IndexerStorageArgumentException
from swh.indexer.storage.interface import IndexerStorageInterface, PagedResult
from swh.indexer.storage.model import (
BaseRow,
ContentCtagsRow,
ContentLanguageRow,
ContentLicenseRow,
ContentMetadataRow,
ContentMimetypeRow,
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
from swh.model.hashutil import hash_to_bytes
def prepare_mimetypes_from_licenses(
fossology_licenses: List[ContentLicenseRow],
) -> List[ContentMimetypeRow]:
- """Fossology license needs some consistent data in db to run.
-
- """
+ """Fossology license needs some consistent data in db to run."""
mimetypes = []
for c in fossology_licenses:
mimetypes.append(
ContentMimetypeRow(
id=c.id,
mimetype="text/plain", # for filtering on textual data to work
encoding="utf-8",
indexer_configuration_id=c.indexer_configuration_id,
)
)
return mimetypes
def endpoint_name(etype: str, ename: str) -> str:
"""Compute the storage's endpoint's name
>>> endpoint_name('content_mimetype', 'add')
'content_mimetype_add'
>>> endpoint_name('content_fosso_license', 'delete')
'content_fosso_license_delete'
"""
return f"{etype}_{ename}"
def endpoint(storage, etype: str, ename: str):
return getattr(storage, endpoint_name(etype, ename))
def expected_summary(count: int, etype: str, ename: str = "add") -> Dict[str, int]:
"""Compute the expected summary
The key is determine according to etype and ename
>>> expected_summary(10, 'content_mimetype', 'add')
{'content_mimetype:add': 10}
>>> expected_summary(9, 'origin_intrinsic_metadata', 'delete')
{'origin_intrinsic_metadata:del': 9}
"""
pattern = ename[0:3]
key = endpoint_name(etype, ename).replace(f"_{ename}", f":{pattern}")
return {key: count}
def test_check_config(swh_indexer_storage) -> None:
assert swh_indexer_storage.check_config(check_write=True)
assert swh_indexer_storage.check_config(check_write=False)
class StorageETypeTester:
"""Base class for testing a series of common behaviour between a bunch of
endpoint types supported by an IndexerStorage.
This is supposed to be inherited with the following class attributes:
- endpoint_type
- tool_name
- example_data
See below for example usage.
"""
endpoint_type: str
tool_name: str
example_data: List[Dict]
row_class: Type[BaseRow]
def test_missing(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
etype = self.endpoint_type
tool_id = data.tools[self.tool_name]["id"]
# given 2 (hopefully) unknown objects
query = [
- {"id": data.sha1_1, "indexer_configuration_id": tool_id,},
- {"id": data.sha1_2, "indexer_configuration_id": tool_id,},
+ {
+ "id": data.sha1_1,
+ "indexer_configuration_id": tool_id,
+ },
+ {
+ "id": data.sha1_2,
+ "indexer_configuration_id": tool_id,
+ },
]
# we expect these are both returned by the xxx_missing endpoint
actual_missing = endpoint(storage, etype, "missing")(query)
assert list(actual_missing) == [
data.sha1_1,
data.sha1_2,
]
# now, when we add one of them
summary = endpoint(storage, etype, "add")(
[
self.row_class.from_dict(
{
"id": data.sha1_2,
**self.example_data[0],
"indexer_configuration_id": tool_id,
}
)
]
)
assert summary == expected_summary(1, etype)
# we expect only the other one returned
actual_missing = endpoint(storage, etype, "missing")(query)
assert list(actual_missing) == [data.sha1_1]
def test_add__update_in_place_duplicate(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
etype = self.endpoint_type
tool = data.tools[self.tool_name]
data_v1 = {
"id": data.sha1_2,
**self.example_data[0],
"indexer_configuration_id": tool["id"],
}
# given
summary = endpoint(storage, etype, "add")([self.row_class.from_dict(data_v1)])
assert summary == expected_summary(1, etype) # not added
# when
actual_data = list(endpoint(storage, etype, "get")([data.sha1_2]))
expected_data_v1 = [
self.row_class.from_dict(
{"id": data.sha1_2, **self.example_data[0], "tool": tool}
)
]
# then
assert actual_data == expected_data_v1
# given
data_v2 = data_v1.copy()
data_v2.update(self.example_data[1])
endpoint(storage, etype, "add")([self.row_class.from_dict(data_v2)])
assert summary == expected_summary(1, etype) # modified so counted
actual_data = list(endpoint(storage, etype, "get")([data.sha1_2]))
expected_data_v2 = [
self.row_class.from_dict(
- {"id": data.sha1_2, **self.example_data[1], "tool": tool,}
+ {
+ "id": data.sha1_2,
+ **self.example_data[1],
+ "tool": tool,
+ }
)
]
# data did change as the v2 was used to overwrite v1
assert actual_data == expected_data_v2
def test_add_deadlock(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
etype = self.endpoint_type
tool = data.tools[self.tool_name]
hashes = [
hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}".format(i))
for i in range(1000)
]
data_v1 = [
self.row_class.from_dict(
{
"id": hash_,
**self.example_data[0],
"indexer_configuration_id": tool["id"],
}
)
for hash_ in hashes
]
data_v2 = [
self.row_class.from_dict(
{
"id": hash_,
**self.example_data[1],
"indexer_configuration_id": tool["id"],
}
)
for hash_ in hashes
]
# Remove one item from each, so that both queries have to succeed for
# all items to be in the DB.
data_v2a = data_v2[1:]
data_v2b = list(reversed(data_v2[0:-1]))
# given
endpoint(storage, etype, "add")(data_v1)
# when
actual_data = sorted(
- endpoint(storage, etype, "get")(hashes), key=lambda x: x.id,
+ endpoint(storage, etype, "get")(hashes),
+ key=lambda x: x.id,
)
expected_data_v1 = [
self.row_class.from_dict(
{"id": hash_, **self.example_data[0], "tool": tool}
)
for hash_ in hashes
]
# then
assert actual_data == expected_data_v1
# given
def f1() -> None:
endpoint(storage, etype, "add")(data_v2a)
def f2() -> None:
endpoint(storage, etype, "add")(data_v2b)
t1 = threading.Thread(target=f1)
t2 = threading.Thread(target=f2)
t2.start()
t1.start()
t1.join()
t2.join()
actual_data = sorted(
- endpoint(storage, etype, "get")(hashes), key=lambda x: x.id,
+ endpoint(storage, etype, "get")(hashes),
+ key=lambda x: x.id,
)
expected_data_v2 = [
self.row_class.from_dict(
{"id": hash_, **self.example_data[1], "tool": tool}
)
for hash_ in hashes
]
assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2)
for (item, expected_item_v1, expected_item_v2) in zip(
actual_data, expected_data_v1, expected_data_v2
):
assert item in (expected_item_v1, expected_item_v2)
def test_add__duplicate_twice(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
etype = self.endpoint_type
tool = data.tools[self.tool_name]
data_rev1 = self.row_class.from_dict(
{
"id": data.revision_id_2,
**self.example_data[0],
"indexer_configuration_id": tool["id"],
}
)
data_rev2 = self.row_class.from_dict(
{
"id": data.revision_id_2,
**self.example_data[1],
"indexer_configuration_id": tool["id"],
}
)
# when
summary = endpoint(storage, etype, "add")([data_rev1])
assert summary == expected_summary(1, etype)
with pytest.raises(DuplicateId):
endpoint(storage, etype, "add")([data_rev2, data_rev2])
# then
actual_data = list(
endpoint(storage, etype, "get")([data.revision_id_2, data.revision_id_1])
)
expected_data = [
self.row_class.from_dict(
{"id": data.revision_id_2, **self.example_data[0], "tool": tool}
)
]
assert actual_data == expected_data
def test_add(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
etype = self.endpoint_type
tool = data.tools[self.tool_name]
# conftest fills it with mimetypes
storage.journal_writer.journal.objects = [] # type: ignore
query = [data.sha1_2, data.sha1_1]
data1 = self.row_class.from_dict(
{
"id": data.sha1_2,
**self.example_data[0],
"indexer_configuration_id": tool["id"],
}
)
# when
summary = endpoint(storage, etype, "add")([data1])
assert summary == expected_summary(1, etype)
# then
actual_data = list(endpoint(storage, etype, "get")(query))
# then
expected_data = [
self.row_class.from_dict(
{"id": data.sha1_2, **self.example_data[0], "tool": tool}
)
]
assert actual_data == expected_data
journal_objects = storage.journal_writer.journal.objects # type: ignore
actual_journal_data = [
obj for (obj_type, obj) in journal_objects if obj_type == self.endpoint_type
]
assert list(sorted(actual_journal_data)) == list(sorted(expected_data))
class TestIndexerStorageContentMimetypes(StorageETypeTester):
- """Test Indexer Storage content_mimetype related methods
- """
+ """Test Indexer Storage content_mimetype related methods"""
endpoint_type = "content_mimetype"
tool_name = "file"
example_data = [
- {"mimetype": "text/plain", "encoding": "utf-8",},
- {"mimetype": "text/html", "encoding": "us-ascii",},
+ {
+ "mimetype": "text/plain",
+ "encoding": "utf-8",
+ },
+ {
+ "mimetype": "text/html",
+ "encoding": "us-ascii",
+ },
]
row_class = ContentMimetypeRow
def test_generate_content_mimetype_get_partition_failure(
self, swh_indexer_storage: IndexerStorageInterface
) -> None:
"""get_partition call with wrong limit input should fail"""
storage = swh_indexer_storage
indexer_configuration_id = 42
with pytest.raises(
IndexerStorageArgumentException, match="limit should not be None"
):
storage.content_mimetype_get_partition(
indexer_configuration_id, 0, 3, limit=None # type: ignore
)
def test_generate_content_mimetype_get_partition_no_limit(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
"""get_partition should return result"""
storage, data = swh_indexer_storage_with_data
mimetypes = data.mimetypes
expected_ids = set([c.id for c in mimetypes])
indexer_configuration_id = mimetypes[0].indexer_configuration_id
assert len(mimetypes) == 16
nb_partitions = 16
actual_ids = []
for partition_id in range(nb_partitions):
actual_result = storage.content_mimetype_get_partition(
indexer_configuration_id, partition_id, nb_partitions
)
assert actual_result.next_page_token is None
actual_ids.extend(actual_result.results)
assert len(actual_ids) == len(expected_ids)
for actual_id in actual_ids:
assert actual_id in expected_ids
def test_generate_content_mimetype_get_partition_full(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
- """get_partition for a single partition should return available ids
-
- """
+ """get_partition for a single partition should return available ids"""
storage, data = swh_indexer_storage_with_data
mimetypes = data.mimetypes
expected_ids = set([c.id for c in mimetypes])
indexer_configuration_id = mimetypes[0].indexer_configuration_id
actual_result = storage.content_mimetype_get_partition(
indexer_configuration_id, 0, 1
)
assert actual_result.next_page_token is None
actual_ids = actual_result.results
assert len(actual_ids) == len(expected_ids)
for actual_id in actual_ids:
assert actual_id in expected_ids
def test_generate_content_mimetype_get_partition_empty(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
"""get_partition when at least one of the partitions is empty"""
storage, data = swh_indexer_storage_with_data
mimetypes = data.mimetypes
expected_ids = set([c.id for c in mimetypes])
indexer_configuration_id = mimetypes[0].indexer_configuration_id
# nb_partitions = smallest power of 2 such that at least one of
# the partitions is empty
nb_mimetypes = len(mimetypes)
nb_partitions = 1 << math.floor(math.log2(nb_mimetypes) + 1)
seen_ids = []
for partition_id in range(nb_partitions):
actual_result = storage.content_mimetype_get_partition(
indexer_configuration_id,
partition_id,
nb_partitions,
limit=nb_mimetypes + 1,
)
for actual_id in actual_result.results:
seen_ids.append(actual_id)
# Limit is higher than the max number of results
assert actual_result.next_page_token is None
assert set(seen_ids) == expected_ids
def test_generate_content_mimetype_get_partition_with_pagination(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
- """get_partition should return ids provided with pagination
-
- """
+ """get_partition should return ids provided with pagination"""
storage, data = swh_indexer_storage_with_data
mimetypes = data.mimetypes
expected_ids = set([c.id for c in mimetypes])
indexer_configuration_id = mimetypes[0].indexer_configuration_id
nb_partitions = 4
actual_ids = []
for partition_id in range(nb_partitions):
next_page_token = None
while True:
actual_result = storage.content_mimetype_get_partition(
indexer_configuration_id,
partition_id,
nb_partitions,
limit=2,
page_token=next_page_token,
)
actual_ids.extend(actual_result.results)
next_page_token = actual_result.next_page_token
if next_page_token is None:
break
assert len(set(actual_ids)) == len(set(expected_ids))
for actual_id in actual_ids:
assert actual_id in expected_ids
class TestIndexerStorageContentLanguage(StorageETypeTester):
- """Test Indexer Storage content_language related methods
- """
+ """Test Indexer Storage content_language related methods"""
endpoint_type = "content_language"
tool_name = "pygments"
example_data = [
- {"lang": "haskell",},
- {"lang": "common-lisp",},
+ {
+ "lang": "haskell",
+ },
+ {
+ "lang": "common-lisp",
+ },
]
row_class = ContentLanguageRow
class TestIndexerStorageContentCTags(StorageETypeTester):
- """Test Indexer Storage content_ctags related methods
- """
+ """Test Indexer Storage content_ctags related methods"""
endpoint_type = "content_ctags"
tool_name = "universal-ctags"
example_data = [
- {"name": "done", "kind": "variable", "line": 119, "lang": "OCaml",},
- {"name": "done", "kind": "variable", "line": 100, "lang": "Python",},
- {"name": "main", "kind": "function", "line": 119, "lang": "Python",},
+ {
+ "name": "done",
+ "kind": "variable",
+ "line": 119,
+ "lang": "OCaml",
+ },
+ {
+ "name": "done",
+ "kind": "variable",
+ "line": 100,
+ "lang": "Python",
+ },
+ {
+ "name": "main",
+ "kind": "function",
+ "line": 119,
+ "lang": "Python",
+ },
]
row_class = ContentCtagsRow
# the following tests are disabled because CTAGS behaves differently
@pytest.mark.skip
def test_add__update_in_place_duplicate(self):
pass
@pytest.mark.skip
def test_add_deadlock(self):
pass
def test_content_ctags_search(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# 1. given
tool = data.tools["universal-ctags"]
tool_id = tool["id"]
ctags1 = [
ContentCtagsRow(
id=data.sha1_1,
indexer_configuration_id=tool_id,
**kwargs, # type: ignore
)
for kwargs in [
- {"name": "hello", "kind": "function", "line": 133, "lang": "Python",},
- {"name": "counter", "kind": "variable", "line": 119, "lang": "Python",},
- {"name": "hello", "kind": "variable", "line": 210, "lang": "Python",},
+ {
+ "name": "hello",
+ "kind": "function",
+ "line": 133,
+ "lang": "Python",
+ },
+ {
+ "name": "counter",
+ "kind": "variable",
+ "line": 119,
+ "lang": "Python",
+ },
+ {
+ "name": "hello",
+ "kind": "variable",
+ "line": 210,
+ "lang": "Python",
+ },
]
]
ctags1_with_tool = [
attr.evolve(ctag, indexer_configuration_id=None, tool=tool)
for ctag in ctags1
]
ctags2 = [
ContentCtagsRow(
id=data.sha1_2,
indexer_configuration_id=tool_id,
**kwargs, # type: ignore
)
for kwargs in [
- {"name": "hello", "kind": "variable", "line": 100, "lang": "C",},
- {"name": "result", "kind": "variable", "line": 120, "lang": "C",},
+ {
+ "name": "hello",
+ "kind": "variable",
+ "line": 100,
+ "lang": "C",
+ },
+ {
+ "name": "result",
+ "kind": "variable",
+ "line": 120,
+ "lang": "C",
+ },
]
]
ctags2_with_tool = [
attr.evolve(ctag, indexer_configuration_id=None, tool=tool)
for ctag in ctags2
]
storage.content_ctags_add(ctags1 + ctags2)
# 1. when
actual_ctags = list(storage.content_ctags_search("hello", limit=1))
# 1. then
assert actual_ctags == [ctags1_with_tool[0]]
# 2. when
actual_ctags = list(
storage.content_ctags_search("hello", limit=1, last_sha1=data.sha1_1)
)
# 2. then
assert actual_ctags == [ctags2_with_tool[0]]
# 3. when
actual_ctags = list(storage.content_ctags_search("hello"))
# 3. then
assert actual_ctags == [
ctags1_with_tool[0],
ctags1_with_tool[2],
ctags2_with_tool[0],
]
# 4. when
actual_ctags = list(storage.content_ctags_search("counter"))
# then
assert actual_ctags == [ctags1_with_tool[1]]
# 5. when
actual_ctags = list(storage.content_ctags_search("result", limit=1))
# then
assert actual_ctags == [ctags2_with_tool[1]]
def test_content_ctags_search_no_result(
self, swh_indexer_storage: IndexerStorageInterface
) -> None:
storage = swh_indexer_storage
actual_ctags = list(storage.content_ctags_search("counter"))
assert not actual_ctags
def test_content_ctags_add__add_new_ctags_added(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool = data.tools["universal-ctags"]
tool_id = tool["id"]
ctag1 = ContentCtagsRow(
id=data.sha1_2,
indexer_configuration_id=tool_id,
name="done",
kind="variable",
line=100,
lang="Scheme",
)
ctag1_with_tool = attr.evolve(ctag1, indexer_configuration_id=None, tool=tool)
# given
storage.content_ctags_add([ctag1])
storage.content_ctags_add([ctag1]) # conflict does nothing
# when
actual_ctags = list(storage.content_ctags_get([data.sha1_2]))
# then
assert actual_ctags == [ctag1_with_tool]
# given
ctag2 = ContentCtagsRow(
id=data.sha1_2,
indexer_configuration_id=tool_id,
name="defn",
kind="function",
line=120,
lang="Scheme",
)
ctag2_with_tool = attr.evolve(ctag2, indexer_configuration_id=None, tool=tool)
storage.content_ctags_add([ctag2])
actual_ctags = list(storage.content_ctags_get([data.sha1_2]))
assert actual_ctags == [ctag1_with_tool, ctag2_with_tool]
def test_content_ctags_add__update_in_place(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool = data.tools["universal-ctags"]
tool_id = tool["id"]
ctag1 = ContentCtagsRow(
id=data.sha1_2,
indexer_configuration_id=tool_id,
name="done",
kind="variable",
line=100,
lang="Scheme",
)
ctag1_with_tool = attr.evolve(ctag1, indexer_configuration_id=None, tool=tool)
# given
storage.content_ctags_add([ctag1])
# when
actual_ctags = list(storage.content_ctags_get([data.sha1_2]))
# then
assert actual_ctags == [ctag1_with_tool]
# given
ctag2 = ContentCtagsRow(
id=data.sha1_2,
indexer_configuration_id=tool_id,
name="defn",
kind="function",
line=120,
lang="Scheme",
)
ctag2_with_tool = attr.evolve(ctag2, indexer_configuration_id=None, tool=tool)
storage.content_ctags_add([ctag1, ctag2])
actual_ctags = list(storage.content_ctags_get([data.sha1_2]))
assert actual_ctags == [ctag1_with_tool, ctag2_with_tool]
def test_add_empty(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
(storage, data) = swh_indexer_storage_with_data
etype = self.endpoint_type
summary = endpoint(storage, etype, "add")([])
assert summary == {"content_ctags:add": 0}
actual_ctags = list(endpoint(storage, etype, "get")([data.sha1_2]))
assert actual_ctags == []
def test_get_unknown(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
(storage, data) = swh_indexer_storage_with_data
etype = self.endpoint_type
actual_ctags = list(endpoint(storage, etype, "get")([data.sha1_2]))
assert actual_ctags == []
class TestIndexerStorageContentMetadata(StorageETypeTester):
- """Test Indexer Storage content_metadata related methods
- """
+ """Test Indexer Storage content_metadata related methods"""
tool_name = "swh-metadata-detector"
endpoint_type = "content_metadata"
example_data = [
{
"metadata": {
"other": {},
"codeRepository": {
"type": "git",
"url": "https://github.com/moranegg/metadata_test",
},
"description": "Simple package.json test for indexer",
"name": "test_metadata",
"version": "0.0.1",
},
},
- {"metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"},},
+ {
+ "metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"},
+ },
]
row_class = ContentMetadataRow
class TestIndexerStorageRevisionIntrinsicMetadata(StorageETypeTester):
- """Test Indexer Storage revision_intrinsic_metadata related methods
- """
+ """Test Indexer Storage revision_intrinsic_metadata related methods"""
tool_name = "swh-metadata-detector"
endpoint_type = "revision_intrinsic_metadata"
example_data = [
{
"metadata": {
"other": {},
"codeRepository": {
"type": "git",
"url": "https://github.com/moranegg/metadata_test",
},
"description": "Simple package.json test for indexer",
"name": "test_metadata",
"version": "0.0.1",
},
"mappings": ["mapping1"],
},
{
"metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"},
"mappings": ["mapping2"],
},
]
row_class = RevisionIntrinsicMetadataRow
class TestIndexerStorageContentFossologyLicense(StorageETypeTester):
endpoint_type = "content_fossology_license"
tool_name = "nomos"
example_data = [
{"license": "Apache-2.0"},
{"license": "BSD-2-Clause"},
]
row_class = ContentLicenseRow
# the following tests are disabled because licenses behaves differently
@pytest.mark.skip
def test_add__update_in_place_duplicate(self):
pass
@pytest.mark.skip
def test_add_deadlock(self):
pass
# content_fossology_license_missing does not exist
@pytest.mark.skip
def test_missing(self):
pass
def test_content_fossology_license_add__new_license_added(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool = data.tools["nomos"]
tool_id = tool["id"]
license1 = ContentLicenseRow(
- id=data.sha1_1, license="Apache-2.0", indexer_configuration_id=tool_id,
+ id=data.sha1_1,
+ license="Apache-2.0",
+ indexer_configuration_id=tool_id,
)
# given
storage.content_fossology_license_add([license1])
# conflict does nothing
storage.content_fossology_license_add([license1])
# when
actual_licenses = list(storage.content_fossology_license_get([data.sha1_1]))
# then
expected_licenses = [
- ContentLicenseRow(id=data.sha1_1, license="Apache-2.0", tool=tool,)
+ ContentLicenseRow(
+ id=data.sha1_1,
+ license="Apache-2.0",
+ tool=tool,
+ )
]
assert actual_licenses == expected_licenses
# given
license2 = ContentLicenseRow(
- id=data.sha1_1, license="BSD-2-Clause", indexer_configuration_id=tool_id,
+ id=data.sha1_1,
+ license="BSD-2-Clause",
+ indexer_configuration_id=tool_id,
)
storage.content_fossology_license_add([license2])
actual_licenses = list(storage.content_fossology_license_get([data.sha1_1]))
expected_licenses.append(
- ContentLicenseRow(id=data.sha1_1, license="BSD-2-Clause", tool=tool,)
+ ContentLicenseRow(
+ id=data.sha1_1,
+ license="BSD-2-Clause",
+ tool=tool,
+ )
)
# first license was not removed when the second one was added
assert sorted(actual_licenses) == sorted(expected_licenses)
def test_generate_content_fossology_license_get_partition_failure(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
"""get_partition call with wrong limit input should fail"""
storage, data = swh_indexer_storage_with_data
indexer_configuration_id = 42
with pytest.raises(
IndexerStorageArgumentException, match="limit should not be None"
):
storage.content_fossology_license_get_partition(
- indexer_configuration_id, 0, 3, limit=None, # type: ignore
+ indexer_configuration_id,
+ 0,
+ 3,
+ limit=None, # type: ignore
)
def test_generate_content_fossology_license_get_partition_no_limit(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
"""get_partition should return results"""
storage, data = swh_indexer_storage_with_data
# craft some consistent mimetypes
fossology_licenses = data.fossology_licenses
mimetypes = prepare_mimetypes_from_licenses(fossology_licenses)
indexer_configuration_id = fossology_licenses[0].indexer_configuration_id
storage.content_mimetype_add(mimetypes)
# add fossology_licenses to storage
storage.content_fossology_license_add(fossology_licenses)
# All ids from the db
expected_ids = set([c.id for c in fossology_licenses])
assert len(fossology_licenses) == 10
assert len(mimetypes) == 10
nb_partitions = 4
actual_ids = []
for partition_id in range(nb_partitions):
actual_result = storage.content_fossology_license_get_partition(
indexer_configuration_id, partition_id, nb_partitions
)
assert actual_result.next_page_token is None
actual_ids.extend(actual_result.results)
assert len(set(actual_ids)) == len(expected_ids)
for actual_id in actual_ids:
assert actual_id in expected_ids
def test_generate_content_fossology_license_get_partition_full(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
- """get_partition for a single partition should return available ids
-
- """
+ """get_partition for a single partition should return available ids"""
storage, data = swh_indexer_storage_with_data
# craft some consistent mimetypes
fossology_licenses = data.fossology_licenses
mimetypes = prepare_mimetypes_from_licenses(fossology_licenses)
indexer_configuration_id = fossology_licenses[0].indexer_configuration_id
storage.content_mimetype_add(mimetypes)
# add fossology_licenses to storage
storage.content_fossology_license_add(fossology_licenses)
# All ids from the db
expected_ids = set([c.id for c in fossology_licenses])
actual_result = storage.content_fossology_license_get_partition(
indexer_configuration_id, 0, 1
)
assert actual_result.next_page_token is None
actual_ids = actual_result.results
assert len(set(actual_ids)) == len(expected_ids)
for actual_id in actual_ids:
assert actual_id in expected_ids
def test_generate_content_fossology_license_get_partition_empty(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
"""get_partition when at least one of the partitions is empty"""
storage, data = swh_indexer_storage_with_data
# craft some consistent mimetypes
fossology_licenses = data.fossology_licenses
mimetypes = prepare_mimetypes_from_licenses(fossology_licenses)
indexer_configuration_id = fossology_licenses[0].indexer_configuration_id
storage.content_mimetype_add(mimetypes)
# add fossology_licenses to storage
storage.content_fossology_license_add(fossology_licenses)
# All ids from the db
expected_ids = set([c.id for c in fossology_licenses])
# nb_partitions = smallest power of 2 such that at least one of
# the partitions is empty
nb_licenses = len(fossology_licenses)
nb_partitions = 1 << math.floor(math.log2(nb_licenses) + 1)
seen_ids = []
for partition_id in range(nb_partitions):
actual_result = storage.content_fossology_license_get_partition(
indexer_configuration_id,
partition_id,
nb_partitions,
limit=nb_licenses + 1,
)
for actual_id in actual_result.results:
seen_ids.append(actual_id)
# Limit is higher than the max number of results
assert actual_result.next_page_token is None
assert set(seen_ids) == expected_ids
def test_generate_content_fossology_license_get_partition_with_pagination(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
- """get_partition should return ids provided with paginationv
-
- """
+ """get_partition should return ids provided with pagination"""
storage, data = swh_indexer_storage_with_data
# craft some consistent mimetypes
fossology_licenses = data.fossology_licenses
mimetypes = prepare_mimetypes_from_licenses(fossology_licenses)
indexer_configuration_id = fossology_licenses[0].indexer_configuration_id
storage.content_mimetype_add(mimetypes)
# add fossology_licenses to storage
storage.content_fossology_license_add(fossology_licenses)
# All ids from the db
expected_ids = [c.id for c in fossology_licenses]
nb_partitions = 4
actual_ids = []
for partition_id in range(nb_partitions):
next_page_token = None
while True:
actual_result = storage.content_fossology_license_get_partition(
indexer_configuration_id,
partition_id,
nb_partitions,
limit=2,
page_token=next_page_token,
)
actual_ids.extend(actual_result.results)
next_page_token = actual_result.next_page_token
if next_page_token is None:
break
assert len(set(actual_ids)) == len(set(expected_ids))
for actual_id in actual_ids:
assert actual_id in expected_ids
def test_add_empty(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
(storage, data) = swh_indexer_storage_with_data
etype = self.endpoint_type
summary = endpoint(storage, etype, "add")([])
assert summary == {"content_fossology_license:add": 0}
actual_license = list(endpoint(storage, etype, "get")([data.sha1_2]))
assert actual_license == []
def test_get_unknown(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
(storage, data) = swh_indexer_storage_with_data
etype = self.endpoint_type
actual_license = list(endpoint(storage, etype, "get")([data.sha1_2]))
assert actual_license == []
class TestIndexerStorageOriginIntrinsicMetadata:
def test_origin_intrinsic_metadata_add(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool_id = data.tools["swh-metadata-detector"]["id"]
metadata = {
"version": None,
"name": None,
}
metadata_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
metadata=metadata,
mappings=["mapping1"],
indexer_configuration_id=tool_id,
)
metadata_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata,
indexer_configuration_id=tool_id,
mappings=["mapping1"],
from_revision=data.revision_id_2,
)
# when
storage.revision_intrinsic_metadata_add([metadata_rev])
storage.origin_intrinsic_metadata_add([metadata_origin])
# then
actual_metadata = list(
storage.origin_intrinsic_metadata_get([data.origin_url_1, "no://where"])
)
expected_metadata = [
OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata,
tool=data.tools["swh-metadata-detector"],
from_revision=data.revision_id_2,
mappings=["mapping1"],
)
]
assert actual_metadata == expected_metadata
journal_objects = storage.journal_writer.journal.objects # type: ignore
actual_journal_metadata = [
obj
for (obj_type, obj) in journal_objects
if obj_type == "origin_intrinsic_metadata"
]
assert list(sorted(actual_journal_metadata)) == list(sorted(expected_metadata))
def test_origin_intrinsic_metadata_add_update_in_place_duplicate(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool_id = data.tools["swh-metadata-detector"]["id"]
metadata_v1: Dict[str, Any] = {
"version": None,
"name": None,
}
metadata_rev_v1 = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
metadata=metadata_v1,
mappings=[],
indexer_configuration_id=tool_id,
)
metadata_origin_v1 = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata_v1.copy(),
indexer_configuration_id=tool_id,
mappings=[],
from_revision=data.revision_id_2,
)
# given
storage.revision_intrinsic_metadata_add([metadata_rev_v1])
storage.origin_intrinsic_metadata_add([metadata_origin_v1])
# when
actual_metadata = list(
storage.origin_intrinsic_metadata_get([data.origin_url_1])
)
# then
expected_metadata_v1 = [
OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata_v1,
tool=data.tools["swh-metadata-detector"],
from_revision=data.revision_id_2,
mappings=[],
)
]
assert actual_metadata == expected_metadata_v1
# given
metadata_v2 = metadata_v1.copy()
metadata_v2.update(
- {"name": "test_update_duplicated_metadata", "author": "MG",}
+ {
+ "name": "test_update_duplicated_metadata",
+ "author": "MG",
+ }
)
metadata_rev_v2 = attr.evolve(metadata_rev_v1, metadata=metadata_v2)
metadata_origin_v2 = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata_v2.copy(),
indexer_configuration_id=tool_id,
mappings=["npm"],
from_revision=data.revision_id_1,
)
storage.revision_intrinsic_metadata_add([metadata_rev_v2])
storage.origin_intrinsic_metadata_add([metadata_origin_v2])
actual_metadata = list(
storage.origin_intrinsic_metadata_get([data.origin_url_1])
)
expected_metadata_v2 = [
OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata_v2,
tool=data.tools["swh-metadata-detector"],
from_revision=data.revision_id_1,
mappings=["npm"],
)
]
# metadata did change as the v2 was used to overwrite v1
assert actual_metadata == expected_metadata_v2
def test_origin_intrinsic_metadata_add__deadlock(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool_id = data.tools["swh-metadata-detector"]["id"]
origins = ["file:///tmp/origin{:02d}".format(i) for i in range(100)]
example_data1: Dict[str, Any] = {
- "metadata": {"version": None, "name": None,},
+ "metadata": {
+ "version": None,
+ "name": None,
+ },
"mappings": [],
}
example_data2: Dict[str, Any] = {
- "metadata": {"version": "v1.1.1", "name": "foo",},
+ "metadata": {
+ "version": "v1.1.1",
+ "name": "foo",
+ },
"mappings": [],
}
metadata_rev_v1 = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
- metadata={"version": None, "name": None,},
+ metadata={
+ "version": None,
+ "name": None,
+ },
mappings=[],
indexer_configuration_id=tool_id,
)
data_v1 = [
OriginIntrinsicMetadataRow(
id=origin,
from_revision=data.revision_id_2,
indexer_configuration_id=tool_id,
**example_data1,
)
for origin in origins
]
data_v2 = [
OriginIntrinsicMetadataRow(
id=origin,
from_revision=data.revision_id_2,
indexer_configuration_id=tool_id,
**example_data2,
)
for origin in origins
]
# Remove one item from each, so that both queries have to succeed for
# all items to be in the DB.
data_v2a = data_v2[1:]
data_v2b = list(reversed(data_v2[0:-1]))
# given
storage.revision_intrinsic_metadata_add([metadata_rev_v1])
storage.origin_intrinsic_metadata_add(data_v1)
# when
actual_data = list(storage.origin_intrinsic_metadata_get(origins))
expected_data_v1 = [
OriginIntrinsicMetadataRow(
id=origin,
from_revision=data.revision_id_2,
tool=data.tools["swh-metadata-detector"],
**example_data1,
)
for origin in origins
]
# then
assert actual_data == expected_data_v1
# given
def f1() -> None:
storage.origin_intrinsic_metadata_add(data_v2a)
def f2() -> None:
storage.origin_intrinsic_metadata_add(data_v2b)
t1 = threading.Thread(target=f1)
t2 = threading.Thread(target=f2)
t2.start()
t1.start()
t1.join()
t2.join()
actual_data = list(storage.origin_intrinsic_metadata_get(origins))
expected_data_v2 = [
OriginIntrinsicMetadataRow(
id=origin,
from_revision=data.revision_id_2,
tool=data.tools["swh-metadata-detector"],
**example_data2,
)
for origin in origins
]
actual_data.sort(key=lambda item: item.id)
assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2)
for (item, expected_item_v1, expected_item_v2) in zip(
actual_data, expected_data_v1, expected_data_v2
):
assert item in (expected_item_v1, expected_item_v2)
def test_origin_intrinsic_metadata_add__duplicate_twice(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool_id = data.tools["swh-metadata-detector"]["id"]
metadata = {
"developmentStatus": None,
"name": None,
}
metadata_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
metadata=metadata,
mappings=["mapping1"],
indexer_configuration_id=tool_id,
)
metadata_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata,
indexer_configuration_id=tool_id,
mappings=["mapping1"],
from_revision=data.revision_id_2,
)
# when
storage.revision_intrinsic_metadata_add([metadata_rev])
with pytest.raises(DuplicateId):
storage.origin_intrinsic_metadata_add([metadata_origin, metadata_origin])
def test_origin_intrinsic_metadata_search_fulltext(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool_id = data.tools["swh-metadata-detector"]["id"]
metadata1 = {
"author": "John Doe",
}
metadata1_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_1,
metadata=metadata1,
mappings=[],
indexer_configuration_id=tool_id,
)
metadata1_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata1,
mappings=[],
indexer_configuration_id=tool_id,
from_revision=data.revision_id_1,
)
metadata2 = {
"author": "Jane Doe",
}
metadata2_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
metadata=metadata2,
mappings=[],
indexer_configuration_id=tool_id,
)
metadata2_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_2,
metadata=metadata2,
mappings=[],
indexer_configuration_id=tool_id,
from_revision=data.revision_id_2,
)
# when
storage.revision_intrinsic_metadata_add([metadata1_rev])
storage.origin_intrinsic_metadata_add([metadata1_origin])
storage.revision_intrinsic_metadata_add([metadata2_rev])
storage.origin_intrinsic_metadata_add([metadata2_origin])
# then
search = storage.origin_intrinsic_metadata_search_fulltext
assert set([res.id for res in search(["Doe"])]) == set(
[data.origin_url_1, data.origin_url_2]
)
assert [res.id for res in search(["John", "Doe"])] == [data.origin_url_1]
assert [res.id for res in search(["John"])] == [data.origin_url_1]
assert not list(search(["John", "Jane"]))
def test_origin_intrinsic_metadata_search_fulltext_rank(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
# given
tool_id = data.tools["swh-metadata-detector"]["id"]
# The following authors have "Random Person" to add some more content
# to the JSON data, to work around normalization quirks when there
# are few words (rank/(1+ln(nb_words)) is very sensitive to nb_words
# for small values of nb_words).
- metadata1 = {"author": ["Random Person", "John Doe", "Jane Doe",]}
+ metadata1 = {
+ "author": [
+ "Random Person",
+ "John Doe",
+ "Jane Doe",
+ ]
+ }
metadata1_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_1,
metadata=metadata1,
mappings=[],
indexer_configuration_id=tool_id,
)
metadata1_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata1,
mappings=[],
indexer_configuration_id=tool_id,
from_revision=data.revision_id_1,
)
- metadata2 = {"author": ["Random Person", "Jane Doe",]}
+ metadata2 = {
+ "author": [
+ "Random Person",
+ "Jane Doe",
+ ]
+ }
metadata2_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
metadata=metadata2,
mappings=[],
indexer_configuration_id=tool_id,
)
metadata2_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_2,
metadata=metadata2,
mappings=[],
indexer_configuration_id=tool_id,
from_revision=data.revision_id_2,
)
# when
storage.revision_intrinsic_metadata_add([metadata1_rev])
storage.origin_intrinsic_metadata_add([metadata1_origin])
storage.revision_intrinsic_metadata_add([metadata2_rev])
storage.origin_intrinsic_metadata_add([metadata2_origin])
# then
search = storage.origin_intrinsic_metadata_search_fulltext
assert [res.id for res in search(["Doe"])] == [
data.origin_url_1,
data.origin_url_2,
]
assert [res.id for res in search(["Doe"], limit=1)] == [data.origin_url_1]
assert [res.id for res in search(["John"])] == [data.origin_url_1]
assert [res.id for res in search(["Jane"])] == [
data.origin_url_2,
data.origin_url_1,
]
assert [res.id for res in search(["John", "Jane"])] == [data.origin_url_1]
def _fill_origin_intrinsic_metadata(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool1_id = data.tools["swh-metadata-detector"]["id"]
tool2_id = data.tools["swh-metadata-detector2"]["id"]
metadata1 = {
"@context": "foo",
"author": "John Doe",
}
metadata1_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_1,
metadata=metadata1,
mappings=["npm"],
indexer_configuration_id=tool1_id,
)
metadata1_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_1,
metadata=metadata1,
mappings=["npm"],
indexer_configuration_id=tool1_id,
from_revision=data.revision_id_1,
)
metadata2 = {
"@context": "foo",
"author": "Jane Doe",
}
metadata2_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_2,
metadata=metadata2,
mappings=["npm", "gemspec"],
indexer_configuration_id=tool2_id,
)
metadata2_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_2,
metadata=metadata2,
mappings=["npm", "gemspec"],
indexer_configuration_id=tool2_id,
from_revision=data.revision_id_2,
)
metadata3 = {
"@context": "foo",
}
metadata3_rev = RevisionIntrinsicMetadataRow(
id=data.revision_id_3,
metadata=metadata3,
mappings=["npm", "gemspec"],
indexer_configuration_id=tool2_id,
)
metadata3_origin = OriginIntrinsicMetadataRow(
id=data.origin_url_3,
metadata=metadata3,
mappings=["pkg-info"],
indexer_configuration_id=tool2_id,
from_revision=data.revision_id_3,
)
storage.revision_intrinsic_metadata_add([metadata1_rev])
storage.origin_intrinsic_metadata_add([metadata1_origin])
storage.revision_intrinsic_metadata_add([metadata2_rev])
storage.origin_intrinsic_metadata_add([metadata2_origin])
storage.revision_intrinsic_metadata_add([metadata3_rev])
storage.origin_intrinsic_metadata_add([metadata3_origin])
def test_origin_intrinsic_metadata_search_by_producer(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
self._fill_origin_intrinsic_metadata(swh_indexer_storage_with_data)
tool1 = data.tools["swh-metadata-detector"]
tool2 = data.tools["swh-metadata-detector2"]
endpoint = storage.origin_intrinsic_metadata_search_by_producer
# test pagination
# no 'page_token' param, return all origins
result = endpoint(ids_only=True)
assert result == PagedResult(
- results=[data.origin_url_1, data.origin_url_2, data.origin_url_3,],
+ results=[
+ data.origin_url_1,
+ data.origin_url_2,
+ data.origin_url_3,
+ ],
next_page_token=None,
)
# 'page_token' is < than origin_1, return everything
result = endpoint(page_token=data.origin_url_1[:-1], ids_only=True)
assert result == PagedResult(
- results=[data.origin_url_1, data.origin_url_2, data.origin_url_3,],
+ results=[
+ data.origin_url_1,
+ data.origin_url_2,
+ data.origin_url_3,
+ ],
next_page_token=None,
)
# 'page_token' is origin_3, return nothing
result = endpoint(page_token=data.origin_url_3, ids_only=True)
assert result == PagedResult(results=[], next_page_token=None)
# test limit argument
result = endpoint(page_token=data.origin_url_1[:-1], limit=2, ids_only=True)
assert result == PagedResult(
results=[data.origin_url_1, data.origin_url_2],
next_page_token=data.origin_url_2,
)
result = endpoint(page_token=data.origin_url_1, limit=2, ids_only=True)
assert result == PagedResult(
- results=[data.origin_url_2, data.origin_url_3], next_page_token=None,
+ results=[data.origin_url_2, data.origin_url_3],
+ next_page_token=None,
)
result = endpoint(page_token=data.origin_url_2, limit=2, ids_only=True)
- assert result == PagedResult(results=[data.origin_url_3], next_page_token=None,)
+ assert result == PagedResult(
+ results=[data.origin_url_3],
+ next_page_token=None,
+ )
# test mappings filtering
result = endpoint(mappings=["npm"], ids_only=True)
assert result == PagedResult(
- results=[data.origin_url_1, data.origin_url_2], next_page_token=None,
+ results=[data.origin_url_1, data.origin_url_2],
+ next_page_token=None,
)
result = endpoint(mappings=["npm", "gemspec"], ids_only=True)
assert result == PagedResult(
- results=[data.origin_url_1, data.origin_url_2], next_page_token=None,
+ results=[data.origin_url_1, data.origin_url_2],
+ next_page_token=None,
)
result = endpoint(mappings=["gemspec"], ids_only=True)
- assert result == PagedResult(results=[data.origin_url_2], next_page_token=None,)
+ assert result == PagedResult(
+ results=[data.origin_url_2],
+ next_page_token=None,
+ )
result = endpoint(mappings=["pkg-info"], ids_only=True)
- assert result == PagedResult(results=[data.origin_url_3], next_page_token=None,)
+ assert result == PagedResult(
+ results=[data.origin_url_3],
+ next_page_token=None,
+ )
result = endpoint(mappings=["foobar"], ids_only=True)
- assert result == PagedResult(results=[], next_page_token=None,)
+ assert result == PagedResult(
+ results=[],
+ next_page_token=None,
+ )
# test pagination + mappings
result = endpoint(mappings=["npm"], limit=1, ids_only=True)
assert result == PagedResult(
- results=[data.origin_url_1], next_page_token=data.origin_url_1,
+ results=[data.origin_url_1],
+ next_page_token=data.origin_url_1,
)
# test tool filtering
result = endpoint(tool_ids=[tool1["id"]], ids_only=True)
- assert result == PagedResult(results=[data.origin_url_1], next_page_token=None,)
+ assert result == PagedResult(
+ results=[data.origin_url_1],
+ next_page_token=None,
+ )
result = endpoint(tool_ids=[tool2["id"]], ids_only=True)
assert sorted(result.results) == [data.origin_url_2, data.origin_url_3]
assert result.next_page_token is None
result = endpoint(tool_ids=[tool1["id"], tool2["id"]], ids_only=True)
assert sorted(result.results) == [
data.origin_url_1,
data.origin_url_2,
data.origin_url_3,
]
assert result.next_page_token is None
# test ids_only=False
assert endpoint(mappings=["gemspec"]) == PagedResult(
results=[
OriginIntrinsicMetadataRow(
id=data.origin_url_2,
- metadata={"@context": "foo", "author": "Jane Doe",},
+ metadata={
+ "@context": "foo",
+ "author": "Jane Doe",
+ },
mappings=["npm", "gemspec"],
tool=tool2,
from_revision=data.revision_id_2,
)
],
next_page_token=None,
)
def test_origin_intrinsic_metadata_stats(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
self._fill_origin_intrinsic_metadata(swh_indexer_storage_with_data)
result = storage.origin_intrinsic_metadata_stats()
assert result == {
"per_mapping": {
"cff": 0,
"gemspec": 1,
"npm": 2,
"pkg-info": 1,
"codemeta": 0,
"maven": 0,
},
"total": 3,
"non_empty": 2,
}
class TestIndexerStorageIndexerConfiguration:
def test_indexer_configuration_add(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool = {
"tool_name": "some-unknown-tool",
"tool_version": "some-version",
"tool_configuration": {"debian-package": "some-package"},
}
actual_tool = storage.indexer_configuration_get(tool)
assert actual_tool is None # does not exist
# add it
actual_tools = list(storage.indexer_configuration_add([tool]))
assert len(actual_tools) == 1
actual_tool = actual_tools[0]
assert actual_tool is not None # now it exists
new_id = actual_tool.pop("id")
assert actual_tool == tool
actual_tools2 = list(storage.indexer_configuration_add([tool]))
actual_tool2 = actual_tools2[0]
assert actual_tool2 is not None # now it exists
new_id2 = actual_tool2.pop("id")
assert new_id == new_id2
assert actual_tool == actual_tool2
def test_indexer_configuration_add_multiple(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool = {
"tool_name": "some-unknown-tool",
"tool_version": "some-version",
"tool_configuration": {"debian-package": "some-package"},
}
actual_tools = list(storage.indexer_configuration_add([tool]))
assert len(actual_tools) == 1
new_tools = [
tool,
{
"tool_name": "yet-another-tool",
"tool_version": "version",
"tool_configuration": {},
},
]
actual_tools = list(storage.indexer_configuration_add(new_tools))
assert len(actual_tools) == 2
# order not guaranteed, so we iterate over results to check
for tool in actual_tools:
_id = tool.pop("id")
assert _id is not None
assert tool in new_tools
def test_indexer_configuration_get_missing(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool = {
"tool_name": "unknown-tool",
"tool_version": "3.1.0rc2-31-ga2cbb8c",
"tool_configuration": {"command_line": "nomossa <filepath>"},
}
actual_tool = storage.indexer_configuration_get(tool)
assert actual_tool is None
def test_indexer_configuration_get(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool = {
"tool_name": "nomos",
"tool_version": "3.1.0rc2-31-ga2cbb8c",
"tool_configuration": {"command_line": "nomossa <filepath>"},
}
actual_tool = storage.indexer_configuration_get(tool)
assert actual_tool
expected_tool = tool.copy()
del actual_tool["id"]
assert expected_tool == actual_tool
def test_indexer_configuration_metadata_get_missing_context(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool = {
"tool_name": "swh-metadata-translator",
"tool_version": "0.0.1",
"tool_configuration": {"context": "unknown-context"},
}
actual_tool = storage.indexer_configuration_get(tool)
assert actual_tool is None
def test_indexer_configuration_metadata_get(
self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any]
) -> None:
storage, data = swh_indexer_storage_with_data
tool = {
"tool_name": "swh-metadata-translator",
"tool_version": "0.0.1",
"tool_configuration": {"type": "local", "context": "NpmMapping"},
}
storage.indexer_configuration_add([tool])
actual_tool = storage.indexer_configuration_get(tool)
assert actual_tool
expected_tool = tool.copy()
expected_tool["id"] = actual_tool["id"]
assert expected_tool == actual_tool
diff --git a/swh/indexer/tests/tasks.py b/swh/indexer/tests/tasks.py
index a6011c7..0cd55fd 100644
--- a/swh/indexer/tests/tasks.py
+++ b/swh/indexer/tests/tasks.py
@@ -1,47 +1,47 @@
from celery import current_app as app
from swh.indexer.metadata import OriginMetadataIndexer, RevisionMetadataIndexer
from .test_metadata import ContentMetadataTestIndexer
from .test_origin_head import OriginHeadTestIndexer
from .utils import BASE_TEST_CONFIG
class RevisionMetadataTestIndexer(RevisionMetadataIndexer):
"""Specific indexer whose configuration is enough to satisfy the
- indexing tests.
+ indexing tests.
"""
ContentMetadataIndexer = ContentMetadataTestIndexer
def parse_config_file(self, *args, **kwargs):
return {
**BASE_TEST_CONFIG,
"tools": {
"name": "swh-metadata-detector",
"version": "0.0.2",
"configuration": {"type": "local", "context": "NpmMapping"},
},
}
class OriginMetadataTestIndexer(OriginMetadataIndexer):
def parse_config_file(self, *args, **kwargs):
return {**BASE_TEST_CONFIG, "tools": []}
def _prepare_sub_indexers(self):
self.origin_head_indexer = OriginHeadTestIndexer()
self.revision_metadata_indexer = RevisionMetadataTestIndexer()
@app.task
def revision_intrinsic_metadata(*args, **kwargs):
indexer = RevisionMetadataTestIndexer()
indexer.run(*args, **kwargs)
print("REV RESULT=", indexer.results)
@app.task
def origin_intrinsic_metadata(*args, **kwargs):
indexer = OriginMetadataTestIndexer()
indexer.run(*args, **kwargs)
diff --git a/swh/indexer/tests/test_cli.py b/swh/indexer/tests/test_cli.py
index 97698d3..e0628df 100644
--- a/swh/indexer/tests/test_cli.py
+++ b/swh/indexer/tests/test_cli.py
@@ -1,501 +1,525 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
from functools import reduce
import re
from typing import Any, Dict, List
from unittest.mock import patch
from click.testing import CliRunner
from confluent_kafka import Consumer
import pytest
from swh.indexer.cli import indexer_cli_group
from swh.indexer.storage.interface import IndexerStorageInterface
from swh.indexer.storage.model import (
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
from swh.journal.writer import get_journal_writer
from swh.model.hashutil import hash_to_bytes
from swh.model.model import OriginVisitStatus
def fill_idx_storage(idx_storage: IndexerStorageInterface, nb_rows: int) -> List[int]:
tools: List[Dict[str, Any]] = [
- {"tool_name": "tool %d" % i, "tool_version": "0.0.1", "tool_configuration": {},}
+ {
+ "tool_name": "tool %d" % i,
+ "tool_version": "0.0.1",
+ "tool_configuration": {},
+ }
for i in range(2)
]
tools = idx_storage.indexer_configuration_add(tools)
origin_metadata = [
OriginIntrinsicMetadataRow(
id="file://dev/%04d" % origin_id,
from_revision=hash_to_bytes("abcd{:0>36}".format(origin_id)),
indexer_configuration_id=tools[origin_id % 2]["id"],
metadata={"name": "origin %d" % origin_id},
mappings=["mapping%d" % (origin_id % 10)],
)
for origin_id in range(nb_rows)
]
revision_metadata = [
RevisionIntrinsicMetadataRow(
id=hash_to_bytes("abcd{:0>36}".format(origin_id)),
indexer_configuration_id=tools[origin_id % 2]["id"],
metadata={"name": "origin %d" % origin_id},
mappings=["mapping%d" % (origin_id % 10)],
)
for origin_id in range(nb_rows)
]
idx_storage.revision_intrinsic_metadata_add(revision_metadata)
idx_storage.origin_intrinsic_metadata_add(origin_metadata)
return [tool["id"] for tool in tools]
def _origins_in_task_args(tasks):
"""Returns the set of origins contained in the arguments of the
provided tasks (assumed to be of type index-origin-metadata)."""
return reduce(
set.union, (set(task["arguments"]["args"][0]) for task in tasks), set()
)
def _assert_tasks_for_origins(tasks, origins):
expected_kwargs = {}
assert {task["type"] for task in tasks} == {"index-origin-metadata"}
assert all(len(task["arguments"]["args"]) == 1 for task in tasks)
for task in tasks:
assert task["arguments"]["kwargs"] == expected_kwargs, task
assert _origins_in_task_args(tasks) == set(["file://dev/%04d" % i for i in origins])
@pytest.fixture
def cli_runner():
return CliRunner()
def test_cli_mapping_list(cli_runner, swh_config):
result = cli_runner.invoke(
indexer_cli_group,
["-C", swh_config, "mapping", "list"],
catch_exceptions=False,
)
expected_output = "\n".join(
[
"cff",
"codemeta",
"gemspec",
"maven",
"npm",
"pkg-info",
"",
] # must be sorted for test to pass
)
assert result.exit_code == 0, result.output
assert result.output == expected_output
def test_cli_mapping_list_terms(cli_runner, swh_config):
result = cli_runner.invoke(
indexer_cli_group,
["-C", swh_config, "mapping", "list-terms"],
catch_exceptions=False,
)
assert result.exit_code == 0, result.output
assert re.search(r"http://schema.org/url:\n.*npm", result.output)
assert re.search(r"http://schema.org/url:\n.*codemeta", result.output)
assert re.search(
r"https://codemeta.github.io/terms/developmentStatus:\n\tcodemeta",
result.output,
)
def test_cli_mapping_list_terms_exclude(cli_runner, swh_config):
result = cli_runner.invoke(
indexer_cli_group,
["-C", swh_config, "mapping", "list-terms", "--exclude-mapping", "codemeta"],
catch_exceptions=False,
)
assert result.exit_code == 0, result.output
assert re.search(r"http://schema.org/url:\n.*npm", result.output)
assert not re.search(r"http://schema.org/url:\n.*codemeta", result.output)
assert not re.search(
r"https://codemeta.github.io/terms/developmentStatus:\n\tcodemeta",
result.output,
)
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_empty_db(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
result = cli_runner.invoke(
indexer_cli_group,
- ["-C", swh_config, "schedule", "reindex_origin_metadata",],
+ [
+ "-C",
+ swh_config,
+ "schedule",
+ "reindex_origin_metadata",
+ ],
catch_exceptions=False,
)
expected_output = "Nothing to do (no origin metadata matched the criteria).\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 0
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_divisor(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
fill_idx_storage(idx_storage, 90)
result = cli_runner.invoke(
indexer_cli_group,
- ["-C", swh_config, "schedule", "reindex_origin_metadata",],
+ [
+ "-C",
+ swh_config,
+ "schedule",
+ "reindex_origin_metadata",
+ ],
catch_exceptions=False,
)
# Check the output
expected_output = (
"Scheduled 3 tasks (30 origins).\n"
"Scheduled 6 tasks (60 origins).\n"
"Scheduled 9 tasks (90 origins).\n"
"Done.\n"
)
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 9
_assert_tasks_for_origins(tasks, range(90))
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_dry_run(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
fill_idx_storage(idx_storage, 90)
result = cli_runner.invoke(
indexer_cli_group,
- ["-C", swh_config, "schedule", "--dry-run", "reindex_origin_metadata",],
+ [
+ "-C",
+ swh_config,
+ "schedule",
+ "--dry-run",
+ "reindex_origin_metadata",
+ ],
catch_exceptions=False,
)
# Check the output
expected_output = (
"Scheduled 3 tasks (30 origins).\n"
"Scheduled 6 tasks (60 origins).\n"
"Scheduled 9 tasks (90 origins).\n"
"Done.\n"
)
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 0
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_nondivisor(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
"""Tests the re-indexing when neither origin_batch_size or
task_batch_size is a divisor of nb_origins."""
fill_idx_storage(idx_storage, 70)
result = cli_runner.invoke(
indexer_cli_group,
[
"-C",
swh_config,
"schedule",
"reindex_origin_metadata",
"--batch-size",
"20",
],
catch_exceptions=False,
)
# Check the output
expected_output = (
"Scheduled 3 tasks (60 origins).\n"
"Scheduled 4 tasks (70 origins).\n"
"Done.\n"
)
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 4
_assert_tasks_for_origins(tasks, range(70))
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_filter_one_mapping(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
fill_idx_storage(idx_storage, 110)
result = cli_runner.invoke(
indexer_cli_group,
[
"-C",
swh_config,
"schedule",
"reindex_origin_metadata",
"--mapping",
"mapping1",
],
catch_exceptions=False,
)
# Check the output
expected_output = "Scheduled 2 tasks (11 origins).\nDone.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 2
_assert_tasks_for_origins(tasks, [1, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101])
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_filter_two_mappings(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
fill_idx_storage(idx_storage, 110)
result = cli_runner.invoke(
indexer_cli_group,
[
"--config-file",
swh_config,
"schedule",
"reindex_origin_metadata",
"--mapping",
"mapping1",
"--mapping",
"mapping2",
],
catch_exceptions=False,
)
# Check the output
expected_output = "Scheduled 3 tasks (22 origins).\nDone.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 3
_assert_tasks_for_origins(
tasks,
[
1,
11,
21,
31,
41,
51,
61,
71,
81,
91,
101,
2,
12,
22,
32,
42,
52,
62,
72,
82,
92,
102,
],
)
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
@patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3)
def test_cli_origin_metadata_reindex_filter_one_tool(
cli_runner, swh_config, indexer_scheduler, idx_storage, storage
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
tool_ids = fill_idx_storage(idx_storage, 110)
result = cli_runner.invoke(
indexer_cli_group,
[
"-C",
swh_config,
"schedule",
"reindex_origin_metadata",
"--tool-id",
str(tool_ids[0]),
],
catch_exceptions=False,
)
# Check the output
expected_output = (
"Scheduled 3 tasks (30 origins).\n"
"Scheduled 6 tasks (55 origins).\n"
"Done.\n"
)
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks()
assert len(tasks) == 6
_assert_tasks_for_origins(tasks, [x * 2 for x in range(55)])
def now():
return datetime.datetime.now(tz=datetime.timezone.utc)
def test_cli_journal_client(
cli_runner,
swh_config,
indexer_scheduler,
kafka_prefix: str,
kafka_server,
consumer: Consumer,
):
"""Test the 'swh indexer journal-client' cli tool."""
journal_writer = get_journal_writer(
"kafka",
brokers=[kafka_server],
prefix=kafka_prefix,
client_id="test producer",
value_sanitizer=lambda object_type, value: value,
flush_timeout=3, # fail early if something is going wrong
)
visit_statuses = [
OriginVisitStatus(
origin="file:///dev/zero",
visit=1,
date=now(),
status="full",
snapshot=None,
),
OriginVisitStatus(
origin="file:///dev/foobar",
visit=2,
date=now(),
status="full",
snapshot=None,
),
OriginVisitStatus(
origin="file:///tmp/spamegg",
visit=3,
date=now(),
status="full",
snapshot=None,
),
OriginVisitStatus(
origin="file:///dev/0002",
visit=6,
date=now(),
status="full",
snapshot=None,
),
OriginVisitStatus( # will be filtered out due to its 'partial' status
origin="file:///dev/0000",
visit=4,
date=now(),
status="partial",
snapshot=None,
),
OriginVisitStatus( # will be filtered out due to its 'ongoing' status
origin="file:///dev/0001",
visit=5,
date=now(),
status="ongoing",
snapshot=None,
),
]
journal_writer.write_additions("origin_visit_status", visit_statuses)
visit_statuses_full = [vs for vs in visit_statuses if vs.status == "full"]
result = cli_runner.invoke(
indexer_cli_group,
[
"-C",
swh_config,
"journal-client",
"--broker",
kafka_server,
"--prefix",
kafka_prefix,
"--group-id",
"test-consumer",
"--stop-after-objects",
len(visit_statuses),
"--origin-metadata-task-type",
"index-origin-metadata",
],
catch_exceptions=False,
)
# Check the output
expected_output = "Done.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
# Check scheduled tasks
tasks = indexer_scheduler.search_tasks(task_type="index-origin-metadata")
# This can be split into multiple tasks but no more than the origin-visit-statuses
# written in the journal
assert len(tasks) <= len(visit_statuses_full)
actual_origins = []
for task in tasks:
actual_task = dict(task)
assert actual_task["type"] == "index-origin-metadata"
scheduled_origins = actual_task["arguments"]["args"][0]
actual_origins.extend(scheduled_origins)
assert set(actual_origins) == {vs.origin for vs in visit_statuses_full}
def test_cli_journal_client_without_brokers(
cli_runner, swh_config, kafka_prefix: str, kafka_server, consumer: Consumer
):
"""Without brokers configuration, the cli fails."""
with pytest.raises(ValueError, match="brokers"):
cli_runner.invoke(
indexer_cli_group,
- ["-C", swh_config, "journal-client",],
+ [
+ "-C",
+ swh_config,
+ "journal-client",
+ ],
catch_exceptions=False,
)
diff --git a/swh/indexer/tests/test_codemeta.py b/swh/indexer/tests/test_codemeta.py
index cadc35f..1829a70 100644
--- a/swh/indexer/tests/test_codemeta.py
+++ b/swh/indexer/tests/test_codemeta.py
@@ -1,258 +1,298 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import pytest
from swh.indexer.codemeta import CROSSWALK_TABLE, merge_documents, merge_values
def test_crosstable():
assert CROSSWALK_TABLE["NodeJS"] == {
"repository": "http://schema.org/codeRepository",
"os": "http://schema.org/operatingSystem",
"cpu": "http://schema.org/processorRequirements",
"engines": "http://schema.org/runtimePlatform",
"author": "http://schema.org/author",
"author.email": "http://schema.org/email",
"author.name": "http://schema.org/name",
"contributors": "http://schema.org/contributor",
"keywords": "http://schema.org/keywords",
"license": "http://schema.org/license",
"version": "http://schema.org/version",
"description": "http://schema.org/description",
"name": "http://schema.org/name",
"bugs": "https://codemeta.github.io/terms/issueTracker",
"homepage": "http://schema.org/url",
}
def test_merge_values():
assert merge_values("a", "b") == ["a", "b"]
assert merge_values(["a", "b"], "c") == ["a", "b", "c"]
assert merge_values("a", ["b", "c"]) == ["a", "b", "c"]
assert merge_values({"@list": ["a"]}, {"@list": ["b"]}) == {"@list": ["a", "b"]}
assert merge_values({"@list": ["a", "b"]}, {"@list": ["c"]}) == {
"@list": ["a", "b", "c"]
}
with pytest.raises(ValueError):
merge_values({"@list": ["a"]}, "b")
with pytest.raises(ValueError):
merge_values("a", {"@list": ["b"]})
with pytest.raises(ValueError):
merge_values({"@list": ["a"]}, ["b"])
with pytest.raises(ValueError):
merge_values(["a"], {"@list": ["b"]})
assert merge_values("a", None) == "a"
assert merge_values(["a", "b"], None) == ["a", "b"]
assert merge_values(None, ["b", "c"]) == ["b", "c"]
assert merge_values({"@list": ["a"]}, None) == {"@list": ["a"]}
assert merge_values(None, {"@list": ["a"]}) == {"@list": ["a"]}
def test_merge_documents():
"""
Test the creation of a coherent minimal metadata set
"""
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "test_1",
"version": "0.0.2",
"description": "Simple package.json test for indexer",
"codeRepository": "git+https://github.com/moranegg/metadata_test",
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "test_0_1",
"version": "0.0.2",
"description": "Simple package.json test for indexer",
"codeRepository": "git+https://github.com/moranegg/metadata_test",
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "test_metadata",
"version": "0.0.2",
- "author": {"type": "Person", "name": "moranegg",},
+ "author": {
+ "type": "Person",
+ "name": "moranegg",
+ },
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"version": "0.0.2",
"description": "Simple package.json test for indexer",
"name": ["test_1", "test_0_1", "test_metadata"],
"author": [{"type": "Person", "name": "moranegg"}],
"codeRepository": "git+https://github.com/moranegg/metadata_test",
}
assert results == expected_results
def test_merge_documents_ids():
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test1",
"name": "test_1",
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test2",
"name": "test_2",
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test1",
"schema:sameAs": "http://example.org/test2",
"name": ["test_1", "test_2"],
}
assert results == expected_results
def test_merge_documents_duplicate_ids():
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test1",
"name": "test_1",
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test1",
"name": "test_1b",
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test2",
"name": "test_2",
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"id": "http://example.org/test1",
"schema:sameAs": "http://example.org/test2",
"name": ["test_1", "test_1b", "test_2"],
}
assert results == expected_results
def test_merge_documents_lists():
"""Tests merging two @list elements."""
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": {"@list": [{"name": "test_1"},]},
+ "author": {
+ "@list": [
+ {"name": "test_1"},
+ ]
+ },
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": {"@list": [{"name": "test_2"},]},
+ "author": {
+ "@list": [
+ {"name": "test_2"},
+ ]
+ },
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": [{"name": "test_1"}, {"name": "test_2"},],
+ "author": [
+ {"name": "test_1"},
+ {"name": "test_2"},
+ ],
}
assert results == expected_results
def test_merge_documents_lists_duplicates():
"""Tests merging two @list elements with a duplicate subelement."""
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": {"@list": [{"name": "test_1"},]},
+ "author": {
+ "@list": [
+ {"name": "test_1"},
+ ]
+ },
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": {"@list": [{"name": "test_2"}, {"name": "test_1"},]},
+ "author": {
+ "@list": [
+ {"name": "test_2"},
+ {"name": "test_1"},
+ ]
+ },
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": [{"name": "test_1"}, {"name": "test_2"},],
+ "author": [
+ {"name": "test_1"},
+ {"name": "test_2"},
+ ],
}
assert results == expected_results
def test_merge_documents_list_left():
"""Tests merging a singleton with an @list."""
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"author": {"name": "test_1"},
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": {"@list": [{"name": "test_2"},]},
+ "author": {
+ "@list": [
+ {"name": "test_2"},
+ ]
+ },
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": [{"name": "test_1"}, {"name": "test_2"},],
+ "author": [
+ {"name": "test_1"},
+ {"name": "test_2"},
+ ],
}
assert results == expected_results
def test_merge_documents_list_right():
"""Tests merging an @list with a singleton."""
# given
metadata_list = [
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": {"@list": [{"name": "test_1"},]},
+ "author": {
+ "@list": [
+ {"name": "test_1"},
+ ]
+ },
},
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"author": {"name": "test_2"},
},
]
# when
results = merge_documents(metadata_list)
# then
expected_results = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
- "author": [{"name": "test_1"}, {"name": "test_2"},],
+ "author": [
+ {"name": "test_1"},
+ {"name": "test_2"},
+ ],
}
assert results == expected_results
diff --git a/swh/indexer/tests/test_ctags.py b/swh/indexer/tests/test_ctags.py
index bfb7f53..720d8c4 100644
--- a/swh/indexer/tests/test_ctags.py
+++ b/swh/indexer/tests/test_ctags.py
@@ -1,162 +1,172 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import json
import unittest
from unittest.mock import patch
import pytest
import swh.indexer.ctags
from swh.indexer.ctags import CtagsIndexer, run_ctags
from swh.indexer.storage.model import ContentCtagsRow
from swh.indexer.tests.utils import (
BASE_TEST_CONFIG,
OBJ_STORAGE_DATA,
SHA1_TO_CTAGS,
CommonContentIndexerTest,
fill_obj_storage,
fill_storage,
filter_dict,
)
from swh.model.hashutil import hash_to_bytes
class BasicTest(unittest.TestCase):
@patch("swh.indexer.ctags.subprocess")
def test_run_ctags(self, mock_subprocess):
- """Computing licenses from a raw content should return results
-
- """
+ """Computing licenses from a raw content should return results"""
output0 = """
{"name":"defun","kind":"function","line":1,"language":"scheme"}
{"name":"name","kind":"symbol","line":5,"language":"else"}"""
output1 = """
{"name":"let","kind":"var","line":10,"language":"something"}"""
expected_result0 = [
{"name": "defun", "kind": "function", "line": 1, "lang": "scheme"},
{"name": "name", "kind": "symbol", "line": 5, "lang": "else"},
]
expected_result1 = [
{"name": "let", "kind": "var", "line": 10, "lang": "something"}
]
for path, lang, intermediary_result, expected_result in [
(b"some/path", "lisp", output0, expected_result0),
(b"some/path/2", "markdown", output1, expected_result1),
]:
mock_subprocess.check_output.return_value = intermediary_result
actual_result = list(run_ctags(path, lang=lang))
self.assertEqual(actual_result, expected_result)
class InjectCtagsIndexer:
- """Override ctags computations.
-
- """
+ """Override ctags computations."""
def compute_ctags(self, path, lang):
- """Inject fake ctags given path (sha1 identifier).
-
- """
+ """Inject fake ctags given path (sha1 identifier)."""
return {"lang": lang, **SHA1_TO_CTAGS.get(path)}
CONFIG = {
**BASE_TEST_CONFIG,
"tools": {
"name": "universal-ctags",
"version": "~git7859817b",
"configuration": {
"command_line": """ctags --fields=+lnz --sort=no """
""" --links=no <filepath>""",
"max_content_size": 1000,
},
},
- "languages": {"python": "python", "haskell": "haskell", "bar": "bar",},
+ "languages": {
+ "python": "python",
+ "haskell": "haskell",
+ "bar": "bar",
+ },
"workdir": "/tmp",
}
class TestCtagsIndexer(CommonContentIndexerTest, unittest.TestCase):
"""Ctags indexer test scenarios:
- Known sha1s in the input list have their data indexed
- Unknown sha1 in the input list are not indexed
"""
def get_indexer_results(self, ids):
yield from self.idx_storage.content_ctags_get(ids)
def setUp(self):
super().setUp()
self.indexer = CtagsIndexer(config=CONFIG)
self.indexer.catch_exceptions = False
self.idx_storage = self.indexer.idx_storage
fill_storage(self.indexer.storage)
fill_obj_storage(self.indexer.objstorage)
# Prepare test input
self.id0 = "01c9379dfc33803963d07c1ccc748d3fe4c96bb5"
self.id1 = "d4c647f0fc257591cc9ba1722484229780d1c607"
self.id2 = "688a5ef812c53907562fe379d4b3851e69c7cb15"
tool = {k.replace("tool_", ""): v for (k, v) in self.indexer.tool.items()}
self.expected_results = [
*[
- ContentCtagsRow(id=hash_to_bytes(self.id0), tool=tool, **kwargs,)
+ ContentCtagsRow(
+ id=hash_to_bytes(self.id0),
+ tool=tool,
+ **kwargs,
+ )
for kwargs in SHA1_TO_CTAGS[self.id0]
],
*[
- ContentCtagsRow(id=hash_to_bytes(self.id1), tool=tool, **kwargs,)
+ ContentCtagsRow(
+ id=hash_to_bytes(self.id1),
+ tool=tool,
+ **kwargs,
+ )
for kwargs in SHA1_TO_CTAGS[self.id1]
],
*[
- ContentCtagsRow(id=hash_to_bytes(self.id2), tool=tool, **kwargs,)
+ ContentCtagsRow(
+ id=hash_to_bytes(self.id2),
+ tool=tool,
+ **kwargs,
+ )
for kwargs in SHA1_TO_CTAGS[self.id2]
],
]
self._set_mocks()
def _set_mocks(self):
def find_ctags_for_content(raw_content):
for (sha1, ctags) in SHA1_TO_CTAGS.items():
if OBJ_STORAGE_DATA[sha1] == raw_content:
return ctags
else:
raise ValueError(
("%r not found in objstorage, can't mock its ctags.") % raw_content
)
def fake_language(raw_content, *args, **kwargs):
ctags = find_ctags_for_content(raw_content)
return {"lang": ctags[0]["lang"]}
self._real_compute_language = swh.indexer.ctags.compute_language
swh.indexer.ctags.compute_language = fake_language
def fake_check_output(cmd, *args, **kwargs):
id_ = cmd[-1].split("/")[-1]
return "\n".join(
json.dumps({"language": ctag["lang"], **ctag})
for ctag in SHA1_TO_CTAGS[id_]
)
self._real_check_output = swh.indexer.ctags.subprocess.check_output
swh.indexer.ctags.subprocess.check_output = fake_check_output
def tearDown(self):
swh.indexer.ctags.compute_language = self._real_compute_language
swh.indexer.ctags.subprocess.check_output = self._real_check_output
super().tearDown()
def test_ctags_w_no_tool():
with pytest.raises(ValueError):
CtagsIndexer(config=filter_dict(CONFIG, "tools"))
diff --git a/swh/indexer/tests/test_fossology_license.py b/swh/indexer/tests/test_fossology_license.py
index 7a087f7..ed81b27 100644
--- a/swh/indexer/tests/test_fossology_license.py
+++ b/swh/indexer/tests/test_fossology_license.py
@@ -1,159 +1,163 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Any, Dict
import unittest
from unittest.mock import patch
import pytest
from swh.indexer import fossology_license
from swh.indexer.fossology_license import (
FossologyLicenseIndexer,
FossologyLicensePartitionIndexer,
compute_license,
)
from swh.indexer.storage.model import ContentLicenseRow
from swh.indexer.tests.utils import (
BASE_TEST_CONFIG,
SHA1_TO_LICENSES,
CommonContentIndexerPartitionTest,
CommonContentIndexerTest,
fill_obj_storage,
fill_storage,
filter_dict,
)
from swh.model.hashutil import hash_to_bytes
class BasicTest(unittest.TestCase):
@patch("swh.indexer.fossology_license.subprocess")
def test_compute_license(self, mock_subprocess):
- """Computing licenses from a raw content should return results
-
- """
+ """Computing licenses from a raw content should return results"""
for path, intermediary_result, output in [
(b"some/path", None, []),
(b"some/path/2", [], []),
(b"other/path", " contains license(s) GPL,AGPL", ["GPL", "AGPL"]),
]:
mock_subprocess.check_output.return_value = intermediary_result
actual_result = compute_license(path)
- self.assertEqual(actual_result, {"licenses": output, "path": path,})
+ self.assertEqual(
+ actual_result,
+ {
+ "licenses": output,
+ "path": path,
+ },
+ )
def mock_compute_license(path):
- """path is the content identifier
-
- """
+ """path is the content identifier"""
if isinstance(id, bytes):
path = path.decode("utf-8")
# path is something like /tmp/tmpXXX/<sha1> so we keep only the sha1 part
path = path.split("/")[-1]
return {"licenses": SHA1_TO_LICENSES.get(path, [])}
CONFIG = {
**BASE_TEST_CONFIG,
"workdir": "/tmp",
"tools": {
"name": "nomos",
"version": "3.1.0rc2-31-ga2cbb8c",
- "configuration": {"command_line": "nomossa <filepath>",},
+ "configuration": {
+ "command_line": "nomossa <filepath>",
+ },
},
} # type: Dict[str, Any]
RANGE_CONFIG = dict(list(CONFIG.items()) + [("write_batch_size", 100)])
class TestFossologyLicenseIndexer(CommonContentIndexerTest, unittest.TestCase):
"""Language indexer test scenarios:
- Known sha1s in the input list have their data indexed
- Unknown sha1 in the input list are not indexed
"""
def get_indexer_results(self, ids):
yield from self.idx_storage.content_fossology_license_get(ids)
def setUp(self):
super().setUp()
# replace actual license computation with a mock
self.orig_compute_license = fossology_license.compute_license
fossology_license.compute_license = mock_compute_license
self.indexer = FossologyLicenseIndexer(CONFIG)
self.indexer.catch_exceptions = False
self.idx_storage = self.indexer.idx_storage
fill_storage(self.indexer.storage)
fill_obj_storage(self.indexer.objstorage)
self.id0 = "01c9379dfc33803963d07c1ccc748d3fe4c96bb5"
self.id1 = "688a5ef812c53907562fe379d4b3851e69c7cb15"
self.id2 = "da39a3ee5e6b4b0d3255bfef95601890afd80709" # empty content
tool = {k.replace("tool_", ""): v for (k, v) in self.indexer.tool.items()}
# then
self.expected_results = [
*[
ContentLicenseRow(
id=hash_to_bytes(self.id0), tool=tool, license=license
)
for license in SHA1_TO_LICENSES[self.id0]
],
*[
ContentLicenseRow(
id=hash_to_bytes(self.id1), tool=tool, license=license
)
for license in SHA1_TO_LICENSES[self.id1]
],
*[], # self.id2
]
def tearDown(self):
super().tearDown()
fossology_license.compute_license = self.orig_compute_license
class TestFossologyLicensePartitionIndexer(
CommonContentIndexerPartitionTest, unittest.TestCase
):
"""Range Fossology License Indexer tests.
- new data within range are indexed
- no data outside a range are indexed
- with filtering existing indexed data prior to compute new index
- without filtering existing indexed data prior to compute new index
"""
def setUp(self):
super().setUp()
# replace actual license computation with a mock
self.orig_compute_license = fossology_license.compute_license
fossology_license.compute_license = mock_compute_license
self.indexer = FossologyLicensePartitionIndexer(config=RANGE_CONFIG)
self.indexer.catch_exceptions = False
fill_storage(self.indexer.storage)
fill_obj_storage(self.indexer.objstorage)
def tearDown(self):
super().tearDown()
fossology_license.compute_license = self.orig_compute_license
def test_fossology_w_no_tool():
with pytest.raises(ValueError):
FossologyLicenseIndexer(config=filter_dict(CONFIG, "tools"))
def test_fossology_range_w_no_tool():
with pytest.raises(ValueError):
FossologyLicensePartitionIndexer(config=filter_dict(RANGE_CONFIG, "tools"))
diff --git a/swh/indexer/tests/test_indexer.py b/swh/indexer/tests/test_indexer.py
index 6edaa91..767c63f 100644
--- a/swh/indexer/tests/test_indexer.py
+++ b/swh/indexer/tests/test_indexer.py
@@ -1,152 +1,155 @@
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Any, Dict, Iterable, List, Optional
from unittest.mock import Mock
import pytest
from swh.indexer.indexer import (
ContentIndexer,
ContentPartitionIndexer,
OriginIndexer,
RevisionIndexer,
)
from swh.indexer.storage import PagedResult, Sha1
from swh.model.model import Content
from .utils import BASE_TEST_CONFIG
class _TestException(Exception):
pass
class CrashingIndexerMixin:
USE_TOOLS = False
def index(
self, id: Any, data: Optional[Any] = None, **kwargs
) -> List[Dict[str, Any]]:
raise _TestException()
def persist_index_computations(self, results) -> Dict[str, int]:
return {}
def indexed_contents_in_partition(
self, partition_id: int, nb_partitions: int
) -> Iterable[Sha1]:
raise _TestException()
class CrashingContentIndexer(CrashingIndexerMixin, ContentIndexer):
pass
class CrashingContentPartitionIndexer(CrashingIndexerMixin, ContentPartitionIndexer):
pass
class CrashingRevisionIndexer(CrashingIndexerMixin, RevisionIndexer):
pass
class CrashingOriginIndexer(CrashingIndexerMixin, OriginIndexer):
pass
class TrivialContentPartitionIndexer(ContentPartitionIndexer[str]):
USE_TOOLS = False
def index(self, id: bytes, data: Optional[bytes], **kwargs) -> List[str]:
return ["indexed " + id.decode()]
def indexed_contents_in_partition(
self, partition_id: int, nb_partitions: int
) -> Iterable[Sha1]:
return iter([b"excluded hash", b"other excluded hash"])
def persist_index_computations(self, results: List[str]) -> Dict[str, int]:
self._results.append(results) # type: ignore
return {"nb_added": len(results)}
def test_content_indexer_catch_exceptions():
indexer = CrashingContentIndexer(config=BASE_TEST_CONFIG)
indexer.objstorage = Mock()
indexer.objstorage.get.return_value = b"content"
assert indexer.run([b"foo"]) == {"status": "failed"}
indexer.catch_exceptions = False
with pytest.raises(_TestException):
indexer.run([b"foo"])
def test_revision_indexer_catch_exceptions():
indexer = CrashingRevisionIndexer(config=BASE_TEST_CONFIG)
indexer.storage = Mock()
indexer.storage.revision_get.return_value = ["rev"]
assert indexer.run([b"foo"]) == {"status": "failed"}
indexer.catch_exceptions = False
with pytest.raises(_TestException):
indexer.run([b"foo"])
def test_origin_indexer_catch_exceptions():
indexer = CrashingOriginIndexer(config=BASE_TEST_CONFIG)
assert indexer.run(["http://example.org"]) == {"status": "failed"}
indexer.catch_exceptions = False
with pytest.raises(_TestException):
indexer.run(["http://example.org"])
def test_content_partition_indexer_catch_exceptions():
indexer = CrashingContentPartitionIndexer(
config={**BASE_TEST_CONFIG, "write_batch_size": 42}
)
assert indexer.run(0, 42) == {"status": "failed"}
indexer.catch_exceptions = False
with pytest.raises(_TestException):
indexer.run(0, 42)
def test_content_partition_indexer():
# TODO: simplify the mocking in this test
indexer = TrivialContentPartitionIndexer(
- config={**BASE_TEST_CONFIG, "write_batch_size": 10,} # doesn't matter
+ config={
+ **BASE_TEST_CONFIG,
+ "write_batch_size": 10,
+ } # doesn't matter
)
indexer.catch_exceptions = False
indexer._results = []
indexer.storage = Mock()
indexer.storage.content_get_partition = lambda *args, **kwargs: PagedResult(
results=[
Content(sha1=c, sha1_git=c, sha256=c, blake2s256=c, length=42)
for c in [
b"hash1",
b"excluded hash",
b"hash2",
b"other excluded hash",
b"hash3",
]
],
next_page_token=None,
)
indexer.objstorage = Mock()
indexer.objstorage.get = lambda id: b"foo"
nb_partitions = 1
partition_id = 0
indexer.run(partition_id, nb_partitions)
assert indexer._results == [["indexed hash1", "indexed hash2", "indexed hash3"]]
diff --git a/swh/indexer/tests/test_journal_client.py b/swh/indexer/tests/test_journal_client.py
index b3e4140..5839180 100644
--- a/swh/indexer/tests/test_journal_client.py
+++ b/swh/indexer/tests/test_journal_client.py
@@ -1,116 +1,145 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Dict, List
from unittest.mock import patch
import pytest
from swh.indexer.journal_client import process_journal_objects
from swh.scheduler.interface import SchedulerInterface
def search_tasks(indexer_scheduler: SchedulerInterface, task_type) -> List[Dict]:
tasks = indexer_scheduler.search_tasks(task_type=task_type)
keys_not_to_compare = ["next_run", "current_interval", "id", "priority", "status"]
result_tasks = []
for task in tasks:
task = dict(task)
for key in keys_not_to_compare:
del task[key]
result_tasks.append(task)
return result_tasks
@pytest.mark.parametrize(
"origin",
[
"file:///dev/zero", # current format
- {"url": "file:///dev/zero",}, # legacy format
+ {
+ "url": "file:///dev/zero",
+ }, # legacy format
],
)
def test_journal_client_origin_visit_status(origin, indexer_scheduler):
- messages = {"origin_visit_status": [{"status": "full", "origin": origin},]}
+ messages = {
+ "origin_visit_status": [
+ {"status": "full", "origin": origin},
+ ]
+ }
process_journal_objects(
messages,
scheduler=indexer_scheduler,
task_names={"origin_metadata": "index-origin-metadata"},
)
actual_tasks = search_tasks(indexer_scheduler, task_type="index-origin-metadata")
assert actual_tasks == [
{
- "arguments": {"kwargs": {}, "args": [["file:///dev/zero"]],},
+ "arguments": {
+ "kwargs": {},
+ "args": [["file:///dev/zero"]],
+ },
"policy": "oneshot",
"type": "index-origin-metadata",
"retries_left": 1,
}
]
def test_journal_client_one_origin_visit_batch(indexer_scheduler):
messages = {
"origin_visit_status": [
- {"status": "full", "origin": "file:///dev/zero",},
- {"status": "full", "origin": "file:///tmp/foobar",},
+ {
+ "status": "full",
+ "origin": "file:///dev/zero",
+ },
+ {
+ "status": "full",
+ "origin": "file:///tmp/foobar",
+ },
]
}
process_journal_objects(
messages,
scheduler=indexer_scheduler,
task_names={"origin_metadata": "index-origin-metadata"},
)
actual_tasks = search_tasks(indexer_scheduler, task_type="index-origin-metadata")
assert actual_tasks == [
{
"arguments": {
"kwargs": {},
"args": [["file:///dev/zero", "file:///tmp/foobar"]],
},
"policy": "oneshot",
"type": "index-origin-metadata",
"retries_left": 1,
}
]
@patch("swh.indexer.journal_client.MAX_ORIGINS_PER_TASK", 2)
def test_journal_client_origin_visit_batches(indexer_scheduler):
messages = {
"origin_visit_status": [
- {"status": "full", "origin": "file:///dev/zero",},
- {"status": "full", "origin": "file:///tmp/foobar",},
- {"status": "full", "origin": "file:///tmp/spamegg",},
+ {
+ "status": "full",
+ "origin": "file:///dev/zero",
+ },
+ {
+ "status": "full",
+ "origin": "file:///tmp/foobar",
+ },
+ {
+ "status": "full",
+ "origin": "file:///tmp/spamegg",
+ },
]
}
process_journal_objects(
messages,
scheduler=indexer_scheduler,
task_names={"origin_metadata": "index-origin-metadata"},
)
actual_tasks = search_tasks(indexer_scheduler, task_type="index-origin-metadata")
assert actual_tasks == [
{
"arguments": {
"kwargs": {},
- "args": [["file:///dev/zero", "file:///tmp/foobar"],],
+ "args": [
+ ["file:///dev/zero", "file:///tmp/foobar"],
+ ],
},
"policy": "oneshot",
"type": "index-origin-metadata",
"retries_left": 1,
},
{
- "arguments": {"kwargs": {}, "args": [["file:///tmp/spamegg"]],},
+ "arguments": {
+ "kwargs": {},
+ "args": [["file:///tmp/spamegg"]],
+ },
"policy": "oneshot",
"type": "index-origin-metadata",
"retries_left": 1,
},
]
diff --git a/swh/indexer/tests/test_metadata.py b/swh/indexer/tests/test_metadata.py
index afc1f31..fdc8119 100644
--- a/swh/indexer/tests/test_metadata.py
+++ b/swh/indexer/tests/test_metadata.py
@@ -1,1330 +1,1337 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import json
import unittest
from hypothesis import HealthCheck, given, settings, strategies
from swh.indexer.codemeta import CODEMETA_TERMS
from swh.indexer.metadata import ContentMetadataIndexer, RevisionMetadataIndexer
from swh.indexer.metadata_detector import detect_metadata
from swh.indexer.metadata_dictionary import MAPPINGS
from swh.indexer.metadata_dictionary.maven import MavenMapping
from swh.indexer.metadata_dictionary.npm import NpmMapping
from swh.indexer.metadata_dictionary.ruby import GemspecMapping
from swh.indexer.storage.model import ContentMetadataRow, RevisionIntrinsicMetadataRow
from swh.indexer.tests.utils import DIRECTORY2, REVISION
from swh.model.hashutil import hash_to_bytes
from swh.model.model import Directory, DirectoryEntry, Revision
from .utils import (
BASE_TEST_CONFIG,
YARN_PARSER_METADATA,
fill_obj_storage,
fill_storage,
json_document_strategy,
xml_document_strategy,
)
TRANSLATOR_TOOL = {
"name": "swh-metadata-translator",
"version": "0.0.2",
"configuration": {"type": "local", "context": "NpmMapping"},
}
class ContentMetadataTestIndexer(ContentMetadataIndexer):
"""Specific Metadata whose configuration is enough to satisfy the
- indexing tests.
+ indexing tests.
"""
def parse_config_file(self, *args, **kwargs):
assert False, "should not be called; the rev indexer configures it."
REVISION_METADATA_CONFIG = {
**BASE_TEST_CONFIG,
"tools": TRANSLATOR_TOOL,
}
class Metadata(unittest.TestCase):
"""
Tests metadata_mock_tool tool for Metadata detection
"""
def setUp(self):
"""
shows the entire diff in the results
"""
self.maxDiff = None
self.npm_mapping = MAPPINGS["NpmMapping"]()
self.codemeta_mapping = MAPPINGS["CodemetaMapping"]()
self.maven_mapping = MAPPINGS["MavenMapping"]()
self.pkginfo_mapping = MAPPINGS["PythonPkginfoMapping"]()
self.gemspec_mapping = MAPPINGS["GemspecMapping"]()
self.cff_mapping = MAPPINGS["CffMapping"]()
def test_compute_metadata_none(self):
"""
testing content empty content is empty
should return None
"""
# given
content = b""
# None if no metadata was found or an error occurred
declared_metadata = None
# when
result = self.npm_mapping.translate(content)
# then
self.assertEqual(declared_metadata, result)
def test_compute_metadata_cff(self):
"""
testing CITATION.cff translation
"""
# given
content = """# YAML 1.2
---
abstract: "Command line program to convert from Citation File \
Format to various other formats such as BibTeX, EndNote, RIS, \
schema.org, CodeMeta, and .zenodo.json."
authors:
-
affiliation: "Netherlands eScience Center"
family-names: Klaver
given-names: Tom
-
affiliation: "Humboldt-Universität zu Berlin"
family-names: Druskat
given-names: Stephan
orcid: https://orcid.org/0000-0003-4925-7248
cff-version: "1.0.3"
date-released: 2019-11-12
doi: 10.5281/zenodo.1162057
keywords:
- "citation"
- "bibliography"
- "cff"
- "CITATION.cff"
license: Apache-2.0
message: "If you use this software, please cite it using these metadata."
repository-code: "https://github.com/citation-file-format/cff-converter-python"
title: cffconvert
version: "1.4.0-alpha0"
""".encode(
"utf-8"
)
expected = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"author": [
{
"type": "Person",
"affiliation": {
"type": "Organization",
"name": "Netherlands eScience Center",
},
"familyName": "Klaver",
"givenName": "Tom",
},
{
"id": "https://orcid.org/0000-0003-4925-7248",
"type": "Person",
"affiliation": {
"type": "Organization",
"name": "Humboldt-Universität zu Berlin",
},
"familyName": "Druskat",
"givenName": "Stephan",
},
],
"codeRepository": (
"https://github.com/citation-file-format/cff-converter-python"
),
"datePublished": "2019-11-12",
"description": """Command line program to convert from \
Citation File Format to various other formats such as BibTeX, EndNote, \
RIS, schema.org, CodeMeta, and .zenodo.json.""",
"identifier": "https://doi.org/10.5281/zenodo.1162057",
"keywords": ["citation", "bibliography", "cff", "CITATION.cff"],
"license": "https://spdx.org/licenses/Apache-2.0",
"version": "1.4.0-alpha0",
}
# when
result = self.cff_mapping.translate(content)
# then
self.assertEqual(expected, result)
def test_compute_metadata_npm(self):
"""
testing only computation of metadata with hard_mapping_npm
"""
# given
content = b"""
{
"name": "test_metadata",
"version": "0.0.2",
"description": "Simple package.json test for indexer",
"repository": {
"type": "git",
"url": "https://github.com/moranegg/metadata_test"
},
"author": {
"email": "moranegg@example.com",
"name": "Morane G"
}
}
"""
declared_metadata = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "test_metadata",
"version": "0.0.2",
"description": "Simple package.json test for indexer",
"codeRepository": "git+https://github.com/moranegg/metadata_test",
"author": [
- {"type": "Person", "name": "Morane G", "email": "moranegg@example.com",}
+ {
+ "type": "Person",
+ "name": "Morane G",
+ "email": "moranegg@example.com",
+ }
],
}
# when
result = self.npm_mapping.translate(content)
# then
self.assertEqual(declared_metadata, result)
def test_index_content_metadata_npm(self):
"""
testing NPM with package.json
- one sha1 uses a file that can't be translated to metadata and
should return None in the translated metadata
"""
# given
sha1s = [
hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"),
hash_to_bytes("d4c647f0fc257591cc9ba1722484229780d1c607"),
hash_to_bytes("02fb2c89e14f7fab46701478c83779c7beb7b069"),
]
# this metadata indexer computes only metadata for package.json
# in npm context with a hard mapping
config = BASE_TEST_CONFIG.copy()
config["tools"] = [TRANSLATOR_TOOL]
metadata_indexer = ContentMetadataTestIndexer(config=config)
fill_obj_storage(metadata_indexer.objstorage)
fill_storage(metadata_indexer.storage)
# when
metadata_indexer.run(sha1s)
results = list(metadata_indexer.idx_storage.content_metadata_get(sha1s))
expected_results = [
ContentMetadataRow(
id=hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"),
tool=TRANSLATOR_TOOL,
metadata={
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"codeRepository": "git+https://github.com/moranegg/metadata_test",
"description": "Simple package.json test for indexer",
"name": "test_metadata",
"version": "0.0.1",
},
),
ContentMetadataRow(
id=hash_to_bytes("d4c647f0fc257591cc9ba1722484229780d1c607"),
tool=TRANSLATOR_TOOL,
metadata={
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"issueTracker": "https://github.com/npm/npm/issues",
"author": [
{
"type": "Person",
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me",
}
],
"codeRepository": "git+https://github.com/npm/npm",
"description": "a package manager for JavaScript",
"license": "https://spdx.org/licenses/Artistic-2.0",
"version": "5.0.3",
"name": "npm",
"keywords": [
"install",
"modules",
"package manager",
"package.json",
],
"url": "https://docs.npmjs.com/",
},
),
]
for result in results:
del result.tool["id"]
# The assertion below returns False sometimes because of nested lists
self.assertEqual(expected_results, results)
def test_npm_bugs_normalization(self):
# valid dictionary
package_json = b"""{
"name": "foo",
"bugs": {
"url": "https://github.com/owner/project/issues",
"email": "foo@example.com"
}
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"issueTracker": "https://github.com/owner/project/issues",
"type": "SoftwareSourceCode",
},
)
# "invalid" dictionary
package_json = b"""{
"name": "foo",
"bugs": {
"email": "foo@example.com"
}
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"type": "SoftwareSourceCode",
},
)
# string
package_json = b"""{
"name": "foo",
"bugs": "https://github.com/owner/project/issues"
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"issueTracker": "https://github.com/owner/project/issues",
"type": "SoftwareSourceCode",
},
)
def test_npm_repository_normalization(self):
# normal
package_json = b"""{
"name": "foo",
"repository": {
"type" : "git",
"url" : "https://github.com/npm/cli.git"
}
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"codeRepository": "git+https://github.com/npm/cli.git",
"type": "SoftwareSourceCode",
},
)
# missing url
package_json = b"""{
"name": "foo",
"repository": {
"type" : "git"
}
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"type": "SoftwareSourceCode",
},
)
# github shortcut
package_json = b"""{
"name": "foo",
"repository": "github:npm/cli"
}"""
result = self.npm_mapping.translate(package_json)
expected_result = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"codeRepository": "git+https://github.com/npm/cli.git",
"type": "SoftwareSourceCode",
}
self.assertEqual(result, expected_result)
# github shortshortcut
package_json = b"""{
"name": "foo",
"repository": "npm/cli"
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(result, expected_result)
# gitlab shortcut
package_json = b"""{
"name": "foo",
"repository": "gitlab:user/repo"
}"""
result = self.npm_mapping.translate(package_json)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"name": "foo",
"codeRepository": "git+https://gitlab.com/user/repo.git",
"type": "SoftwareSourceCode",
},
)
def test_detect_metadata_package_json(self):
# given
df = [
{
"sha1_git": b"abc",
"name": b"index.js",
"target": b"abc",
"length": 897,
"status": "visible",
"type": "file",
"perms": 33188,
"dir_id": b"dir_a",
"sha1": b"bcd",
},
{
"sha1_git": b"aab",
"name": b"package.json",
"target": b"aab",
"length": 712,
"status": "visible",
"type": "file",
"perms": 33188,
"dir_id": b"dir_a",
"sha1": b"cde",
},
]
# when
results = detect_metadata(df)
expected_results = {"NpmMapping": [b"cde"]}
# then
self.assertEqual(expected_results, results)
def test_detect_metadata_codemeta_json_uppercase(self):
# given
df = [
{
"sha1_git": b"abc",
"name": b"index.html",
"target": b"abc",
"length": 897,
"status": "visible",
"type": "file",
"perms": 33188,
"dir_id": b"dir_a",
"sha1": b"bcd",
},
{
"sha1_git": b"aab",
"name": b"CODEMETA.json",
"target": b"aab",
"length": 712,
"status": "visible",
"type": "file",
"perms": 33188,
"dir_id": b"dir_a",
"sha1": b"bcd",
},
]
# when
results = detect_metadata(df)
expected_results = {"CodemetaMapping": [b"bcd"]}
# then
self.assertEqual(expected_results, results)
def test_compute_metadata_valid_codemeta(self):
raw_content = b"""{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"@type": "SoftwareSourceCode",
"identifier": "CodeMeta",
"description": "CodeMeta is a concept vocabulary that can be used to standardize the exchange of software metadata across repositories and organizations.",
"name": "CodeMeta: Minimal metadata schemas for science software and code, in JSON-LD",
"codeRepository": "https://github.com/codemeta/codemeta",
"issueTracker": "https://github.com/codemeta/codemeta/issues",
"license": "https://spdx.org/licenses/Apache-2.0",
"version": "2.0",
"author": [
{
"@type": "Person",
"givenName": "Carl",
"familyName": "Boettiger",
"email": "cboettig@gmail.com",
"@id": "http://orcid.org/0000-0002-1642-628X"
},
{
"@type": "Person",
"givenName": "Matthew B.",
"familyName": "Jones",
"email": "jones@nceas.ucsb.edu",
"@id": "http://orcid.org/0000-0003-0077-4738"
}
],
"maintainer": {
"@type": "Person",
"givenName": "Carl",
"familyName": "Boettiger",
"email": "cboettig@gmail.com",
"@id": "http://orcid.org/0000-0002-1642-628X"
},
"contIntegration": "https://travis-ci.org/codemeta/codemeta",
"developmentStatus": "active",
"downloadUrl": "https://github.com/codemeta/codemeta/archive/2.0.zip",
"funder": {
"@id": "https://doi.org/10.13039/100000001",
"@type": "Organization",
"name": "National Science Foundation"
},
"funding":"1549758; Codemeta: A Rosetta Stone for Metadata in Scientific Software",
"keywords": [
"metadata",
"software"
],
"version":"2.0",
"dateCreated":"2017-06-05",
"datePublished":"2017-06-05",
"programmingLanguage": "JSON-LD"
}""" # noqa
expected_result = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"identifier": "CodeMeta",
"description": "CodeMeta is a concept vocabulary that can "
"be used to standardize the exchange of software metadata "
"across repositories and organizations.",
"name": "CodeMeta: Minimal metadata schemas for science "
"software and code, in JSON-LD",
"codeRepository": "https://github.com/codemeta/codemeta",
"issueTracker": "https://github.com/codemeta/codemeta/issues",
"license": "https://spdx.org/licenses/Apache-2.0",
"version": "2.0",
"author": [
{
"type": "Person",
"givenName": "Carl",
"familyName": "Boettiger",
"email": "cboettig@gmail.com",
"id": "http://orcid.org/0000-0002-1642-628X",
},
{
"type": "Person",
"givenName": "Matthew B.",
"familyName": "Jones",
"email": "jones@nceas.ucsb.edu",
"id": "http://orcid.org/0000-0003-0077-4738",
},
],
"maintainer": {
"type": "Person",
"givenName": "Carl",
"familyName": "Boettiger",
"email": "cboettig@gmail.com",
"id": "http://orcid.org/0000-0002-1642-628X",
},
"contIntegration": "https://travis-ci.org/codemeta/codemeta",
"developmentStatus": "active",
"downloadUrl": "https://github.com/codemeta/codemeta/archive/2.0.zip",
"funder": {
"id": "https://doi.org/10.13039/100000001",
"type": "Organization",
"name": "National Science Foundation",
},
"funding": "1549758; Codemeta: A Rosetta Stone for Metadata "
"in Scientific Software",
"keywords": ["metadata", "software"],
"version": "2.0",
"dateCreated": "2017-06-05",
"datePublished": "2017-06-05",
"programmingLanguage": "JSON-LD",
}
result = self.codemeta_mapping.translate(raw_content)
self.assertEqual(result, expected_result)
def test_compute_metadata_codemeta_alternate_context(self):
raw_content = b"""{
"@context": "https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld",
"@type": "SoftwareSourceCode",
"identifier": "CodeMeta"
}""" # noqa
expected_result = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"identifier": "CodeMeta",
}
result = self.codemeta_mapping.translate(raw_content)
self.assertEqual(result, expected_result)
def test_compute_metadata_maven(self):
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
<repositories>
<repository>
<id>central</id>
<name>Maven Repository Switchboard</name>
<layout>default</layout>
<url>http://repo1.maven.org/maven2</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
</repositories>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
<comments>A business-friendly OSS license</comments>
</license>
</licenses>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"license": "https://www.apache.org/licenses/LICENSE-2.0.txt",
"codeRepository": (
"http://repo1.maven.org/maven2/com/mycompany/app/my-app"
),
},
)
def test_compute_metadata_maven_empty(self):
raw_content = b"""
<project>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
},
)
def test_compute_metadata_maven_almost_empty(self):
raw_content = b"""
<project>
<foo/>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
},
)
def test_compute_metadata_maven_invalid_xml(self):
expected_warning = (
"WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:"
"Error parsing XML from foo"
)
raw_content = b"""
<project>"""
with self.assertLogs("swh.indexer.metadata_dictionary", level="WARNING") as cm:
result = MAPPINGS["MavenMapping"]("foo").translate(raw_content)
self.assertEqual(cm.output, [expected_warning])
self.assertEqual(result, None)
raw_content = b"""
"""
with self.assertLogs("swh.indexer.metadata_dictionary", level="WARNING") as cm:
result = MAPPINGS["MavenMapping"]("foo").translate(raw_content)
self.assertEqual(cm.output, [expected_warning])
self.assertEqual(result, None)
def test_compute_metadata_maven_unknown_encoding(self):
expected_warning = (
"WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:"
"Error detecting XML encoding from foo"
)
raw_content = b"""<?xml version="1.0" encoding="foo"?>
<project>
</project>"""
with self.assertLogs("swh.indexer.metadata_dictionary", level="WARNING") as cm:
result = MAPPINGS["MavenMapping"]("foo").translate(raw_content)
self.assertEqual(cm.output, [expected_warning])
self.assertEqual(result, None)
raw_content = b"""<?xml version="1.0" encoding="UTF-7"?>
<project>
</project>"""
with self.assertLogs("swh.indexer.metadata_dictionary", level="WARNING") as cm:
result = MAPPINGS["MavenMapping"]("foo").translate(raw_content)
self.assertEqual(cm.output, [expected_warning])
self.assertEqual(result, None)
def test_compute_metadata_maven_invalid_encoding(self):
expected_warning = [
# libexpat1 <= 2.2.10-2+deb11u1
[
(
"WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:"
"Error unidecoding XML from foo"
)
],
# libexpat1 >= 2.2.10-2+deb11u2
[
(
"WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:"
"Error parsing XML from foo"
)
],
]
raw_content = b"""<?xml version="1.0" encoding="UTF-8"?>
<foo\xe5ct>
</foo>"""
with self.assertLogs("swh.indexer.metadata_dictionary", level="WARNING") as cm:
result = MAPPINGS["MavenMapping"]("foo").translate(raw_content)
self.assertIn(cm.output, expected_warning)
self.assertEqual(result, None)
def test_compute_metadata_maven_minimal(self):
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"codeRepository": (
"https://repo.maven.apache.org/maven2/com/mycompany/app/my-app"
),
},
)
def test_compute_metadata_maven_empty_nodes(self):
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
<repositories>
</repositories>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"codeRepository": (
"https://repo.maven.apache.org/maven2/com/mycompany/app/my-app"
),
},
)
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version></version>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"codeRepository": (
"https://repo.maven.apache.org/maven2/com/mycompany/app/my-app"
),
},
)
raw_content = b"""
<project>
<name></name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"codeRepository": (
"https://repo.maven.apache.org/maven2/com/mycompany/app/my-app"
),
},
)
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
<licenses>
</licenses>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"codeRepository": (
"https://repo.maven.apache.org/maven2/com/mycompany/app/my-app"
),
},
)
raw_content = b"""
<project>
<groupId></groupId>
<version>1.2.3</version>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"version": "1.2.3",
},
)
def test_compute_metadata_maven_invalid_licenses(self):
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
<licenses>
foo
</licenses>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"codeRepository": (
"https://repo.maven.apache.org/maven2/com/mycompany/app/my-app"
),
},
)
def test_compute_metadata_maven_multiple(self):
"""Tests when there are multiple code repos and licenses."""
raw_content = b"""
<project>
<name>Maven Default Project</name>
<modelVersion>4.0.0</modelVersion>
<groupId>com.mycompany.app</groupId>
<artifactId>my-app</artifactId>
<version>1.2.3</version>
<repositories>
<repository>
<id>central</id>
<name>Maven Repository Switchboard</name>
<layout>default</layout>
<url>http://repo1.maven.org/maven2</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<repository>
<id>example</id>
<name>Example Maven Repo</name>
<layout>default</layout>
<url>http://example.org/maven2</url>
</repository>
</repositories>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
<comments>A business-friendly OSS license</comments>
</license>
<license>
<name>MIT license</name>
<url>https://opensource.org/licenses/MIT</url>
</license>
</licenses>
</project>"""
result = self.maven_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "Maven Default Project",
"identifier": "com.mycompany.app",
"version": "1.2.3",
"license": [
"https://www.apache.org/licenses/LICENSE-2.0.txt",
"https://opensource.org/licenses/MIT",
],
"codeRepository": [
"http://repo1.maven.org/maven2/com/mycompany/app/my-app",
"http://example.org/maven2/com/mycompany/app/my-app",
],
},
)
def test_compute_metadata_pkginfo(self):
raw_content = b"""\
Metadata-Version: 2.1
Name: swh.core
Version: 0.0.49
Summary: Software Heritage core utilities
Home-page: https://forge.softwareheritage.org/diffusion/DCORE/
Author: Software Heritage developers
Author-email: swh-devel@inria.fr
License: UNKNOWN
Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest
Project-URL: Funding, https://www.softwareheritage.org/donate
Project-URL: Source, https://forge.softwareheritage.org/source/swh-core
Description: swh-core
========
\x20
core library for swh's modules:
- config parser
- hash computations
- serialization
- logging mechanism
\x20
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
Classifier: Operating System :: OS Independent
Classifier: Development Status :: 5 - Production/Stable
Description-Content-Type: text/markdown
Provides-Extra: testing
""" # noqa
result = self.pkginfo_mapping.translate(raw_content)
self.assertCountEqual(
result["description"],
[
"Software Heritage core utilities", # note the comma here
"swh-core\n"
"========\n"
"\n"
"core library for swh's modules:\n"
"- config parser\n"
"- hash computations\n"
"- serialization\n"
"- logging mechanism\n"
"",
],
result,
)
del result["description"]
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"url": "https://forge.softwareheritage.org/diffusion/DCORE/",
"name": "swh.core",
"author": [
{
"type": "Person",
"name": "Software Heritage developers",
"email": "swh-devel@inria.fr",
}
],
"version": "0.0.49",
},
)
def test_compute_metadata_pkginfo_utf8(self):
raw_content = b"""\
Metadata-Version: 1.1
Name: snowpyt
Description-Content-Type: UNKNOWN
Description: foo
Hydrology N\xc2\xb083
""" # noqa
result = self.pkginfo_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "snowpyt",
"description": "foo\nHydrology N°83",
},
)
def test_compute_metadata_pkginfo_keywords(self):
raw_content = b"""\
Metadata-Version: 2.1
Name: foo
Keywords: foo bar baz
""" # noqa
result = self.pkginfo_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "foo",
"keywords": ["foo", "bar", "baz"],
},
)
def test_compute_metadata_pkginfo_license(self):
raw_content = b"""\
Metadata-Version: 2.1
Name: foo
License: MIT
""" # noqa
result = self.pkginfo_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "foo",
"license": "MIT",
},
)
def test_gemspec_base(self):
raw_content = b"""
Gem::Specification.new do |s|
s.name = 'example'
s.version = '0.1.0'
s.licenses = ['MIT']
s.summary = "This is an example!"
s.description = "Much longer explanation of the example!"
s.authors = ["Ruby Coder"]
s.email = 'rubycoder@example.com'
s.files = ["lib/example.rb"]
s.homepage = 'https://rubygems.org/gems/example'
s.metadata = { "source_code_uri" => "https://github.com/example/example" }
end"""
result = self.gemspec_mapping.translate(raw_content)
self.assertCountEqual(
result.pop("description"),
["This is an example!", "Much longer explanation of the example!"],
)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"author": [{"type": "Person", "name": "Ruby Coder"}],
"name": "example",
"license": "https://spdx.org/licenses/MIT",
"codeRepository": "https://rubygems.org/gems/example",
"email": "rubycoder@example.com",
"version": "0.1.0",
},
)
def test_gemspec_two_author_fields(self):
raw_content = b"""
Gem::Specification.new do |s|
s.authors = ["Ruby Coder1"]
s.author = "Ruby Coder2"
end"""
result = self.gemspec_mapping.translate(raw_content)
self.assertCountEqual(
result.pop("author"),
[
{"type": "Person", "name": "Ruby Coder1"},
{"type": "Person", "name": "Ruby Coder2"},
],
)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
},
)
def test_gemspec_invalid_author(self):
raw_content = b"""
Gem::Specification.new do |s|
s.author = ["Ruby Coder"]
end"""
result = self.gemspec_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
},
)
raw_content = b"""
Gem::Specification.new do |s|
s.author = "Ruby Coder1",
end"""
result = self.gemspec_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
},
)
raw_content = b"""
Gem::Specification.new do |s|
s.authors = ["Ruby Coder1", ["Ruby Coder2"]]
end"""
result = self.gemspec_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"author": [{"type": "Person", "name": "Ruby Coder1"}],
},
)
def test_gemspec_alternative_header(self):
raw_content = b"""
require './lib/version'
Gem::Specification.new { |s|
s.name = 'rb-system-with-aliases'
s.summary = 'execute system commands with aliases'
}
"""
result = self.gemspec_mapping.translate(raw_content)
self.assertEqual(
result,
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"name": "rb-system-with-aliases",
"description": "execute system commands with aliases",
},
)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(json_document_strategy(keys=list(NpmMapping.mapping)))
def test_npm_adversarial(self, doc):
raw = json.dumps(doc).encode()
self.npm_mapping.translate(raw)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(json_document_strategy(keys=CODEMETA_TERMS))
def test_codemeta_adversarial(self, doc):
raw = json.dumps(doc).encode()
self.codemeta_mapping.translate(raw)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
xml_document_strategy(
keys=list(MavenMapping.mapping),
root="project",
xmlns="http://maven.apache.org/POM/4.0.0",
)
)
def test_maven_adversarial(self, doc):
self.maven_mapping.translate(doc)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
strategies.dictionaries(
# keys
strategies.one_of(
strategies.text(), *map(strategies.just, GemspecMapping.mapping)
),
# values
strategies.recursive(
strategies.characters(),
lambda children: strategies.lists(children, min_size=1),
),
)
)
def test_gemspec_adversarial(self, doc):
parts = [b"Gem::Specification.new do |s|\n"]
for (k, v) in doc.items():
parts.append(" s.{} = {}\n".format(k, repr(v)).encode())
parts.append(b"end\n")
self.gemspec_mapping.translate(b"".join(parts))
def test_revision_metadata_indexer(self):
metadata_indexer = RevisionMetadataIndexer(config=REVISION_METADATA_CONFIG)
fill_obj_storage(metadata_indexer.objstorage)
fill_storage(metadata_indexer.storage)
tool = metadata_indexer.idx_storage.indexer_configuration_get(
{f"tool_{k}": v for (k, v) in TRANSLATOR_TOOL.items()}
)
assert tool is not None
rev = REVISION
assert rev.directory == DIRECTORY2.id
metadata_indexer.idx_storage.content_metadata_add(
[
ContentMetadataRow(
id=DIRECTORY2.entries[0].target,
indexer_configuration_id=tool["id"],
metadata=YARN_PARSER_METADATA,
)
]
)
metadata_indexer.run([rev.id])
results = list(
metadata_indexer.idx_storage.revision_intrinsic_metadata_get([REVISION.id])
)
expected_results = [
RevisionIntrinsicMetadataRow(
id=rev.id,
tool=TRANSLATOR_TOOL,
metadata=YARN_PARSER_METADATA,
mappings=["npm"],
)
]
for result in results:
del result.tool["id"]
# then
self.assertEqual(results, expected_results)
def test_revision_metadata_indexer_single_root_dir(self):
metadata_indexer = RevisionMetadataIndexer(config=REVISION_METADATA_CONFIG)
fill_obj_storage(metadata_indexer.objstorage)
fill_storage(metadata_indexer.storage)
# Add a parent directory, that is the only directory at the root
# of the revision
rev = REVISION
assert rev.directory == DIRECTORY2.id
directory = Directory(
entries=(
DirectoryEntry(
- name=b"foobar-1.0.0", type="dir", target=rev.directory, perms=16384,
+ name=b"foobar-1.0.0",
+ type="dir",
+ target=rev.directory,
+ perms=16384,
),
),
)
assert directory.id is not None
metadata_indexer.storage.directory_add([directory])
new_rev_dict = {**rev.to_dict(), "directory": directory.id}
new_rev_dict.pop("id")
new_rev = Revision.from_dict(new_rev_dict)
metadata_indexer.storage.revision_add([new_rev])
tool = metadata_indexer.idx_storage.indexer_configuration_get(
{f"tool_{k}": v for (k, v) in TRANSLATOR_TOOL.items()}
)
assert tool is not None
metadata_indexer.idx_storage.content_metadata_add(
[
ContentMetadataRow(
id=DIRECTORY2.entries[0].target,
indexer_configuration_id=tool["id"],
metadata=YARN_PARSER_METADATA,
)
]
)
metadata_indexer.run([new_rev.id])
results = list(
metadata_indexer.idx_storage.revision_intrinsic_metadata_get([new_rev.id])
)
expected_results = [
RevisionIntrinsicMetadataRow(
id=new_rev.id,
tool=TRANSLATOR_TOOL,
metadata=YARN_PARSER_METADATA,
mappings=["npm"],
)
]
for result in results:
del result.tool["id"]
# then
self.assertEqual(results, expected_results)
diff --git a/swh/indexer/tests/test_origin_head.py b/swh/indexer/tests/test_origin_head.py
index 67b5b05..1712fe3 100644
--- a/swh/indexer/tests/test_origin_head.py
+++ b/swh/indexer/tests/test_origin_head.py
@@ -1,176 +1,216 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
from datetime import datetime, timezone
import unittest
import pytest
from swh.indexer.origin_head import OriginHeadIndexer
from swh.indexer.tests.utils import fill_storage
from swh.model.model import (
Origin,
OriginVisit,
OriginVisitStatus,
Snapshot,
SnapshotBranch,
TargetType,
)
from swh.storage.utils import now
@pytest.fixture
def swh_indexer_config(swh_indexer_config):
config = copy.deepcopy(swh_indexer_config)
config.update(
{
"tools": {
"name": "origin-metadata",
"version": "0.0.1",
"configuration": {},
},
"tasks": {
"revision_intrinsic_metadata": None,
"origin_intrinsic_metadata": None,
},
}
)
return config
class OriginHeadTestIndexer(OriginHeadIndexer):
"""Specific indexer whose configuration is enough to satisfy the
- indexing tests.
+ indexing tests.
"""
def persist_index_computations(self, results):
self.results = results
SAMPLE_SNAPSHOT = Snapshot(
branches={
b"foo": None,
- b"HEAD": SnapshotBranch(target_type=TargetType.ALIAS, target=b"foo",),
+ b"HEAD": SnapshotBranch(
+ target_type=TargetType.ALIAS,
+ target=b"foo",
+ ),
},
)
class OriginHead(unittest.TestCase):
@pytest.fixture(autouse=True)
def init(self, swh_config):
super().setUp()
self.indexer = OriginHeadTestIndexer()
self.indexer.catch_exceptions = False
fill_storage(self.indexer.storage)
def test_git(self):
origin_url = "https://github.com/SoftwareHeritage/swh-storage"
self.indexer.run([origin_url])
rev_id = b"8K\x12\x00d\x03\xcc\xe4]bS\xe3\x8f{\xd7}\xac\xefrm"
self.assertEqual(
- self.indexer.results, [{"revision_id": rev_id, "origin_url": origin_url,}],
+ self.indexer.results,
+ [
+ {
+ "revision_id": rev_id,
+ "origin_url": origin_url,
+ }
+ ],
)
def test_git_partial_snapshot(self):
"""Checks partial snapshots are ignored."""
origin_url = "https://github.com/SoftwareHeritage/swh-core"
self.indexer.storage.origin_add([Origin(url=origin_url)])
visit = self.indexer.storage.origin_visit_add(
[
OriginVisit(
origin=origin_url,
date=datetime(2019, 2, 27, tzinfo=timezone.utc),
type="git",
)
]
)[0]
self.indexer.storage.snapshot_add([SAMPLE_SNAPSHOT])
visit_status = OriginVisitStatus(
origin=origin_url,
visit=visit.visit,
date=now(),
status="partial",
snapshot=SAMPLE_SNAPSHOT.id,
)
self.indexer.storage.origin_visit_status_add([visit_status])
self.indexer.run([origin_url])
self.assertEqual(self.indexer.results, [])
def test_vcs_missing_snapshot(self):
origin_url = "https://github.com/SoftwareHeritage/swh-indexer"
self.indexer.storage.origin_add([Origin(url=origin_url)])
self.indexer.run([origin_url])
self.assertEqual(self.indexer.results, [])
def test_pypi_missing_branch(self):
origin_url = "https://pypi.org/project/abcdef/"
- self.indexer.storage.origin_add([Origin(url=origin_url,)])
+ self.indexer.storage.origin_add(
+ [
+ Origin(
+ url=origin_url,
+ )
+ ]
+ )
visit = self.indexer.storage.origin_visit_add(
[
OriginVisit(
origin=origin_url,
date=datetime(2019, 2, 27, tzinfo=timezone.utc),
type="pypi",
)
]
)[0]
self.indexer.storage.snapshot_add([SAMPLE_SNAPSHOT])
visit_status = OriginVisitStatus(
origin=origin_url,
visit=visit.visit,
date=now(),
status="full",
snapshot=SAMPLE_SNAPSHOT.id,
)
self.indexer.storage.origin_visit_status_add([visit_status])
self.indexer.run(["https://pypi.org/project/abcdef/"])
self.assertEqual(self.indexer.results, [])
def test_ftp(self):
origin_url = "rsync://ftp.gnu.org/gnu/3dldf"
self.indexer.run([origin_url])
rev_id = b"\x8e\xa9\x8e/\xea}\x9feF\xf4\x9f\xfd\xee\xcc\x1a\xb4`\x8c\x8by"
self.assertEqual(
- self.indexer.results, [{"revision_id": rev_id, "origin_url": origin_url,}],
+ self.indexer.results,
+ [
+ {
+ "revision_id": rev_id,
+ "origin_url": origin_url,
+ }
+ ],
)
def test_ftp_missing_snapshot(self):
origin_url = "rsync://ftp.gnu.org/gnu/foobar"
self.indexer.storage.origin_add([Origin(url=origin_url)])
self.indexer.run([origin_url])
self.assertEqual(self.indexer.results, [])
def test_deposit(self):
origin_url = "https://forge.softwareheritage.org/source/jesuisgpl/"
self.indexer.storage.origin_add([Origin(url=origin_url)])
self.indexer.run([origin_url])
rev_id = b"\xe7n\xa4\x9c\x9f\xfb\xb7\xf76\x11\x08{\xa6\xe9\x99\xb1\x9e]q\xeb"
self.assertEqual(
- self.indexer.results, [{"revision_id": rev_id, "origin_url": origin_url,}],
+ self.indexer.results,
+ [
+ {
+ "revision_id": rev_id,
+ "origin_url": origin_url,
+ }
+ ],
)
def test_deposit_missing_snapshot(self):
origin_url = "https://forge.softwareheritage.org/source/foobar"
- self.indexer.storage.origin_add([Origin(url=origin_url,)])
+ self.indexer.storage.origin_add(
+ [
+ Origin(
+ url=origin_url,
+ )
+ ]
+ )
self.indexer.run([origin_url])
self.assertEqual(self.indexer.results, [])
def test_pypi(self):
origin_url = "https://pypi.org/project/limnoria/"
self.indexer.run([origin_url])
rev_id = b"\x83\xb9\xb6\xc7\x05\xb1%\xd0\xfem\xd8kA\x10\x9d\xc5\xfa2\xf8t"
self.assertEqual(
- self.indexer.results, [{"revision_id": rev_id, "origin_url": origin_url}],
+ self.indexer.results,
+ [{"revision_id": rev_id, "origin_url": origin_url}],
)
def test_svn(self):
origin_url = "http://0-512-md.googlecode.com/svn/"
self.indexer.run([origin_url])
rev_id = b"\xe4?r\xe1,\x88\xab\xec\xe7\x9a\x87\xb8\xc9\xad#.\x1bw=\x18"
self.assertEqual(
- self.indexer.results, [{"revision_id": rev_id, "origin_url": origin_url,}],
+ self.indexer.results,
+ [
+ {
+ "revision_id": rev_id,
+ "origin_url": origin_url,
+ }
+ ],
)
diff --git a/swh/indexer/tests/test_origin_metadata.py b/swh/indexer/tests/test_origin_metadata.py
index a555e9c..e05bd08 100644
--- a/swh/indexer/tests/test_origin_metadata.py
+++ b/swh/indexer/tests/test_origin_metadata.py
@@ -1,255 +1,256 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
from unittest.mock import patch
import pytest
from swh.indexer.metadata import OriginMetadataIndexer
from swh.indexer.storage.interface import IndexerStorageInterface
from swh.indexer.storage.model import (
OriginIntrinsicMetadataRow,
RevisionIntrinsicMetadataRow,
)
from swh.model.model import Origin
from swh.storage.interface import StorageInterface
from .test_metadata import TRANSLATOR_TOOL
from .utils import REVISION, YARN_PARSER_METADATA
@pytest.fixture
def swh_indexer_config(swh_indexer_config):
- """Override the default configuration to override the tools entry
-
- """
+ """Override the default configuration to override the tools entry"""
cfg = copy.deepcopy(swh_indexer_config)
cfg["tools"] = TRANSLATOR_TOOL
return cfg
def test_origin_metadata_indexer(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
origin = "https://github.com/librariesio/yarn-parser"
indexer.run([origin])
tool = swh_indexer_config["tools"]
rev_id = REVISION.id
rev_metadata = RevisionIntrinsicMetadataRow(
- id=rev_id, tool=tool, metadata=YARN_PARSER_METADATA, mappings=["npm"],
+ id=rev_id,
+ tool=tool,
+ metadata=YARN_PARSER_METADATA,
+ mappings=["npm"],
)
origin_metadata = OriginIntrinsicMetadataRow(
id=origin,
tool=tool,
from_revision=rev_id,
metadata=YARN_PARSER_METADATA,
mappings=["npm"],
)
rev_results = list(idx_storage.revision_intrinsic_metadata_get([rev_id]))
for rev_result in rev_results:
assert rev_result.tool
del rev_result.tool["id"]
assert rev_results == [rev_metadata]
orig_results = list(idx_storage.origin_intrinsic_metadata_get([origin]))
for orig_result in orig_results:
assert orig_result.tool
del orig_result.tool["id"]
assert orig_results == [origin_metadata]
def test_origin_metadata_indexer_duplicate_origin(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
indexer.storage = storage
indexer.idx_storage = idx_storage
indexer.run(["https://github.com/librariesio/yarn-parser"])
indexer.run(["https://github.com/librariesio/yarn-parser"] * 2)
origin = "https://github.com/librariesio/yarn-parser"
rev_id = REVISION.id
rev_results = list(indexer.idx_storage.revision_intrinsic_metadata_get([rev_id]))
assert len(rev_results) == 1
orig_results = list(indexer.idx_storage.origin_intrinsic_metadata_get([origin]))
assert len(orig_results) == 1
def test_origin_metadata_indexer_missing_head(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
storage.origin_add([Origin(url="https://example.com")])
indexer = OriginMetadataIndexer(config=swh_indexer_config)
indexer.run(["https://example.com"])
origin = "https://example.com"
results = list(indexer.idx_storage.origin_intrinsic_metadata_get([origin]))
assert results == []
def test_origin_metadata_indexer_partial_missing_head(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
origin1 = "https://example.com"
origin2 = "https://github.com/librariesio/yarn-parser"
storage.origin_add([Origin(url=origin1)])
indexer = OriginMetadataIndexer(config=swh_indexer_config)
indexer.run([origin1, origin2])
rev_id = REVISION.id
rev_results = list(indexer.idx_storage.revision_intrinsic_metadata_get([rev_id]))
assert rev_results == [
RevisionIntrinsicMetadataRow(
id=rev_id,
metadata=YARN_PARSER_METADATA,
mappings=["npm"],
tool=rev_results[0].tool,
)
]
orig_results = list(
indexer.idx_storage.origin_intrinsic_metadata_get([origin1, origin2])
)
for orig_result in orig_results:
assert orig_results == [
OriginIntrinsicMetadataRow(
id=origin2,
from_revision=rev_id,
metadata=YARN_PARSER_METADATA,
mappings=["npm"],
tool=orig_results[0].tool,
)
]
def test_origin_metadata_indexer_duplicate_revision(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
indexer.storage = storage
indexer.idx_storage = idx_storage
indexer.catch_exceptions = False
origin1 = "https://github.com/librariesio/yarn-parser"
origin2 = "https://github.com/librariesio/yarn-parser.git"
indexer.run([origin1, origin2])
rev_id = REVISION.id
rev_results = list(indexer.idx_storage.revision_intrinsic_metadata_get([rev_id]))
assert len(rev_results) == 1
orig_results = list(
indexer.idx_storage.origin_intrinsic_metadata_get([origin1, origin2])
)
assert len(orig_results) == 2
def test_origin_metadata_indexer_no_metadata_file(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
origin = "https://github.com/librariesio/yarn-parser"
with patch("swh.indexer.metadata_dictionary.npm.NpmMapping.filename", b"foo.json"):
indexer.run([origin])
rev_id = REVISION.id
rev_results = list(indexer.idx_storage.revision_intrinsic_metadata_get([rev_id]))
assert rev_results == []
orig_results = list(indexer.idx_storage.origin_intrinsic_metadata_get([origin]))
assert orig_results == []
def test_origin_metadata_indexer_no_metadata(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
origin = "https://github.com/librariesio/yarn-parser"
with patch(
"swh.indexer.metadata.RevisionMetadataIndexer"
".translate_revision_intrinsic_metadata",
return_value=(["npm"], {"@context": "foo"}),
):
indexer.run([origin])
rev_id = REVISION.id
rev_results = list(indexer.idx_storage.revision_intrinsic_metadata_get([rev_id]))
assert rev_results == []
orig_results = list(indexer.idx_storage.origin_intrinsic_metadata_get([origin]))
assert orig_results == []
def test_origin_metadata_indexer_error(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
origin = "https://github.com/librariesio/yarn-parser"
with patch(
"swh.indexer.metadata.RevisionMetadataIndexer"
".translate_revision_intrinsic_metadata",
return_value=None,
):
indexer.run([origin])
rev_id = REVISION.id
rev_results = list(indexer.idx_storage.revision_intrinsic_metadata_get([rev_id]))
assert rev_results == []
orig_results = list(indexer.idx_storage.origin_intrinsic_metadata_get([origin]))
assert orig_results == []
def test_origin_metadata_indexer_unknown_origin(
swh_indexer_config,
idx_storage: IndexerStorageInterface,
storage: StorageInterface,
obj_storage,
) -> None:
indexer = OriginMetadataIndexer(config=swh_indexer_config)
result = indexer.index_list(["https://unknown.org/foo"])
assert not result
diff --git a/swh/indexer/tests/test_tasks.py b/swh/indexer/tests/test_tasks.py
index 1058f10..e4be328 100644
--- a/swh/indexer/tests/test_tasks.py
+++ b/swh/indexer/tests/test_tasks.py
@@ -1,123 +1,132 @@
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
def test_task_origin_metadata(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch("swh.indexer.tasks.OriginMetadataIndexer.run")
mock_indexer.return_value = {"status": "eventful"}
res = swh_scheduler_celery_app.send_task(
- "swh.indexer.tasks.OriginMetadata", args=["origin-url"],
+ "swh.indexer.tasks.OriginMetadata",
+ args=["origin-url"],
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
def test_task_ctags(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch("swh.indexer.tasks.CtagsIndexer.run")
mock_indexer.return_value = {"status": "eventful"}
- res = swh_scheduler_celery_app.send_task("swh.indexer.tasks.Ctags", args=["id0"],)
+ res = swh_scheduler_celery_app.send_task(
+ "swh.indexer.tasks.Ctags",
+ args=["id0"],
+ )
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
def test_task_fossology_license(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch("swh.indexer.tasks.FossologyLicenseIndexer.run")
mock_indexer.return_value = {"status": "eventful"}
res = swh_scheduler_celery_app.send_task(
- "swh.indexer.tasks.ContentFossologyLicense", args=["id0"],
+ "swh.indexer.tasks.ContentFossologyLicense",
+ args=["id0"],
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
def test_task_recompute_checksums(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch("swh.indexer.tasks.RecomputeChecksums.run")
mock_indexer.return_value = {"status": "eventful"}
res = swh_scheduler_celery_app.send_task(
- "swh.indexer.tasks.RecomputeChecksums", args=[[{"blake2b256": "id"}]],
+ "swh.indexer.tasks.RecomputeChecksums",
+ args=[[{"blake2b256": "id"}]],
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
def test_task_mimetype(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch("swh.indexer.tasks.MimetypeIndexer.run")
mock_indexer.return_value = {"status": "eventful"}
res = swh_scheduler_celery_app.send_task(
- "swh.indexer.tasks.ContentMimetype", args=["id0"],
+ "swh.indexer.tasks.ContentMimetype",
+ args=["id0"],
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
def test_task_mimetype_partition(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch("swh.indexer.tasks.MimetypePartitionIndexer.run")
mock_indexer.return_value = {"status": "eventful"}
res = swh_scheduler_celery_app.send_task(
- "swh.indexer.tasks.ContentMimetypePartition", args=[0, 4],
+ "swh.indexer.tasks.ContentMimetypePartition",
+ args=[0, 4],
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
def test_task_license_partition(
mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config
):
mock_indexer = mocker.patch(
"swh.indexer.tasks.FossologyLicensePartitionIndexer.run"
)
mock_indexer.return_value = {"status": "eventful"}
res = swh_scheduler_celery_app.send_task(
- "swh.indexer.tasks.ContentFossologyLicensePartition", args=[0, 4],
+ "swh.indexer.tasks.ContentFossologyLicensePartition",
+ args=[0, 4],
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
diff --git a/swh/indexer/tests/utils.py b/swh/indexer/tests/utils.py
index 025504f..df05572 100644
--- a/swh/indexer/tests/utils.py
+++ b/swh/indexer/tests/utils.py
@@ -1,731 +1,743 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import abc
import datetime
import functools
from typing import Any, Dict
import unittest
from hypothesis import strategies
from swh.core.api.classes import stream_results
from swh.indexer.storage import INDEXER_CFG_KEY
from swh.model import hashutil
from swh.model.hashutil import hash_to_bytes
from swh.model.model import (
Content,
Directory,
DirectoryEntry,
Origin,
OriginVisit,
OriginVisitStatus,
Person,
Revision,
RevisionType,
Snapshot,
SnapshotBranch,
TargetType,
TimestampWithTimezone,
)
from swh.storage.utils import now
BASE_TEST_CONFIG: Dict[str, Dict[str, Any]] = {
"storage": {"cls": "memory"},
"objstorage": {"cls": "memory"},
INDEXER_CFG_KEY: {"cls": "memory"},
}
ORIGINS = [
Origin(url="https://github.com/SoftwareHeritage/swh-storage"),
Origin(url="rsync://ftp.gnu.org/gnu/3dldf"),
Origin(url="https://forge.softwareheritage.org/source/jesuisgpl/"),
Origin(url="https://pypi.org/project/limnoria/"),
Origin(url="http://0-512-md.googlecode.com/svn/"),
Origin(url="https://github.com/librariesio/yarn-parser"),
Origin(url="https://github.com/librariesio/yarn-parser.git"),
]
ORIGIN_VISITS = [
{"type": "git", "origin": ORIGINS[0].url},
{"type": "ftp", "origin": ORIGINS[1].url},
{"type": "deposit", "origin": ORIGINS[2].url},
{"type": "pypi", "origin": ORIGINS[3].url},
{"type": "svn", "origin": ORIGINS[4].url},
{"type": "git", "origin": ORIGINS[5].url},
{"type": "git", "origin": ORIGINS[6].url},
]
DIRECTORY = Directory(
id=hash_to_bytes("34f335a750111ca0a8b64d8034faec9eedc396be"),
entries=(
DirectoryEntry(
name=b"index.js",
type="file",
target=hash_to_bytes("01c9379dfc33803963d07c1ccc748d3fe4c96bb5"),
perms=0o100644,
),
DirectoryEntry(
name=b"package.json",
type="file",
target=hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"),
perms=0o100644,
),
DirectoryEntry(
name=b".github",
type="dir",
target=Directory(entries=()).id,
perms=0o040000,
),
),
)
DIRECTORY2 = Directory(
id=b"\xf8zz\xa1\x12`<1$\xfav\xf9\x01\xfd5\x85F`\xf2\xb6",
entries=(
DirectoryEntry(
name=b"package.json",
type="file",
target=hash_to_bytes("f5305243b3ce7ef8dc864ebc73794da304025beb"),
perms=0o100644,
),
),
)
REVISION = Revision(
id=hash_to_bytes("c6201cb1b9b9df9a7542f9665c3b5dfab85e9775"),
message=b"Improve search functionality",
author=Person(
name=b"Andrew Nesbitt",
fullname=b"Andrew Nesbitt <andrewnez@gmail.com>",
email=b"andrewnez@gmail.com",
),
committer=Person(
name=b"Andrew Nesbitt",
fullname=b"Andrew Nesbitt <andrewnez@gmail.com>",
email=b"andrewnez@gmail.com",
),
committer_date=TimestampWithTimezone.from_datetime(
datetime.datetime(
2013,
10,
4,
12,
50,
49,
tzinfo=datetime.timezone(datetime.timedelta(minutes=120)),
)
),
type=RevisionType.GIT,
synthetic=False,
date=TimestampWithTimezone.from_datetime(
datetime.datetime(
2017,
2,
20,
16,
14,
16,
tzinfo=datetime.timezone(datetime.timedelta(minutes=120)),
)
),
directory=DIRECTORY2.id,
parents=(),
)
REVISIONS = [REVISION]
SNAPSHOTS = [
Snapshot(
id=hash_to_bytes("a50fde72265343b7d28cecf6db20d98a81d21965"),
branches={
b"refs/heads/add-revision-origin-cache": SnapshotBranch(
target=b'L[\xce\x1c\x88\x8eF\t\xf1"\x19\x1e\xfb\xc0s\xe7/\xe9l\x1e',
target_type=TargetType.REVISION,
),
b"refs/head/master": SnapshotBranch(
target=b"8K\x12\x00d\x03\xcc\xe4]bS\xe3\x8f{\xd7}\xac\xefrm",
target_type=TargetType.REVISION,
),
b"HEAD": SnapshotBranch(
target=b"refs/head/master", target_type=TargetType.ALIAS
),
b"refs/tags/v0.0.103": SnapshotBranch(
target=b'\xb6"Im{\xfdLb\xb0\x94N\xea\x96m\x13x\x88+\x0f\xdd',
target_type=TargetType.RELEASE,
),
},
),
Snapshot(
id=hash_to_bytes("2c67f69a416bca4e1f3fcd848c588fab88ad0642"),
branches={
b"3DLDF-1.1.4.tar.gz": SnapshotBranch(
target=b'dJ\xfb\x1c\x91\xf4\x82B%]6\xa2\x90|\xd3\xfc"G\x99\x11',
target_type=TargetType.REVISION,
),
b"3DLDF-2.0.2.tar.gz": SnapshotBranch(
target=b"\xb6\x0e\xe7\x9e9\xac\xaa\x19\x9e=\xd1\xc5\x00\\\xc6\xfc\xe0\xa6\xb4V", # noqa
target_type=TargetType.REVISION,
),
b"3DLDF-2.0.3-examples.tar.gz": SnapshotBranch(
target=b"!H\x19\xc0\xee\x82-\x12F1\xbd\x97\xfe\xadZ\x80\x80\xc1\x83\xff", # noqa
target_type=TargetType.REVISION,
),
b"3DLDF-2.0.3.tar.gz": SnapshotBranch(
target=b"\x8e\xa9\x8e/\xea}\x9feF\xf4\x9f\xfd\xee\xcc\x1a\xb4`\x8c\x8by", # noqa
target_type=TargetType.REVISION,
),
b"3DLDF-2.0.tar.gz": SnapshotBranch(
target=b"F6*\xff(?\x19a\xef\xb6\xc2\x1fv$S\xe3G\xd3\xd1m",
target_type=TargetType.REVISION,
),
},
),
Snapshot(
id=hash_to_bytes("68c0d26104d47e278dd6be07ed61fafb561d0d20"),
branches={
b"master": SnapshotBranch(
target=b"\xe7n\xa4\x9c\x9f\xfb\xb7\xf76\x11\x08{\xa6\xe9\x99\xb1\x9e]q\xeb", # noqa
target_type=TargetType.REVISION,
)
},
),
Snapshot(
id=hash_to_bytes("f255245269e15fc99d284affd79f766668de0b67"),
branches={
b"HEAD": SnapshotBranch(
target=b"releases/2018.09.09", target_type=TargetType.ALIAS
),
b"releases/2018.09.01": SnapshotBranch(
target=b"<\xee1(\xe8\x8d_\xc1\xc9\xa6rT\xf1\x1d\xbb\xdfF\xfdw\xcf",
target_type=TargetType.REVISION,
),
b"releases/2018.09.09": SnapshotBranch(
target=b"\x83\xb9\xb6\xc7\x05\xb1%\xd0\xfem\xd8kA\x10\x9d\xc5\xfa2\xf8t", # noqa
target_type=TargetType.REVISION,
),
},
),
Snapshot(
id=hash_to_bytes("a1a28c0ab387a8f9e0618cb705eab81fc448f473"),
branches={
b"master": SnapshotBranch(
target=b"\xe4?r\xe1,\x88\xab\xec\xe7\x9a\x87\xb8\xc9\xad#.\x1bw=\x18",
target_type=TargetType.REVISION,
)
},
),
Snapshot(
id=hash_to_bytes("bb4fd3a836930ce629d912864319637040ff3040"),
branches={
b"HEAD": SnapshotBranch(
- target=REVISION.id, target_type=TargetType.REVISION,
+ target=REVISION.id,
+ target_type=TargetType.REVISION,
)
},
),
Snapshot(
id=hash_to_bytes("bb4fd3a836930ce629d912864319637040ff3040"),
branches={
b"HEAD": SnapshotBranch(
- target=REVISION.id, target_type=TargetType.REVISION,
+ target=REVISION.id,
+ target_type=TargetType.REVISION,
)
},
),
]
SHA1_TO_LICENSES = {
"01c9379dfc33803963d07c1ccc748d3fe4c96bb5": ["GPL"],
"02fb2c89e14f7fab46701478c83779c7beb7b069": ["Apache2.0"],
"103bc087db1d26afc3a0283f38663d081e9b01e6": ["MIT"],
"688a5ef812c53907562fe379d4b3851e69c7cb15": ["AGPL"],
"da39a3ee5e6b4b0d3255bfef95601890afd80709": [],
}
SHA1_TO_CTAGS = {
"01c9379dfc33803963d07c1ccc748d3fe4c96bb5": [
- {"name": "foo", "kind": "str", "line": 10, "lang": "bar",}
+ {
+ "name": "foo",
+ "kind": "str",
+ "line": 10,
+ "lang": "bar",
+ }
],
"d4c647f0fc257591cc9ba1722484229780d1c607": [
- {"name": "let", "kind": "int", "line": 100, "lang": "haskell",}
+ {
+ "name": "let",
+ "kind": "int",
+ "line": 100,
+ "lang": "haskell",
+ }
],
"688a5ef812c53907562fe379d4b3851e69c7cb15": [
- {"name": "symbol", "kind": "float", "line": 99, "lang": "python",}
+ {
+ "name": "symbol",
+ "kind": "float",
+ "line": 99,
+ "lang": "python",
+ }
],
}
OBJ_STORAGE_DATA = {
"01c9379dfc33803963d07c1ccc748d3fe4c96bb5": b"this is some text",
"688a5ef812c53907562fe379d4b3851e69c7cb15": b"another text",
"8986af901dd2043044ce8f0d8fc039153641cf17": b"yet another text",
"02fb2c89e14f7fab46701478c83779c7beb7b069": b"""
import unittest
import logging
from swh.indexer.mimetype import MimetypeIndexer
from swh.indexer.tests.test_utils import MockObjStorage
class MockStorage():
def content_mimetype_add(self, mimetypes):
self.state = mimetypes
def indexer_configuration_add(self, tools):
return [{
'id': 10,
}]
""",
"103bc087db1d26afc3a0283f38663d081e9b01e6": b"""
#ifndef __AVL__
#define __AVL__
typedef struct _avl_tree avl_tree;
typedef struct _data_t {
int content;
} data_t;
""",
"93666f74f1cf635c8c8ac118879da6ec5623c410": b"""
(should 'pygments (recognize 'lisp 'easily))
""",
"26a9f72a7c87cc9205725cfd879f514ff4f3d8d5": b"""
{
"name": "test_metadata",
"version": "0.0.1",
"description": "Simple package.json test for indexer",
"repository": {
"type": "git",
"url": "https://github.com/moranegg/metadata_test"
}
}
""",
"d4c647f0fc257591cc9ba1722484229780d1c607": b"""
{
"version": "5.0.3",
"name": "npm",
"description": "a package manager for JavaScript",
"keywords": [
"install",
"modules",
"package manager",
"package.json"
],
"preferGlobal": true,
"config": {
"publishtest": false
},
"homepage": "https://docs.npmjs.com/",
"author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me)",
"repository": {
"type": "git",
"url": "https://github.com/npm/npm"
},
"bugs": {
"url": "https://github.com/npm/npm/issues"
},
"dependencies": {
"JSONStream": "~1.3.1",
"abbrev": "~1.1.0",
"ansi-regex": "~2.1.1",
"ansicolors": "~0.3.2",
"ansistyles": "~0.1.3"
},
"devDependencies": {
"tacks": "~1.2.6",
"tap": "~10.3.2"
},
"license": "Artistic-2.0"
}
""",
"a7ab314d8a11d2c93e3dcf528ca294e7b431c449": b"""
""",
"da39a3ee5e6b4b0d3255bfef95601890afd80709": b"",
# was 626364 / b'bcd'
"e3e40fee6ff8a52f06c3b428bfe7c0ed2ef56e92": b"unimportant content for bcd",
# was 636465 / b'cde' now yarn-parser package.json
"f5305243b3ce7ef8dc864ebc73794da304025beb": b"""
{
"name": "yarn-parser",
"version": "1.0.0",
"description": "Tiny web service for parsing yarn.lock files",
"main": "index.js",
"scripts": {
"start": "node index.js",
"test": "mocha"
},
"engines": {
"node": "9.8.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/librariesio/yarn-parser.git"
},
"keywords": [
"yarn",
"parse",
"lock",
"dependencies"
],
"author": "Andrew Nesbitt",
"license": "AGPL-3.0",
"bugs": {
"url": "https://github.com/librariesio/yarn-parser/issues"
},
"homepage": "https://github.com/librariesio/yarn-parser#readme",
"dependencies": {
"@yarnpkg/lockfile": "^1.0.0",
"body-parser": "^1.15.2",
"express": "^4.14.0"
},
"devDependencies": {
"chai": "^4.1.2",
"mocha": "^5.2.0",
"request": "^2.87.0",
"test": "^0.6.0"
}
}
""",
}
YARN_PARSER_METADATA = {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"url": "https://github.com/librariesio/yarn-parser#readme",
"codeRepository": "git+git+https://github.com/librariesio/yarn-parser.git",
"author": [{"type": "Person", "name": "Andrew Nesbitt"}],
"license": "https://spdx.org/licenses/AGPL-3.0",
"version": "1.0.0",
"description": "Tiny web service for parsing yarn.lock files",
"issueTracker": "https://github.com/librariesio/yarn-parser/issues",
"name": "yarn-parser",
"keywords": ["yarn", "parse", "lock", "dependencies"],
"type": "SoftwareSourceCode",
}
json_dict_keys = strategies.one_of(
strategies.characters(),
strategies.just("type"),
strategies.just("url"),
strategies.just("name"),
strategies.just("email"),
strategies.just("@id"),
strategies.just("@context"),
strategies.just("repository"),
strategies.just("license"),
strategies.just("repositories"),
strategies.just("licenses"),
)
"""Hypothesis strategy that generates strings, with an emphasis on those
that are often used as dictionary keys in metadata files."""
generic_json_document = strategies.recursive(
strategies.none()
| strategies.booleans()
| strategies.floats()
| strategies.characters(),
lambda children: (
strategies.lists(children, min_size=1)
| strategies.dictionaries(json_dict_keys, children, min_size=1)
),
)
"""Hypothesis strategy that generates possible values for values of JSON
metadata files."""
def json_document_strategy(keys=None):
    """Build a hypothesis strategy generating JSON-style metadata dicts.

    Args:
        keys: iterable of allowed top-level keys; when None, any character
            may serve as a key.
    """
    key_strategy = (
        strategies.characters()
        if keys is None
        else strategies.one_of(map(strategies.just, keys))
    )
    return strategies.dictionaries(key_strategy, generic_json_document, min_size=1)
def _tree_to_xml(root, xmlns, data):
def encode(s):
"Skips unpaired surrogates generated by json_document_strategy"
return s.encode("utf8", "replace")
def to_xml(data, indent=b" "):
if data is None:
return b""
elif isinstance(data, (bool, str, int, float)):
return indent + encode(str(data))
elif isinstance(data, list):
return b"\n".join(to_xml(v, indent=indent) for v in data)
elif isinstance(data, dict):
lines = []
for (key, value) in data.items():
lines.append(indent + encode("<{}>".format(key)))
lines.append(to_xml(value, indent=indent + b" "))
lines.append(indent + encode("</{}>".format(key)))
return b"\n".join(lines)
else:
raise TypeError(data)
return b"\n".join(
[
'<{} xmlns="{}">'.format(root, xmlns).encode(),
to_xml(data),
"</{}>".format(root).encode(),
]
)
class TreeToXmlTest(unittest.TestCase):
def test_leaves(self):
self.assertEqual(
_tree_to_xml("root", "http://example.com", None),
b'<root xmlns="http://example.com">\n\n</root>',
)
self.assertEqual(
_tree_to_xml("root", "http://example.com", True),
b'<root xmlns="http://example.com">\n True\n</root>',
)
self.assertEqual(
_tree_to_xml("root", "http://example.com", "abc"),
b'<root xmlns="http://example.com">\n abc\n</root>',
)
self.assertEqual(
_tree_to_xml("root", "http://example.com", 42),
b'<root xmlns="http://example.com">\n 42\n</root>',
)
self.assertEqual(
_tree_to_xml("root", "http://example.com", 3.14),
b'<root xmlns="http://example.com">\n 3.14\n</root>',
)
def test_dict(self):
self.assertIn(
_tree_to_xml("root", "http://example.com", {"foo": "bar", "baz": "qux"}),
[
b'<root xmlns="http://example.com">\n'
b" <foo>\n bar\n </foo>\n"
b" <baz>\n qux\n </baz>\n"
b"</root>",
b'<root xmlns="http://example.com">\n'
b" <baz>\n qux\n </baz>\n"
b" <foo>\n bar\n </foo>\n"
b"</root>",
],
)
def test_list(self):
self.assertEqual(
_tree_to_xml(
- "root", "http://example.com", [{"foo": "bar"}, {"foo": "baz"},]
+ "root",
+ "http://example.com",
+ [
+ {"foo": "bar"},
+ {"foo": "baz"},
+ ],
),
b'<root xmlns="http://example.com">\n'
b" <foo>\n bar\n </foo>\n"
b" <foo>\n baz\n </foo>\n"
b"</root>",
)
def xml_document_strategy(keys, root, xmlns):
    """Build a hypothesis strategy generating XML metadata documents
    rooted at *root* in namespace *xmlns*, using the given *keys*."""
    render = functools.partial(_tree_to_xml, root, xmlns)
    return strategies.builds(render, json_document_strategy(keys))
def filter_dict(d, keys):
    """Return a shallow copy of *d* with the given key(s) removed.

    *keys* may be a single key or a list/tuple of keys.
    """
    excluded = keys if isinstance(keys, (list, tuple)) else (keys,)
    return {k: v for (k, v) in d.items() if k not in excluded}
def fill_obj_storage(obj_storage):
    """Load every blob from OBJ_STORAGE_DATA into *obj_storage*."""
    for (hex_id, raw) in OBJ_STORAGE_DATA.items():
        obj_storage.add(raw, obj_id=hash_to_bytes(hex_id))
def fill_storage(storage):
    """Populate *storage* with the shared test fixtures.

    Adds the origins, directories, revisions and snapshots defined above,
    records one "full" visit per (ORIGIN_VISITS, SNAPSHOTS) pair, and
    inserts every blob from OBJ_STORAGE_DATA as a Content object.
    """
    storage.origin_add(ORIGINS)
    storage.directory_add([DIRECTORY, DIRECTORY2])
    storage.revision_add(REVISIONS)
    storage.snapshot_add(SNAPSHOTS)

    for visit_info, snapshot in zip(ORIGIN_VISITS, SNAPSHOTS):
        assert snapshot.id is not None
        visit = storage.origin_visit_add(
            [
                OriginVisit(
                    origin=visit_info["origin"],
                    date=now(),
                    type=visit_info["type"],
                )
            ]
        )[0]
        # Mark each visit as complete and point it at its snapshot.
        storage.origin_visit_status_add(
            [
                OriginVisitStatus(
                    origin=visit.origin,
                    visit=visit.visit,
                    date=now(),
                    status="full",
                    snapshot=snapshot.id,
                )
            ]
        )

    contents = []
    for (hex_id, raw) in OBJ_STORAGE_DATA.items():
        digests = hashutil.MultiHash.from_data(raw).digest()
        contents.append(
            Content(
                data=raw,
                length=len(raw),
                status="visible",
                sha1=hash_to_bytes(hex_id),
                sha1_git=hash_to_bytes(hex_id),
                sha256=digests["sha256"],
                blake2s256=digests["blake2s256"],
            )
        )
    storage.content_add(contents)
class CommonContentIndexerTest(metaclass=abc.ABCMeta):
def get_indexer_results(self, ids):
"""Override this for indexers that don't have a mock storage."""
return self.indexer.idx_storage.state
def assert_results_ok(self, sha1s, expected_results=None):
sha1s = [
sha1 if isinstance(sha1, bytes) else hash_to_bytes(sha1) for sha1 in sha1s
]
actual_results = list(self.get_indexer_results(sha1s))
if expected_results is None:
expected_results = self.expected_results
self.assertEqual(expected_results, actual_results)
def test_index(self):
- """Known sha1 have their data indexed
-
- """
+ """Known sha1 have their data indexed"""
sha1s = [self.id0, self.id1, self.id2]
# when
self.indexer.run(sha1s)
self.assert_results_ok(sha1s)
# 2nd pass
self.indexer.run(sha1s)
self.assert_results_ok(sha1s)
def test_index_one_unknown_sha1(self):
"""Unknown sha1 are not indexed"""
sha1s = [
self.id1,
"799a5ef812c53907562fe379d4b3851e69c7cb15", # unknown
"800a5ef812c53907562fe379d4b3851e69c7cb15",
] # unknown
# when
self.indexer.run(sha1s)
# then
expected_results = [
res
for res in self.expected_results
if hashutil.hash_to_hex(res.id) in sha1s
]
self.assert_results_ok(sha1s, expected_results)
class CommonContentIndexerPartitionTest:
- """Allows to factorize tests on range indexer.
-
- """
+ """Allows to factorize tests on range indexer."""
def setUp(self):
self.contents = sorted(OBJ_STORAGE_DATA)
def assert_results_ok(self, partition_id, nb_partitions, actual_results):
expected_ids = [
c.sha1
for c in stream_results(
self.indexer.storage.content_get_partition,
partition_id=partition_id,
nb_partitions=nb_partitions,
)
]
actual_results = list(actual_results)
for indexed_data in actual_results:
_id = indexed_data.id
assert _id in expected_ids
_tool_id = indexed_data.indexer_configuration_id
assert _tool_id == self.indexer.tool["id"]
def test__index_contents(self):
- """Indexing contents without existing data results in indexed data
-
- """
+ """Indexing contents without existing data results in indexed data"""
partition_id = 0
nb_partitions = 4
actual_results = list(
self.indexer._index_contents(partition_id, nb_partitions, indexed={})
)
self.assert_results_ok(partition_id, nb_partitions, actual_results)
def test__index_contents_with_indexed_data(self):
- """Indexing contents with existing data results in less indexed data
-
- """
+ """Indexing contents with existing data results in less indexed data"""
partition_id = 3
nb_partitions = 4
# first pass
actual_results = list(
self.indexer._index_contents(partition_id, nb_partitions, indexed={}),
)
self.assert_results_ok(partition_id, nb_partitions, actual_results)
indexed_ids = {res.id for res in actual_results}
actual_results = list(
self.indexer._index_contents(
partition_id, nb_partitions, indexed=indexed_ids
)
)
# already indexed, so nothing new
assert actual_results == []
def test_generate_content_get(self):
- """Optimal indexing should result in indexed data
-
- """
+ """Optimal indexing should result in indexed data"""
partition_id = 0
nb_partitions = 1
actual_results = self.indexer.run(
partition_id, nb_partitions, skip_existing=False
)
assert actual_results["status"] == "eventful", actual_results
def test_generate_content_get_no_result(self):
"""No result indexed returns False"""
- actual_results = self.indexer.run(1, 2 ** 512, incremental=False)
+ actual_results = self.indexer.run(1, 2**512, incremental=False)
assert actual_results == {"status": "uneventful"}
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Fri, Jul 4, 1:08 PM (1 w, 1 d ago)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3273778
Attached To
rDCIDX Metadata indexer
Event Timeline
Log In to Comment