
diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/__init__.py
index 839c10d..34541a0 100644
--- a/swh/indexer/storage/__init__.py
+++ b/swh/indexer/storage/__init__.py
@@ -1,908 +1,915 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import json
import psycopg2
import psycopg2.pool
from collections import defaultdict
from swh.core.api import remote_api_endpoint
from swh.storage.common import db_transaction_generator, db_transaction
from swh.storage.exc import StorageDBError
from .db import Db
from . import converters
INDEXER_CFG_KEY = 'indexer_storage'
MAPPING_NAMES = ['codemeta', 'gemspec', 'maven', 'npm', 'pkg-info']
def get_indexer_storage(cls, args):
"""Get an indexer storage object of class `storage_class` with
arguments `storage_args`.
Args:
cls (str): storage's class, either 'local' or 'remote'
args (dict): dictionary of arguments passed to the
storage class constructor
Returns:
an instance of swh.indexer's storage (either local or remote)
Raises:
ValueError if passed an unknown storage class.
"""
if cls == 'remote':
from .api.client import RemoteStorage as IndexerStorage
elif cls == 'local':
from . import IndexerStorage
elif cls == 'memory':
from .in_memory import IndexerStorage
else:
raise ValueError('Unknown indexer storage class `%s`' % cls)
return IndexerStorage(**args)
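# A minimal usage sketch, assuming a reachable PostgreSQL database and a
# hypothetical DSN 'dbname=softwareheritage-indexer-dev':
#
#     storage = get_indexer_storage(
#         'local', {'db': 'dbname=softwareheritage-indexer-dev'})
#     storage.check_config(check_write=False)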
def _check_duplicates(data, key):
"""
If any two dictionaries in `data` have the same value for the
key, raises a `ValueError`.
Values associated to the key must be hashable.
Args:
data (List[dict]): List of dictionaries to be inserted
key (str): Name of the key that acts as id.
>>> _check_duplicates([
... {'id': 'foo', 'data': 'spam'},
... {'id': 'bar', 'data': 'egg'},
... ], 'id')
>>> _check_duplicates([
... {'id': 'foo', 'data': 'spam'},
... {'id': 'foo', 'data': 'egg'},
... ], 'id')
Traceback (most recent call last):
...
ValueError: The same id is present more than once.
"""
if len({item[key] for item in data}) < len(data):
raise ValueError(
'The same {} is present more than once.'.format(key))
class IndexerStorage:
"""SWH Indexer Storage
"""
def __init__(self, db, min_pool_conns=1, max_pool_conns=10):
"""
Args:
db: either a libpq connection string, or a psycopg2 connection
"""
try:
if isinstance(db, psycopg2.extensions.connection):
self._pool = None
self._db = Db(db)
else:
self._pool = psycopg2.pool.ThreadedConnectionPool(
min_pool_conns, max_pool_conns, db
)
self._db = None
except psycopg2.OperationalError as e:
raise StorageDBError(e)
def get_db(self):
if self._db:
return self._db
return Db.from_pool(self._pool)
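# Both forms documented above work, e.g. (hypothetical DSN, sketch only):
#
#     IndexerStorage('dbname=softwareheritage-indexer-dev')
#     IndexerStorage(psycopg2.connect('dbname=softwareheritage-indexer-dev'))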
@remote_api_endpoint('check_config')
def check_config(self, *, check_write):
"""Check that the storage is configured and ready to go."""
# Check permissions on one of the tables
with self.get_db().transaction() as cur:
if check_write:
check = 'INSERT'
else:
check = 'SELECT'
cur.execute(
"select has_table_privilege(current_user, 'content_mimetype', %s)", # noqa
(check,)
)
return cur.fetchone()[0]
return True
@remote_api_endpoint('content_mimetype/missing')
@db_transaction_generator()
def content_mimetype_missing(self, mimetypes, db=None, cur=None):
"""Generate mimetypes missing from storage.
Args:
mimetypes (iterable): iterable of dict with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute the
results
Yields:
an iterable of missing id for the tuple (id,
indexer_configuration_id)
"""
for obj in db.content_mimetype_missing_from_list(mimetypes, cur):
yield obj[0]
def _content_get_range(self, content_type, start, end,
indexer_configuration_id, limit=1000,
with_textual_data=False,
db=None, cur=None):
"""Retrieve ids of type content_type within range [start, end] bound
by limit.
Args:
**content_type** (str): content's type (mimetype, language, etc...)
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**indexer_configuration_id** (int): The tool used to index data
**limit** (int): Limit result (default to 1000)
**with_textual_data** (bool): Only process textual
content (True) or all content (False, the default)
Raises:
ValueError if:
- limit is None
- an unknown content_type is provided
Returns:
a dict with keys:
- **ids** [bytes]: iterable of content ids within the range.
- **next** (Optional[bytes]): The next range of sha1 starts at
this sha1 if any
"""
if limit is None:
raise ValueError('Development error: limit should not be None')
if content_type not in db.content_indexer_names:
err = 'Development error: Wrong type. Should be one of [%s]' % (
','.join(db.content_indexer_names))
raise ValueError(err)
ids = []
next_id = None
for counter, obj in enumerate(db.content_get_range(
content_type, start, end, indexer_configuration_id,
limit=limit+1, with_textual_data=with_textual_data, cur=cur)):
_id = obj[0]
if counter >= limit:
next_id = _id
break
ids.append(_id)
return {
'ids': ids,
'next': next_id
}
@remote_api_endpoint('content_mimetype/range')
@db_transaction()
def content_mimetype_get_range(self, start, end, indexer_configuration_id,
limit=1000, db=None, cur=None):
"""Retrieve mimetypes within range [start, end] bound by limit.
Args:
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**indexer_configuration_id** (int): The tool used to index data
**limit** (int): Limit result (default to 1000)
Raises:
ValueError if limit is None
Returns:
a dict with keys:
- **ids** [bytes]: iterable of content ids within the range.
- **next** (Optional[bytes]): The next range of sha1 starts at
this sha1 if any
"""
return self._content_get_range('mimetype', start, end,
indexer_configuration_id, limit=limit,
db=db, cur=cur)
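# A pagination sketch over this endpoint ('start', 'end' and 'tool_id' are
# assumed, illustrative values):
#
#     result = storage.content_mimetype_get_range(start, end, tool_id)
#     ids = list(result['ids'])
#     while result['next'] is not None:
#         result = storage.content_mimetype_get_range(
#             result['next'], end, tool_id)
#         ids.extend(result['ids'])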
@remote_api_endpoint('content_mimetype/add')
@db_transaction()
def content_mimetype_add(self, mimetypes, conflict_update=False, db=None,
cur=None):
"""Add mimetypes not present in storage.
Args:
mimetypes (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **mimetype** (bytes): raw content's mimetype
- **encoding** (bytes): raw content's encoding
- **indexer_configuration_id** (int): tool's id used to
compute the results
conflict_update (bool): Flag to determine if we want to
overwrite (``True``) or skip duplicates (``False``, the
default)
"""
_check_duplicates(mimetypes, 'id')
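+ # Sorting by id gives every concurrent transaction the same row-lock
+ # acquisition order, so two conflict_update upserts over overlapping ids
+ # cannot end up waiting on each other in a cycle (i.e. deadlock).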
+ mimetypes.sort(key=lambda m: m['id'])
db.mktemp_content_mimetype(cur)
db.copy_to(mimetypes, 'tmp_content_mimetype',
['id', 'mimetype', 'encoding', 'indexer_configuration_id'],
cur)
db.content_mimetype_add_from_temp(conflict_update, cur)
@remote_api_endpoint('content_mimetype')
@db_transaction_generator()
def content_mimetype_get(self, ids, db=None, cur=None):
"""Retrieve full content mimetype per ids.
Args:
ids (iterable): sha1 identifier
Yields:
mimetypes (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **mimetype** (bytes): raw content's mimetype
- **encoding** (bytes): raw content's encoding
- **tool** (dict): Tool used to compute the mimetype
"""
for c in db.content_mimetype_get_from_list(ids, cur):
yield converters.db_to_mimetype(
dict(zip(db.content_mimetype_cols, c)))
@remote_api_endpoint('content_language/missing')
@db_transaction_generator()
def content_language_missing(self, languages, db=None, cur=None):
"""List languages missing from storage.
Args:
languages (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Yields:
an iterable of missing id for the tuple (id,
indexer_configuration_id)
"""
for obj in db.content_language_missing_from_list(languages, cur):
yield obj[0]
@remote_api_endpoint('content_language')
@db_transaction_generator()
def content_language_get(self, ids, db=None, cur=None):
"""Retrieve full content language per ids.
Args:
ids (iterable): sha1 identifier
Yields:
languages (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **lang** (bytes): raw content's language
- **tool** (dict): Tool used to compute the language
"""
for c in db.content_language_get_from_list(ids, cur):
yield converters.db_to_language(
dict(zip(db.content_language_cols, c)))
@remote_api_endpoint('content_language/add')
@db_transaction()
def content_language_add(self, languages, conflict_update=False, db=None,
cur=None):
"""Add languages not present in storage.
Args:
languages (iterable): dictionaries with keys:
- **id** (bytes): sha1
- **lang** (bytes): language detected
- **indexer_configuration_id** (int): tool's id used to
compute the results
conflict_update (bool): Flag to determine if we want to
overwrite (true) or skip duplicates (false, the
default)
"""
_check_duplicates(languages, 'id')
+ languages.sort(key=lambda m: m['id'])
db.mktemp_content_language(cur)
# empty language is mapped to 'unknown'
db.copy_to(
({
'id': l['id'],
'lang': 'unknown' if not l['lang'] else l['lang'],
'indexer_configuration_id': l['indexer_configuration_id'],
} for l in languages),
'tmp_content_language',
['id', 'lang', 'indexer_configuration_id'], cur)
db.content_language_add_from_temp(conflict_update, cur)
@remote_api_endpoint('content/ctags/missing')
@db_transaction_generator()
def content_ctags_missing(self, ctags, db=None, cur=None):
"""List ctags missing from storage.
Args:
ctags (iterable): dicts with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Yields:
an iterable of missing id for the tuple (id,
indexer_configuration_id)
"""
for obj in db.content_ctags_missing_from_list(ctags, cur):
yield obj[0]
@remote_api_endpoint('content/ctags')
@db_transaction_generator()
def content_ctags_get(self, ids, db=None, cur=None):
"""Retrieve ctags per id.
Args:
ids (iterable): sha1 checksums
Yields:
Dictionaries with keys:
- **id** (bytes): content's identifier
- **name** (str): symbol's name
- **kind** (str): symbol's kind
- **lang** (str): language for that content
- **tool** (dict): tool used to compute the ctags' info
"""
for c in db.content_ctags_get_from_list(ids, cur):
yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, c)))
@remote_api_endpoint('content/ctags/add')
@db_transaction()
def content_ctags_add(self, ctags, conflict_update=False, db=None,
cur=None):
"""Add ctags not present in storage
Args:
ctags (iterable): dictionaries with keys:
- **id** (bytes): sha1
- **ctags** (list): List of dictionaries with keys: name, kind,
line, lang
"""
_check_duplicates(ctags, 'id')
+ ctags.sort(key=lambda m: m['id'])
def _convert_ctags(__ctags):
"""Convert ctags dict to list of ctags.
"""
for ctags in __ctags:
yield from converters.ctags_to_db(ctags)
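# Each input dict carries a 'ctags' list; it is expected to be flattened
# into one row per ctags entry, matching the columns copied below
# (id, name, kind, line, lang, indexer_configuration_id).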
db.mktemp_content_ctags(cur)
db.copy_to(list(_convert_ctags(ctags)),
tblname='tmp_content_ctags',
columns=['id', 'name', 'kind', 'line',
'lang', 'indexer_configuration_id'],
cur=cur)
db.content_ctags_add_from_temp(conflict_update, cur)
@remote_api_endpoint('content/ctags/search')
@db_transaction_generator()
def content_ctags_search(self, expression,
limit=10, last_sha1=None, db=None, cur=None):
"""Search through content's raw ctags symbols.
Args:
expression (str): Expression to search for
limit (int): Number of rows to return (default to 10).
last_sha1 (str): sha1 offset from which to retrieve results (defaults to None).
Yields:
rows of ctags including id, name, lang, kind, line, etc...
"""
for obj in db.content_ctags_search(expression, last_sha1, limit,
cur=cur):
yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj)))
@remote_api_endpoint('content/fossology_license')
@db_transaction_generator()
def content_fossology_license_get(self, ids, db=None, cur=None):
"""Retrieve licenses per id.
Args:
ids (iterable): sha1 checksums
Yields:
`{id: facts}` where `facts` is a list of dicts with the following keys:
- **licenses** ([str]): associated licenses for that content
- **tool** (dict): Tool used to compute the license
"""
d = defaultdict(list)
for c in db.content_fossology_license_get_from_list(ids, cur):
license = dict(zip(db.content_fossology_license_cols, c))
id_ = license['id']
d[id_].append(converters.db_to_fossology_license(license))
for id_, facts in d.items():
yield {id_: facts}
@remote_api_endpoint('content/fossology_license/add')
@db_transaction()
def content_fossology_license_add(self, licenses, conflict_update=False,
db=None, cur=None):
"""Add licenses not present in storage.
Args:
licenses (iterable): dictionaries with keys:
- **id**: sha1
- **licenses** ([str]): List of licenses associated to sha1
- **indexer_configuration_id** (int): tool's id used to
compute the results
conflict_update: Flag to determine if we want to overwrite (true)
or skip duplicates (false, the default)
Returns:
list: content_license entries which failed due to unknown licenses
"""
_check_duplicates(licenses, 'id')
+ licenses.sort(key=lambda m: m['id'])
db.mktemp_content_fossology_license(cur)
db.copy_to(
({
'id': sha1['id'],
'indexer_configuration_id': sha1['indexer_configuration_id'],
'license': license,
} for sha1 in licenses
for license in sha1['licenses']),
tblname='tmp_content_fossology_license',
columns=['id', 'license', 'indexer_configuration_id'],
cur=cur)
db.content_fossology_license_add_from_temp(conflict_update, cur)
@remote_api_endpoint('content/fossology_license/range')
@db_transaction()
def content_fossology_license_get_range(
self, start, end, indexer_configuration_id,
limit=1000, db=None, cur=None):
"""Retrieve licenses within range [start, end] bound by limit.
Args:
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**indexer_configuration_id** (int): The tool used to index data
**limit** (int): Limit result (default to 1000)
Raises:
ValueError if limit is None
Returns:
a dict with keys:
- **ids** [bytes]: iterable of content ids within the range.
- **next** (Optional[bytes]): The next range of sha1 starts at
this sha1 if any
"""
return self._content_get_range('fossology_license', start, end,
indexer_configuration_id, limit=limit,
with_textual_data=True, db=db, cur=cur)
@remote_api_endpoint('content_metadata/missing')
@db_transaction_generator()
def content_metadata_missing(self, metadata, db=None, cur=None):
"""List metadata missing from storage.
Args:
metadata (iterable): dictionaries with keys:
- **id** (bytes): sha1 identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Yields:
missing sha1s
"""
for obj in db.content_metadata_missing_from_list(metadata, cur):
yield obj[0]
@remote_api_endpoint('content_metadata')
@db_transaction_generator()
def content_metadata_get(self, ids, db=None, cur=None):
"""Retrieve metadata per id.
Args:
ids (iterable): sha1 checksums
Yields:
dictionaries with the following keys:
- **id** (bytes)
- **translated_metadata** (str): associated metadata
- **tool** (dict): tool used to compute metadata
"""
for c in db.content_metadata_get_from_list(ids, cur):
yield converters.db_to_metadata(
dict(zip(db.content_metadata_cols, c)))
@remote_api_endpoint('content_metadata/add')
@db_transaction()
def content_metadata_add(self, metadata, conflict_update=False, db=None,
cur=None):
"""Add metadata not present in storage.
Args:
metadata (iterable): dictionaries with keys:
- **id**: sha1
- **translated_metadata**: arbitrary dict
- **indexer_configuration_id**: tool used to compute metadata
conflict_update: Flag to determine if we want to overwrite (true)
or skip duplicates (false, the default)
"""
_check_duplicates(metadata, 'id')
+ metadata.sort(key=lambda m: m['id'])
db.mktemp_content_metadata(cur)
db.copy_to(metadata, 'tmp_content_metadata',
['id', 'translated_metadata', 'indexer_configuration_id'],
cur)
db.content_metadata_add_from_temp(conflict_update, cur)
@remote_api_endpoint('revision_metadata/missing')
@db_transaction_generator()
def revision_metadata_missing(self, metadata, db=None, cur=None):
"""List metadata missing from storage.
Args:
metadata (iterable): dictionaries with keys:
- **id** (bytes): sha1_git revision identifier
- **indexer_configuration_id** (int): tool used to compute
the results
Yields:
missing ids
"""
for obj in db.revision_metadata_missing_from_list(metadata, cur):
yield obj[0]
@remote_api_endpoint('revision_metadata')
@db_transaction_generator()
def revision_metadata_get(self, ids, db=None, cur=None):
"""Retrieve revision metadata per id.
Args:
ids (iterable): sha1 checksums
Yields:
dictionaries with the following keys:
- **id** (bytes)
- **translated_metadata** (str): associated metadata
- **tool** (dict): tool used to compute metadata
- **mappings** (List[str]): list of mappings used to translate
these metadata
"""
for c in db.revision_metadata_get_from_list(ids, cur):
yield converters.db_to_metadata(
dict(zip(db.revision_metadata_cols, c)))
@remote_api_endpoint('revision_metadata/add')
@db_transaction()
def revision_metadata_add(self, metadata, conflict_update=False, db=None,
cur=None):
"""Add metadata not present in storage.
Args:
metadata (iterable): dictionaries with keys:
- **id**: sha1_git of revision
- **translated_metadata**: arbitrary dict
- **indexer_configuration_id**: tool used to compute metadata
- **mappings** (List[str]): list of mappings used to translate
these metadata
conflict_update: Flag to determine if we want to overwrite (true)
or skip duplicates (false, the default)
"""
_check_duplicates(metadata, 'id')
+ metadata.sort(key=lambda m: m['id'])
db.mktemp_revision_metadata(cur)
db.copy_to(metadata, 'tmp_revision_metadata',
['id', 'translated_metadata', 'mappings',
'indexer_configuration_id'],
cur)
db.revision_metadata_add_from_temp(conflict_update, cur)
@remote_api_endpoint('revision_metadata/delete')
@db_transaction()
def revision_metadata_delete(self, entries, db=None, cur=None):
"""Remove revision metadata from the storage.
Args:
entries (iterable): dictionaries with the following keys:
- **id** (bytes): revision identifier
- **indexer_configuration_id** (int): tool used to compute
metadata
"""
db.revision_metadata_delete(entries, cur)
@remote_api_endpoint('origin_intrinsic_metadata')
@db_transaction_generator()
def origin_intrinsic_metadata_get(self, ids, db=None, cur=None):
"""Retrieve origin metadata per id.
Args:
ids (iterable): origin identifiers
Yields:
dictionaries with the following keys:
- **origin_id** (int)
- **metadata** (str): associated metadata
- **tool** (dict): tool used to compute metadata
- **mappings** (List[str]): list of mappings used to translate
these metadata
"""
for c in db.origin_intrinsic_metadata_get_from_list(ids, cur):
yield converters.db_to_metadata(
dict(zip(db.origin_intrinsic_metadata_cols, c)))
@remote_api_endpoint('origin_intrinsic_metadata/add')
@db_transaction()
def origin_intrinsic_metadata_add(self, metadata,
conflict_update=False, db=None,
cur=None):
"""Add origin metadata not present in storage.
Args:
metadata (iterable): dictionaries with keys:
- **origin_id**: origin identifier
- **from_revision**: sha1 id of the revision used to generate
these metadata.
- **metadata**: arbitrary dict
- **indexer_configuration_id**: tool used to compute metadata
- **mappings** (List[str]): list of mappings used to translate
these metadata
conflict_update: Flag to determine if we want to overwrite (true)
or skip duplicates (false, the default)
"""
_check_duplicates(metadata, 'origin_id')
+ metadata.sort(key=lambda m: m['origin_id'])
db.mktemp_origin_intrinsic_metadata(cur)
db.copy_to(metadata, 'tmp_origin_intrinsic_metadata',
['origin_id', 'metadata', 'indexer_configuration_id',
'from_revision', 'mappings'],
cur)
db.origin_intrinsic_metadata_add_from_temp(conflict_update, cur)
@remote_api_endpoint('origin_intrinsic_metadata/delete')
@db_transaction()
def origin_intrinsic_metadata_delete(
self, entries, db=None, cur=None):
"""Remove origin metadata from the storage.
Args:
entries (iterable): dictionaries with the following keys:
- **origin_id** (int): origin identifier
- **indexer_configuration_id** (int): tool used to compute
metadata
"""
db.origin_intrinsic_metadata_delete(entries, cur)
@remote_api_endpoint('origin_intrinsic_metadata/search/fulltext')
@db_transaction_generator()
def origin_intrinsic_metadata_search_fulltext(
self, conjunction, limit=100, db=None, cur=None):
"""Returns the list of origins whose metadata contain all the terms.
Args:
conjunction (List[str]): List of terms to be searched for.
limit (int): The maximum number of results to return
Yields:
dictionaries with the following keys:
- **id** (int)
- **metadata** (str): associated metadata
- **tool** (dict): tool used to compute metadata
- **mappings** (List[str]): list of mappings used to translate
these metadata
"""
for c in db.origin_intrinsic_metadata_search_fulltext(
conjunction, limit=limit, cur=cur):
yield converters.db_to_metadata(
dict(zip(db.origin_intrinsic_metadata_cols, c)))
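# A usage sketch with illustrative terms, returning origins whose metadata
# contain both words:
#
#     results = list(storage.origin_intrinsic_metadata_search_fulltext(
#         ['john', 'doe'], limit=10))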
@remote_api_endpoint('origin_intrinsic_metadata/search/by_producer')
@db_transaction_generator()
def origin_intrinsic_metadata_search_by_producer(
self, start=0, end=None, limit=100, ids_only=False,
mappings=None, tool_ids=None,
db=None, cur=None):
"""Returns the list of origins whose metadata contain all the terms.
Args:
start (int): The minimum origin id to return
end (int): The maximum origin id to return
limit (int): The maximum number of results to return
ids_only (bool): Determines whether only origin ids are returned
or the content as well
mappings (List[str]): Returns origins whose intrinsic metadata
were generated using at least one of these mappings.
tool_ids (List[int]): Returns origins whose intrinsic metadata
were generated using at least one of these tools.
Yields:
origin ids (int) if `ids_only=True`, else
dictionaries with the following keys:
- **id** (int)
- **metadata** (str): associated metadata
- **tool** (dict): tool used to compute metadata
- **mappings** (List[str]): list of mappings used to translate
these metadata
"""
res = db.origin_intrinsic_metadata_search_by_producer(
start, end, limit, ids_only, mappings, tool_ids, cur)
if ids_only:
for (origin_id,) in res:
yield origin_id
else:
for c in res:
yield converters.db_to_metadata(
dict(zip(db.origin_intrinsic_metadata_cols, c)))
@remote_api_endpoint('origin_intrinsic_metadata/stats')
@db_transaction()
def origin_intrinsic_metadata_stats(
self, db=None, cur=None):
"""Returns counts of indexed metadata per origins, broken down
into metadata types.
Returns:
dict: dictionary with keys:
- total (int): total number of origins that were indexed
(possibly yielding an empty metadata dictionary)
- non_empty (int): total number of origins that we extracted
a non-empty metadata dictionary from
- per_mapping (dict): a dictionary with mapping names as
keys and, as values, the number of origins whose indexing
used this mapping. Note that indexing a given origin may
use 0, 1, or many mappings.
"""
mapping_names = list(MAPPING_NAMES)
select_parts = []
# Count rows for each mapping
for mapping_name in mapping_names:
select_parts.append((
"sum(case when (mappings @> ARRAY['%s']) "
" then 1 else 0 end)"
) % mapping_name)
# Total
select_parts.append("sum(1)")
# Rows whose metadata has at least one key that is not '@context'
select_parts.append(
"sum(case when ('{}'::jsonb @> (metadata - '@context')) "
" then 0 else 1 end)")
cur.execute('select ' + ', '.join(select_parts)
+ ' from origin_intrinsic_metadata')
results = dict(zip(mapping_names + ['total', 'non_empty'],
cur.fetchone()))
return {
'total': results.pop('total'),
'non_empty': results.pop('non_empty'),
'per_mapping': results,
}
@remote_api_endpoint('indexer_configuration/add')
@db_transaction_generator()
def indexer_configuration_add(self, tools, db=None, cur=None):
"""Add new tools to the storage.
Args:
tools ([dict]): List of dictionaries representing tools to
insert in the db. Each dictionary has the following keys:
- **tool_name** (str): tool's name
- **tool_version** (str): tool's version
- **tool_configuration** (dict): tool's configuration
(free form dict)
Returns:
List of dict inserted in the db (holding the id key as
well). The order of the list is not guaranteed to match
the order of the initial list.
"""
db.mktemp_indexer_configuration(cur)
db.copy_to(tools, 'tmp_indexer_configuration',
['tool_name', 'tool_version', 'tool_configuration'],
cur)
tools = db.indexer_configuration_add_from_temp(cur)
for line in tools:
yield dict(zip(db.indexer_configuration_cols, line))
@remote_api_endpoint('indexer_configuration/data')
@db_transaction()
def indexer_configuration_get(self, tool, db=None, cur=None):
"""Retrieve tool information.
Args:
tool (dict): Dictionary representing a tool with the
following keys:
- **tool_name** (str): tool's name
- **tool_version** (str): tool's version
- **tool_configuration** (dict): tool's configuration
(free form dict)
Returns:
The same dictionary with an added `id` key if the tool exists in the db, None otherwise.
"""
tool_conf = tool['tool_configuration']
if isinstance(tool_conf, dict):
tool_conf = json.dumps(tool_conf)
idx = db.indexer_configuration_get(tool['tool_name'],
tool['tool_version'],
tool_conf)
if not idx:
return None
return dict(zip(db.indexer_configuration_cols, idx))
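# Registering then looking up a tool, as a sketch (the tool description is
# hypothetical):
#
#     tool = {'tool_name': 'nomos', 'tool_version': '3.1.0',
#             'tool_configuration': {'command_line': 'nomossa <filepath>'}}
#     list(storage.indexer_configuration_add([tool]))
#     tool_id = storage.indexer_configuration_get(tool)['id']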
diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py
index cce030b..db27ee4 100644
--- a/swh/indexer/tests/storage/test_storage.py
+++ b/swh/indexer/tests/storage/test_storage.py
@@ -1,1766 +1,1964 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
-import pytest
+import threading
import unittest
+import pytest
from hypothesis import given
from swh.model.hashutil import hash_to_bytes
from swh.indexer.storage import get_indexer_storage, MAPPING_NAMES
from swh.core.tests.db_testing import SingleDbTestFixture
from swh.indexer.tests.storage.generate_data_test import (
gen_content_mimetypes, gen_content_fossology_licenses
)
from swh.indexer.tests.storage import SQL_DIR
from swh.indexer.metadata_dictionary import MAPPINGS
TOOLS = [
{
'tool_name': 'universal-ctags',
'tool_version': '~git7859817b',
'tool_configuration': {
"command_line": "ctags --fields=+lnz --sort=no --links=no "
"--output-format=json <filepath>"}
},
{
'tool_name': 'swh-metadata-translator',
'tool_version': '0.0.1',
'tool_configuration': {"type": "local", "context": "NpmMapping"},
},
{
'tool_name': 'swh-metadata-detector',
'tool_version': '0.0.1',
'tool_configuration': {
"type": "local", "context": ["NpmMapping", "CodemetaMapping"]},
},
{
'tool_name': 'swh-metadata-detector2',
'tool_version': '0.0.1',
'tool_configuration': {
"type": "local", "context": ["NpmMapping", "CodemetaMapping"]},
},
{
'tool_name': 'file',
'tool_version': '5.22',
'tool_configuration': {"command_line": "file --mime <filepath>"},
},
{
'tool_name': 'pygments',
'tool_version': '2.0.1+dfsg-1.1+deb8u1',
'tool_configuration': {
"type": "library", "debian-package": "python3-pygments"},
},
{
'tool_name': 'pygments',
'tool_version': '2.0.1+dfsg-1.1+deb8u1',
'tool_configuration': {
"type": "library",
"debian-package": "python3-pygments",
"max_content_size": 10240
},
},
{
'tool_name': 'nomos',
'tool_version': '3.1.0rc2-31-ga2cbb8c',
'tool_configuration': {"command_line": "nomossa <filepath>"},
}
]
@pytest.mark.db
class BasePgTestStorage(SingleDbTestFixture):
"""Base test class for most indexer tests.
It adds support for Storage testing to the SingleDbTestFixture class.
It will also build the database from the swh-indexer/sql/*.sql files.
"""
TEST_DB_NAME = 'softwareheritage-test-indexer'
TEST_DB_DUMP = os.path.join(SQL_DIR, '*.sql')
def setUp(self):
super().setUp()
self.storage_config = {
'cls': 'local',
'args': {
'db': 'dbname=%s' % self.TEST_DB_NAME,
},
}
def tearDown(self):
self.reset_storage_tables()
self.storage = None
super().tearDown()
def reset_storage_tables(self):
excluded = {'indexer_configuration'}
self.reset_db_tables(self.TEST_DB_NAME, excluded=excluded)
db = self.test_db[self.TEST_DB_NAME]
db.conn.commit()
def gen_generic_endpoint_tests(endpoint_type, tool_name,
example_data1, example_data2):
def rename(f):
f.__name__ = 'test_' + endpoint_type + f.__name__
return f
def endpoint(self, endpoint_name):
return getattr(self.storage, endpoint_type + '_' + endpoint_name)
@rename
def missing(self):
# given
tool_id = self.tools[tool_name]['id']
query = [
{
'id': self.sha1_1,
'indexer_configuration_id': tool_id,
},
{
'id': self.sha1_2,
'indexer_configuration_id': tool_id,
}]
# when
actual_missing = endpoint(self, 'missing')(query)
# then
self.assertEqual(list(actual_missing), [
self.sha1_1,
self.sha1_2,
])
# given
endpoint(self, 'add')([{
'id': self.sha1_2,
**example_data1,
'indexer_configuration_id': tool_id,
}])
# when
actual_missing = endpoint(self, 'missing')(query)
# then
self.assertEqual(list(actual_missing), [self.sha1_1])
@rename
def add__drop_duplicate(self):
# given
tool_id = self.tools[tool_name]['id']
data_v1 = {
'id': self.sha1_2,
**example_data1,
'indexer_configuration_id': tool_id,
}
# given
endpoint(self, 'add')([data_v1])
# when
actual_data = list(endpoint(self, 'get')([self.sha1_2]))
# then
expected_data_v1 = [{
'id': self.sha1_2,
**example_data1,
'tool': self.tools[tool_name],
}]
self.assertEqual(actual_data, expected_data_v1)
# given
data_v2 = data_v1.copy()
data_v2.update(example_data2)
endpoint(self, 'add')([data_v2])
actual_data = list(endpoint(self, 'get')([self.sha1_2]))
# data did not change as the v2 was dropped.
self.assertEqual(actual_data, expected_data_v1)
@rename
def add__update_in_place_duplicate(self):
# given
tool_id = self.tools[tool_name]['id']
data_v1 = {
'id': self.sha1_2,
**example_data1,
'indexer_configuration_id': tool_id,
}
# given
endpoint(self, 'add')([data_v1])
# when
actual_data = list(endpoint(self, 'get')([self.sha1_2]))
expected_data_v1 = [{
'id': self.sha1_2,
**example_data1,
'tool': self.tools[tool_name],
}]
# then
self.assertEqual(actual_data, expected_data_v1)
# given
data_v2 = data_v1.copy()
data_v2.update(example_data2)
endpoint(self, 'add')([data_v2], conflict_update=True)
actual_data = list(endpoint(self, 'get')([self.sha1_2]))
expected_data_v2 = [{
'id': self.sha1_2,
**example_data2,
'tool': self.tools[tool_name],
}]
# data did change as the v2 was used to overwrite v1
self.assertEqual(actual_data, expected_data_v2)
+ @rename
+ def add__update_in_place_deadlock(self):
+ # given
+ tool_id = self.tools[tool_name]['id']
+
+ hashes = [
+ hash_to_bytes(
+ '34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}'.format(i))
+ for i in range(1000)]
+
+ data_v1 = [
+ {
+ 'id': hash_,
+ **example_data1,
+ 'indexer_configuration_id': tool_id,
+ }
+ for hash_ in hashes
+ ]
+ data_v2 = [
+ {
+ 'id': hash_,
+ **example_data2,
+ 'indexer_configuration_id': tool_id,
+ }
+ for hash_ in hashes
+ ]
+
+ # Remove one item from each, so that both queries have to succeed for
+ # all items to be in the DB.
+ data_v2a = data_v2[1:]
+ data_v2b = list(reversed(data_v2[0:-1]))
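+ # data_v2a and data_v2b overlap but are ordered in opposite directions;
+ # without the sort-by-id added in the storage's add endpoints, the two
+ # threads below would lock the common rows in conflicting order and
+ # could deadlock each other.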
+
+ # given
+ endpoint(self, 'add')(data_v1)
+
+ # when
+ actual_data = list(endpoint(self, 'get')(hashes))
+
+ expected_data_v1 = [
+ {
+ 'id': hash_,
+ **example_data1,
+ 'tool': self.tools[tool_name],
+ }
+ for hash_ in hashes
+ ]
+
+ # then
+ self.assertEqual(actual_data, expected_data_v1)
+
+ # given
+ def f1():
+ endpoint(self, 'add')(data_v2a, conflict_update=True)
+
+ def f2():
+ endpoint(self, 'add')(data_v2b, conflict_update=True)
+
+ t1 = threading.Thread(target=f1)
+ t2 = threading.Thread(target=f2)
+ t2.start()
+ t1.start()
+
+ t1.join()
+ t2.join()
+
+ actual_data = list(endpoint(self, 'get')(hashes))
+
+ expected_data_v2 = [
+ {
+ 'id': hash_,
+ **example_data2,
+ 'tool': self.tools[tool_name],
+ }
+ for hash_ in hashes
+ ]
+
+ self.assertCountEqual(actual_data, expected_data_v2)
+
def add__duplicate_twice(self):
# given
tool_id = self.tools[tool_name]['id']
data_rev1 = {
'id': self.revision_id_2,
**example_data1,
'indexer_configuration_id': tool_id
}
data_rev2 = {
'id': self.revision_id_2,
**example_data2,
'indexer_configuration_id': tool_id
}
# when
endpoint(self, 'add')([data_rev1])
with self.assertRaises(ValueError):
endpoint(self, 'add')(
[data_rev2, data_rev2],
conflict_update=True)
# then
actual_data = list(endpoint(self, 'get')(
[self.revision_id_2, self.revision_id_1]))
expected_data = [{
'id': self.revision_id_2,
**example_data1,
'tool': self.tools[tool_name]
}]
self.assertEqual(actual_data, expected_data)
@rename
def get(self):
# given
tool_id = self.tools[tool_name]['id']
query = [self.sha1_2, self.sha1_1]
data1 = {
'id': self.sha1_2,
**example_data1,
'indexer_configuration_id': tool_id,
}
# when
endpoint(self, 'add')([data1])
# then
actual_data = list(endpoint(self, 'get')(query))
# then
expected_data = [{
'id': self.sha1_2,
**example_data1,
'tool': self.tools[tool_name]
}]
self.assertEqual(actual_data, expected_data)
@rename
def delete(self):
# given
tool_id = self.tools[tool_name]['id']
query = [self.sha1_2, self.sha1_1]
data1 = {
'id': self.sha1_2,
**example_data1,
'indexer_configuration_id': tool_id,
}
# when
endpoint(self, 'add')([data1])
endpoint(self, 'delete')([
{
'id': self.sha1_2,
'indexer_configuration_id': tool_id,
}
])
# then
actual_data = list(endpoint(self, 'get')(query))
# then
self.assertEqual(actual_data, [])
@rename
def delete_nonexisting(self):
tool_id = self.tools[tool_name]['id']
endpoint(self, 'delete')([
{
'id': self.sha1_2,
'indexer_configuration_id': tool_id,
}
])
return (
missing,
add__drop_duplicate,
add__update_in_place_duplicate,
+ add__update_in_place_deadlock,
add__duplicate_twice,
get,
delete,
delete_nonexisting,
)
class CommonTestStorage:
"""Base class for Indexer Storage testing.
"""
def setUp(self):
super().setUp()
self.storage = get_indexer_storage(**self.storage_config)
tools = self.storage.indexer_configuration_add(TOOLS)
self.tools = {}
for tool in tools:
tool_name = tool['tool_name']
while tool_name in self.tools:
tool_name += '_'
self.tools[tool_name] = {
'id': tool['id'],
'name': tool['tool_name'],
'version': tool['tool_version'],
'configuration': tool['tool_configuration'],
}
self.sha1_1 = hash_to_bytes('34973274ccef6ab4dfaaf86599792fa9c3fe4689')
self.sha1_2 = hash_to_bytes('61c2b3a30496d329e21af70dd2d7e097046d07b7')
self.revision_id_1 = hash_to_bytes(
'7026b7c1a2af56521e951c01ed20f255fa054238')
self.revision_id_2 = hash_to_bytes(
'7026b7c1a2af56521e9587659012345678904321')
self.revision_id_3 = hash_to_bytes(
'7026b7c1a2af56521e9587659012345678904320')
self.origin_id_1 = 44434341
self.origin_id_2 = 44434342
self.origin_id_3 = 54974445
def test_check_config(self):
self.assertTrue(self.storage.check_config(check_write=True))
self.assertTrue(self.storage.check_config(check_write=False))
# generate content_mimetype tests
(
test_content_mimetype_missing,
test_content_mimetype_add__drop_duplicate,
test_content_mimetype_add__update_in_place_duplicate,
+ test_content_mimetype_add__update_in_place_deadlock,
test_content_mimetype_add__duplicate_twice,
test_content_mimetype_get,
_, # test_content_mimetype_delete,
_, # test_content_mimetype_delete_nonexisting,
) = gen_generic_endpoint_tests(
endpoint_type='content_mimetype',
tool_name='file',
example_data1={
'mimetype': 'text/plain',
'encoding': 'utf-8',
},
example_data2={
'mimetype': 'text/html',
'encoding': 'us-ascii',
},
)
# content_language tests
(
test_content_language_missing,
test_content_language_add__drop_duplicate,
test_content_language_add__update_in_place_duplicate,
+ test_content_language_add__update_in_place_deadlock,
test_content_language_add__duplicate_twice,
test_content_language_get,
_, # test_content_language_delete,
_, # test_content_language_delete_nonexisting,
) = gen_generic_endpoint_tests(
endpoint_type='content_language',
tool_name='pygments',
example_data1={
'lang': 'haskell',
},
example_data2={
'lang': 'common-lisp',
},
)
# content_ctags tests
(
test_content_ctags_missing,
# the following tests are disabled because CTAGS behave differently
_, # test_content_ctags_add__drop_duplicate,
_, # test_content_ctags_add__update_in_place_duplicate,
+ _, # test_content_ctags_add__update_in_place_deadlock,
_, # test_content_ctags_add__duplicate_twice,
_, # test_content_ctags_get,
_, # test_content_ctags_delete,
_, # test_content_ctags_delete_nonexisting,
) = gen_generic_endpoint_tests(
endpoint_type='content_ctags',
tool_name='universal-ctags',
example_data1={
'ctags': [{
'name': 'done',
'kind': 'variable',
'line': 119,
'lang': 'OCaml',
}]
},
example_data2={
'ctags': [
{
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Python',
},
{
'name': 'main',
'kind': 'function',
'line': 119,
'lang': 'Python',
}]
},
)
def test_content_ctags_search(self):
# 1. given
tool = self.tools['universal-ctags']
tool_id = tool['id']
ctag1 = {
'id': self.sha1_1,
'indexer_configuration_id': tool_id,
'ctags': [
{
'name': 'hello',
'kind': 'function',
'line': 133,
'lang': 'Python',
},
{
'name': 'counter',
'kind': 'variable',
'line': 119,
'lang': 'Python',
},
{
'name': 'hello',
'kind': 'variable',
'line': 210,
'lang': 'Python',
},
]
}
ctag2 = {
'id': self.sha1_2,
'indexer_configuration_id': tool_id,
'ctags': [
{
'name': 'hello',
'kind': 'variable',
'line': 100,
'lang': 'C',
},
{
'name': 'result',
'kind': 'variable',
'line': 120,
'lang': 'C',
},
]
}
self.storage.content_ctags_add([ctag1, ctag2])
# 1. when
actual_ctags = list(self.storage.content_ctags_search('hello',
limit=1))
# 1. then
self.assertEqual(actual_ctags, [
{
'id': ctag1['id'],
'tool': tool,
'name': 'hello',
'kind': 'function',
'line': 133,
'lang': 'Python',
}
])
# 2. when
actual_ctags = list(self.storage.content_ctags_search(
'hello',
limit=1,
last_sha1=ctag1['id']))
# 2. then
self.assertEqual(actual_ctags, [
{
'id': ctag2['id'],
'tool': tool,
'name': 'hello',
'kind': 'variable',
'line': 100,
'lang': 'C',
}
])
# 3. when
actual_ctags = list(self.storage.content_ctags_search('hello'))
# 3. then
self.assertEqual(actual_ctags, [
{
'id': ctag1['id'],
'tool': tool,
'name': 'hello',
'kind': 'function',
'line': 133,
'lang': 'Python',
},
{
'id': ctag1['id'],
'tool': tool,
'name': 'hello',
'kind': 'variable',
'line': 210,
'lang': 'Python',
},
{
'id': ctag2['id'],
'tool': tool,
'name': 'hello',
'kind': 'variable',
'line': 100,
'lang': 'C',
},
])
# 4. when
actual_ctags = list(self.storage.content_ctags_search('counter'))
# then
self.assertEqual(actual_ctags, [{
'id': ctag1['id'],
'tool': tool,
'name': 'counter',
'kind': 'variable',
'line': 119,
'lang': 'Python',
}])
# 5. when
actual_ctags = list(self.storage.content_ctags_search('result',
limit=1))
# then
self.assertEqual(actual_ctags, [{
'id': ctag2['id'],
'tool': tool,
'name': 'result',
'kind': 'variable',
'line': 120,
'lang': 'C',
}])
def test_content_ctags_search_no_result(self):
actual_ctags = list(self.storage.content_ctags_search('counter'))
self.assertEqual(actual_ctags, [])
def test_content_ctags_add__add_new_ctags_added(self):
# given
tool = self.tools['universal-ctags']
tool_id = tool['id']
ctag_v1 = {
'id': self.sha1_2,
'indexer_configuration_id': tool_id,
'ctags': [{
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
}]
}
# given
self.storage.content_ctags_add([ctag_v1])
self.storage.content_ctags_add([ctag_v1]) # conflict does nothing
# when
actual_ctags = list(self.storage.content_ctags_get(
[self.sha1_2]))
# then
expected_ctags = [{
'id': self.sha1_2,
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
'tool': tool,
}]
self.assertEqual(actual_ctags, expected_ctags)
# given
ctag_v2 = ctag_v1.copy()
ctag_v2.update({
'ctags': [
{
'name': 'defn',
'kind': 'function',
'line': 120,
'lang': 'Scheme',
}
]
})
self.storage.content_ctags_add([ctag_v2])
expected_ctags = [
{
'id': self.sha1_2,
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
'tool': tool,
}, {
'id': self.sha1_2,
'name': 'defn',
'kind': 'function',
'line': 120,
'lang': 'Scheme',
'tool': tool,
}
]
actual_ctags = list(self.storage.content_ctags_get(
[self.sha1_2]))
self.assertEqual(actual_ctags, expected_ctags)
def test_content_ctags_add__update_in_place(self):
# given
tool = self.tools['universal-ctags']
tool_id = tool['id']
ctag_v1 = {
'id': self.sha1_2,
'indexer_configuration_id': tool_id,
'ctags': [{
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
}]
}
# given
self.storage.content_ctags_add([ctag_v1])
# when
actual_ctags = list(self.storage.content_ctags_get(
[self.sha1_2]))
# then
expected_ctags = [
{
'id': self.sha1_2,
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
'tool': tool
}
]
self.assertEqual(actual_ctags, expected_ctags)
# given
ctag_v2 = ctag_v1.copy()
ctag_v2.update({
'ctags': [
{
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
},
{
'name': 'defn',
'kind': 'function',
'line': 120,
'lang': 'Scheme',
}
]
})
self.storage.content_ctags_add([ctag_v2], conflict_update=True)
actual_ctags = list(self.storage.content_ctags_get(
[self.sha1_2]))
# ctag did change as the v2 was used to overwrite v1
expected_ctags = [
{
'id': self.sha1_2,
'name': 'done',
'kind': 'variable',
'line': 100,
'lang': 'Scheme',
'tool': tool,
},
{
'id': self.sha1_2,
'name': 'defn',
'kind': 'function',
'line': 120,
'lang': 'Scheme',
'tool': tool,
}
]
self.assertEqual(actual_ctags, expected_ctags)
# content_fossology_license tests
(
_, # The endpoint content_fossology_license_missing does not exist
# the following tests are disabled because fossology_license tests
# behave differently
_, # test_content_fossology_license_add__drop_duplicate,
_, # test_content_fossology_license_add__update_in_place_duplicate,
+ _, # test_content_fossology_license_add__update_in_place_deadlock,
_, # test_content_fossology_license_add__duplicate_twice,
_, # test_content_fossology_license_get,
_, # test_content_fossology_license_delete,
_, # test_content_fossology_license_delete_nonexisting,
) = gen_generic_endpoint_tests(
endpoint_type='content_fossology_license',
tool_name='nomos',
example_data1={
'licenses': ['Apache-2.0'],
},
example_data2={
'licenses': ['BSD-2-Clause'],
},
)
def test_content_fossology_license_add__new_license_added(self):
# given
tool = self.tools['nomos']
tool_id = tool['id']
license_v1 = {
'id': self.sha1_1,
'licenses': ['Apache-2.0'],
'indexer_configuration_id': tool_id,
}
# given
self.storage.content_fossology_license_add([license_v1])
# conflict does nothing
self.storage.content_fossology_license_add([license_v1])
# when
actual_licenses = list(self.storage.content_fossology_license_get(
[self.sha1_1]))
# then
expected_license = {
self.sha1_1: [{
'licenses': ['Apache-2.0'],
'tool': tool,
}]
}
self.assertEqual(actual_licenses, [expected_license])
# given
license_v2 = license_v1.copy()
license_v2.update({
'licenses': ['BSD-2-Clause'],
})
self.storage.content_fossology_license_add([license_v2])
actual_licenses = list(self.storage.content_fossology_license_get(
[self.sha1_1]))
expected_license = {
self.sha1_1: [{
'licenses': ['Apache-2.0', 'BSD-2-Clause'],
'tool': tool
}]
}
# license did not change as the v2 was dropped.
self.assertEqual(actual_licenses, [expected_license])
# content_metadata tests
(
test_content_metadata_missing,
test_content_metadata_add__drop_duplicate,
test_content_metadata_add__update_in_place_duplicate,
+ test_content_metadata_add__update_in_place_deadlock,
test_content_metadata_add__duplicate_twice,
test_content_metadata_get,
_, # test_content_metadata_delete,
_, # test_content_metadata_delete_nonexisting,
) = gen_generic_endpoint_tests(
endpoint_type='content_metadata',
tool_name='swh-metadata-detector',
example_data1={
'translated_metadata': {
'other': {},
'codeRepository': {
'type': 'git',
'url': 'https://github.com/moranegg/metadata_test'
},
'description': 'Simple package.json test for indexer',
'name': 'test_metadata',
'version': '0.0.1'
},
},
example_data2={
'translated_metadata': {
'other': {},
'name': 'test_metadata',
'version': '0.0.1'
},
},
)
# revision_metadata tests
(
test_revision_metadata_missing,
test_revision_metadata_add__drop_duplicate,
test_revision_metadata_add__update_in_place_duplicate,
+ test_revision_metadata_add__update_in_place_deadlock,
test_revision_metadata_add__duplicate_twice,
test_revision_metadata_get,
test_revision_metadata_delete,
test_revision_metadata_delete_nonexisting,
) = gen_generic_endpoint_tests(
endpoint_type='revision_metadata',
tool_name='swh-metadata-detector',
example_data1={
'translated_metadata': {
'other': {},
'codeRepository': {
'type': 'git',
'url': 'https://github.com/moranegg/metadata_test'
},
'description': 'Simple package.json test for indexer',
'name': 'test_metadata',
'version': '0.0.1'
},
'mappings': ['mapping1'],
},
example_data2={
'translated_metadata': {
'other': {},
'name': 'test_metadata',
'version': '0.0.1'
},
'mappings': ['mapping2'],
},
)
def test_origin_intrinsic_metadata_get(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
metadata = {
'version': None,
'name': None,
}
metadata_rev = {
'id': self.revision_id_2,
'translated_metadata': metadata,
'mappings': ['mapping1'],
'indexer_configuration_id': tool_id,
}
metadata_origin = {
'origin_id': self.origin_id_1,
'metadata': metadata,
'indexer_configuration_id': tool_id,
'mappings': ['mapping1'],
'from_revision': self.revision_id_2,
}
# when
self.storage.revision_metadata_add([metadata_rev])
self.storage.origin_intrinsic_metadata_add([metadata_origin])
# then
actual_metadata = list(self.storage.origin_intrinsic_metadata_get(
[self.origin_id_1, 42]))
expected_metadata = [{
'origin_id': self.origin_id_1,
'metadata': metadata,
'tool': self.tools['swh-metadata-detector'],
'from_revision': self.revision_id_2,
'mappings': ['mapping1'],
}]
self.assertEqual(actual_metadata, expected_metadata)
def test_origin_intrinsic_metadata_delete(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
metadata = {
'version': None,
'name': None,
}
metadata_rev = {
'id': self.revision_id_2,
'translated_metadata': metadata,
'mappings': ['mapping1'],
'indexer_configuration_id': tool_id,
}
metadata_origin = {
'origin_id': self.origin_id_1,
'metadata': metadata,
'indexer_configuration_id': tool_id,
'mappings': ['mapping1'],
'from_revision': self.revision_id_2,
}
+ metadata_origin2 = metadata_origin.copy()
+ metadata_origin2['origin_id'] = self.origin_id_2
# when
self.storage.revision_metadata_add([metadata_rev])
- self.storage.origin_intrinsic_metadata_add([metadata_origin])
+ self.storage.origin_intrinsic_metadata_add([
+ metadata_origin, metadata_origin2])
+
self.storage.origin_intrinsic_metadata_delete([
{
'origin_id': self.origin_id_1,
'indexer_configuration_id': tool_id
}
])
# then
actual_metadata = list(self.storage.origin_intrinsic_metadata_get(
- [self.origin_id_1, 42]))
-
- self.assertEqual(actual_metadata, [])
+ [self.origin_id_1, self.origin_id_2, 42]))
+ for item in actual_metadata:
+ item['indexer_configuration_id'] = item.pop('tool')['id']
+ self.assertEqual(actual_metadata, [metadata_origin2])
def test_origin_intrinsic_metadata_delete_nonexisting(self):
tool_id = self.tools['swh-metadata-detector']['id']
self.storage.origin_intrinsic_metadata_delete([
{
'origin_id': self.origin_id_1,
'indexer_configuration_id': tool_id
}
])
def test_origin_intrinsic_metadata_add_drop_duplicate(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
metadata_v1 = {
'version': None,
'name': None,
}
metadata_rev_v1 = {
'id': self.revision_id_1,
'translated_metadata': metadata_v1.copy(),
'mappings': [],
'indexer_configuration_id': tool_id,
}
metadata_origin_v1 = {
'origin_id': self.origin_id_1,
'metadata': metadata_v1.copy(),
'indexer_configuration_id': tool_id,
'mappings': [],
'from_revision': self.revision_id_1,
}
# given
self.storage.revision_metadata_add([metadata_rev_v1])
self.storage.origin_intrinsic_metadata_add([metadata_origin_v1])
# when
actual_metadata = list(self.storage.origin_intrinsic_metadata_get(
[self.origin_id_1, 42]))
expected_metadata_v1 = [{
'origin_id': self.origin_id_1,
'metadata': metadata_v1,
'tool': self.tools['swh-metadata-detector'],
'from_revision': self.revision_id_1,
'mappings': [],
}]
self.assertEqual(actual_metadata, expected_metadata_v1)
# given
metadata_v2 = metadata_v1.copy()
metadata_v2.update({
'name': 'test_metadata',
'author': 'MG',
})
metadata_rev_v2 = metadata_rev_v1.copy()
metadata_origin_v2 = metadata_origin_v1.copy()
metadata_rev_v2['translated_metadata'] = metadata_v2
metadata_origin_v2['metadata'] = metadata_v2
self.storage.revision_metadata_add([metadata_rev_v2])
self.storage.origin_intrinsic_metadata_add([metadata_origin_v2])
# then
actual_metadata = list(self.storage.origin_intrinsic_metadata_get(
[self.origin_id_1]))
# metadata did not change as the v2 was dropped.
self.assertEqual(actual_metadata, expected_metadata_v1)
def test_origin_intrinsic_metadata_add_update_in_place_duplicate(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
metadata_v1 = {
'version': None,
'name': None,
}
metadata_rev_v1 = {
'id': self.revision_id_2,
'translated_metadata': metadata_v1,
'mappings': [],
'indexer_configuration_id': tool_id,
}
metadata_origin_v1 = {
'origin_id': self.origin_id_1,
'metadata': metadata_v1.copy(),
'indexer_configuration_id': tool_id,
'mappings': [],
'from_revision': self.revision_id_2,
}
# given
self.storage.revision_metadata_add([metadata_rev_v1])
self.storage.origin_intrinsic_metadata_add([metadata_origin_v1])
# when
actual_metadata = list(self.storage.origin_intrinsic_metadata_get(
[self.origin_id_1]))
# then
expected_metadata_v1 = [{
'origin_id': self.origin_id_1,
'metadata': metadata_v1,
'tool': self.tools['swh-metadata-detector'],
'from_revision': self.revision_id_2,
'mappings': [],
}]
self.assertEqual(actual_metadata, expected_metadata_v1)
# given
metadata_v2 = metadata_v1.copy()
metadata_v2.update({
'name': 'test_update_duplicated_metadata',
'author': 'MG',
})
metadata_rev_v2 = metadata_rev_v1.copy()
metadata_origin_v2 = metadata_origin_v1.copy()
metadata_rev_v2['translated_metadata'] = metadata_v2
metadata_origin_v2['metadata'] = metadata_v2
self.storage.revision_metadata_add([metadata_rev_v2],
conflict_update=True)
self.storage.origin_intrinsic_metadata_add([metadata_origin_v2],
conflict_update=True)
actual_metadata = list(self.storage.origin_intrinsic_metadata_get(
[self.origin_id_1]))
expected_metadata_v2 = [{
'origin_id': self.origin_id_1,
'metadata': metadata_v2,
'tool': self.tools['swh-metadata-detector'],
'from_revision': self.revision_id_2,
'mappings': [],
}]
# metadata did change as the v2 was used to overwrite v1
self.assertEqual(actual_metadata, expected_metadata_v2)
+ def test_origin_intrinsic_metadata_add__update_in_place_deadlock(self):
+ # given
+ tool_id = self.tools['swh-metadata-detector']['id']
+
+ ids = list(range(1000))
+
+ example_data1 = {
+ 'metadata': {
+ 'version': None,
+ 'name': None,
+ },
+ 'mappings': [],
+ }
+ example_data2 = {
+ 'metadata': {
+ 'version': 'v1.1.1',
+ 'name': 'foo',
+ },
+ 'mappings': [],
+ }
+
+ metadata_rev_v1 = {
+ 'id': self.revision_id_2,
+ 'translated_metadata': {
+ 'version': None,
+ 'name': None,
+ },
+ 'mappings': [],
+ 'indexer_configuration_id': tool_id,
+ }
+
+ data_v1 = [
+ {
+ 'origin_id': id_,
+ 'from_revision': self.revision_id_2,
+ **example_data1,
+ 'indexer_configuration_id': tool_id,
+ }
+ for id_ in ids
+ ]
+ data_v2 = [
+ {
+ 'origin_id': id_,
+ 'from_revision': self.revision_id_2,
+ **example_data2,
+ 'indexer_configuration_id': tool_id,
+ }
+ for id_ in ids
+ ]
+
+ # Remove one item from each, so that both queries have to succeed for
+ # all items to be in the DB.
+ data_v2a = data_v2[1:]
+ data_v2b = list(reversed(data_v2[0:-1]))
+
+ # given
+ self.storage.revision_metadata_add([metadata_rev_v1])
+ self.storage.origin_intrinsic_metadata_add(data_v1)
+
+ # when
+ actual_data = list(self.storage.origin_intrinsic_metadata_get(ids))
+
+ expected_data_v1 = [
+ {
+ 'origin_id': id_,
+ 'from_revision': self.revision_id_2,
+ **example_data1,
+ 'tool': self.tools['swh-metadata-detector'],
+ }
+ for id_ in ids
+ ]
+
+ # then
+ self.assertEqual(actual_data, expected_data_v1)
+
+ # given
+ def f1():
+ self.storage.origin_intrinsic_metadata_add(
+ data_v2a, conflict_update=True)
+
+ def f2():
+ self.storage.origin_intrinsic_metadata_add(
+ data_v2b, conflict_update=True)
+
+ t1 = threading.Thread(target=f1)
+ t2 = threading.Thread(target=f2)
+ t2.start()
+ t1.start()
+
+ t1.join()
+ t2.join()
+
+ actual_data = list(self.storage.origin_intrinsic_metadata_get(ids))
+
+ expected_data_v2 = [
+ {
+ 'origin_id': id_,
+ 'from_revision': self.revision_id_2,
+ **example_data2,
+ 'tool': self.tools['swh-metadata-detector'],
+ }
+ for id_ in ids
+ ]
+
+ self.maxDiff = None
+ self.assertCountEqual(actual_data, expected_data_v2)
+
def test_origin_intrinsic_metadata_add__duplicate_twice(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
metadata = {
'developmentStatus': None,
'name': None,
}
metadata_rev = {
'id': self.revision_id_2,
'translated_metadata': metadata,
'mappings': ['mapping1'],
'indexer_configuration_id': tool_id,
}
metadata_origin = {
'origin_id': self.origin_id_1,
'metadata': metadata,
'indexer_configuration_id': tool_id,
'mappings': ['mapping1'],
'from_revision': self.revision_id_2,
}
# when
self.storage.revision_metadata_add([metadata_rev])
with self.assertRaises(ValueError):
self.storage.origin_intrinsic_metadata_add([
metadata_origin, metadata_origin])
def test_origin_intrinsic_metadata_search_fulltext(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
metadata1 = {
'author': 'John Doe',
}
metadata1_rev = {
'id': self.revision_id_1,
'translated_metadata': metadata1,
'mappings': [],
'indexer_configuration_id': tool_id,
}
metadata1_origin = {
'origin_id': self.origin_id_1,
'metadata': metadata1,
'mappings': [],
'indexer_configuration_id': tool_id,
'from_revision': self.revision_id_1,
}
metadata2 = {
'author': 'Jane Doe',
}
metadata2_rev = {
'id': self.revision_id_2,
'translated_metadata': metadata2,
'mappings': [],
'indexer_configuration_id': tool_id,
}
metadata2_origin = {
'origin_id': self.origin_id_2,
'metadata': metadata2,
'mappings': [],
'indexer_configuration_id': tool_id,
'from_revision': self.revision_id_2,
}
# when
self.storage.revision_metadata_add([metadata1_rev])
self.storage.origin_intrinsic_metadata_add([metadata1_origin])
self.storage.revision_metadata_add([metadata2_rev])
self.storage.origin_intrinsic_metadata_add([metadata2_origin])
# then
search = self.storage.origin_intrinsic_metadata_search_fulltext
self.assertCountEqual(
[res['origin_id'] for res in search(['Doe'])],
[self.origin_id_1, self.origin_id_2])
self.assertEqual(
[res['origin_id'] for res in search(['John', 'Doe'])],
[self.origin_id_1])
self.assertEqual(
[res['origin_id'] for res in search(['John'])],
[self.origin_id_1])
self.assertEqual(
[res['origin_id'] for res in search(['John', 'Jane'])],
[])
def test_origin_intrinsic_metadata_search_fulltext_rank(self):
# given
tool_id = self.tools['swh-metadata-detector']['id']
# The following authors have "Random Person" to add some more content
# to the JSON data, to work around normalization quirks when there
# are few words (rank/(1+ln(nb_words)) is very sensitive to nb_words
# for small values of nb_words).
metadata1 = {
'author': [
'Random Person',
'John Doe',
'Jane Doe',
]
}
metadata1_rev = {
'id': self.revision_id_1,
'translated_metadata': metadata1,
'mappings': [],
'indexer_configuration_id': tool_id,
}
metadata1_origin = {
'origin_id': self.origin_id_1,
'metadata': metadata1,
'mappings': [],
'indexer_configuration_id': tool_id,
'from_revision': self.revision_id_1,
}
metadata2 = {
'author': [
'Random Person',
'Jane Doe',
]
}
metadata2_rev = {
'id': self.revision_id_2,
'translated_metadata': metadata2,
'mappings': [],
'indexer_configuration_id': tool_id,
}
metadata2_origin = {
'origin_id': self.origin_id_2,
'metadata': metadata2,
'mappings': [],
'indexer_configuration_id': tool_id,
'from_revision': self.revision_id_2,
}
# when
self.storage.revision_metadata_add([metadata1_rev])
self.storage.origin_intrinsic_metadata_add([metadata1_origin])
self.storage.revision_metadata_add([metadata2_rev])
self.storage.origin_intrinsic_metadata_add([metadata2_origin])
# then
search = self.storage.origin_intrinsic_metadata_search_fulltext
self.assertEqual(
[res['origin_id'] for res in search(['Doe'])],
[self.origin_id_1, self.origin_id_2])
self.assertEqual(
[res['origin_id'] for res in search(['Doe'], limit=1)],
[self.origin_id_1])
self.assertEqual(
[res['origin_id'] for res in search(['John'])],
[self.origin_id_1])
self.assertEqual(
[res['origin_id'] for res in search(['Jane'])],
[self.origin_id_2, self.origin_id_1])
self.assertEqual(
[res['origin_id'] for res in search(['John', 'Jane'])],
[self.origin_id_1])
def _fill_origin_intrinsic_metadata(self):
tool1_id = self.tools['swh-metadata-detector']['id']
tool2_id = self.tools['swh-metadata-detector2']['id']
metadata1 = {
'@context': 'foo',
'author': 'John Doe',
}
metadata1_rev = {
'id': self.revision_id_1,
'translated_metadata': metadata1,
'mappings': ['npm'],
'indexer_configuration_id': tool1_id,
}
metadata1_origin = {
'origin_id': self.origin_id_1,
'metadata': metadata1,
'mappings': ['npm'],
'indexer_configuration_id': tool1_id,
'from_revision': self.revision_id_1,
}
metadata2 = {
'@context': 'foo',
'author': 'Jane Doe',
}
metadata2_rev = {
'id': self.revision_id_2,
'translated_metadata': metadata2,
'mappings': ['npm', 'gemspec'],
'indexer_configuration_id': tool2_id,
}
metadata2_origin = {
'origin_id': self.origin_id_2,
'metadata': metadata2,
'mappings': ['npm', 'gemspec'],
'indexer_configuration_id': tool2_id,
'from_revision': self.revision_id_2,
}
metadata3 = {
'@context': 'foo',
}
metadata3_rev = {
'id': self.revision_id_3,
'translated_metadata': metadata3,
'mappings': ['npm', 'gemspec'],
'indexer_configuration_id': tool2_id,
}
metadata3_origin = {
'origin_id': self.origin_id_3,
'metadata': metadata3,
'mappings': ['pkg-info'],
'indexer_configuration_id': tool2_id,
'from_revision': self.revision_id_3,
}
self.storage.revision_metadata_add([metadata1_rev])
self.storage.origin_intrinsic_metadata_add([metadata1_origin])
self.storage.revision_metadata_add([metadata2_rev])
self.storage.origin_intrinsic_metadata_add([metadata2_origin])
self.storage.revision_metadata_add([metadata3_rev])
self.storage.origin_intrinsic_metadata_add([metadata3_origin])
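# Fixture summary:
#   origin 1 -> tool1, mappings ['npm'],            author 'John Doe'
#   origin 2 -> tool2, mappings ['npm', 'gemspec'], author 'Jane Doe'
#   origin 3 -> tool2, mappings ['pkg-info'],       '@context' only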
def test_origin_intrinsic_metadata_search_by_producer(self):
self._fill_origin_intrinsic_metadata()
tool1 = self.tools['swh-metadata-detector']
tool2 = self.tools['swh-metadata-detector2']
endpoint = self.storage.origin_intrinsic_metadata_search_by_producer
# test pagination
self.assertCountEqual(
endpoint(ids_only=True),
[self.origin_id_1, self.origin_id_2, self.origin_id_3])
self.assertCountEqual(
endpoint(start=0, ids_only=True),
[self.origin_id_1, self.origin_id_2, self.origin_id_3])
self.assertCountEqual(
endpoint(start=0, limit=2, ids_only=True),
[self.origin_id_1, self.origin_id_2])
self.assertCountEqual(
endpoint(start=self.origin_id_1+1, ids_only=True),
[self.origin_id_2, self.origin_id_3])
self.assertCountEqual(
endpoint(start=self.origin_id_1+1, end=self.origin_id_3-1,
ids_only=True),
[self.origin_id_2])
# test mappings filtering
self.assertCountEqual(
endpoint(mappings=['npm'], ids_only=True),
[self.origin_id_1, self.origin_id_2])
self.assertCountEqual(
endpoint(mappings=['npm', 'gemspec'], ids_only=True),
[self.origin_id_1, self.origin_id_2])
self.assertCountEqual(
endpoint(mappings=['gemspec'], ids_only=True),
[self.origin_id_2])
self.assertCountEqual(
endpoint(mappings=['pkg-info'], ids_only=True),
[self.origin_id_3])
self.assertCountEqual(
endpoint(mappings=['foobar'], ids_only=True),
[])
# test pagination + mappings
self.assertCountEqual(
endpoint(mappings=['npm'], limit=1, ids_only=True),
[self.origin_id_1])
# test tool filtering
self.assertCountEqual(
endpoint(tool_ids=[tool1['id']], ids_only=True),
[self.origin_id_1])
self.assertCountEqual(
endpoint(tool_ids=[tool2['id']], ids_only=True),
[self.origin_id_2, self.origin_id_3])
self.assertCountEqual(
endpoint(tool_ids=[tool1['id'], tool2['id']], ids_only=True),
[self.origin_id_1, self.origin_id_2, self.origin_id_3])
# test ids_only=False
self.assertEqual(list(endpoint(mappings=['gemspec'])), [{
'origin_id': self.origin_id_2,
'metadata': {
'@context': 'foo',
'author': 'Jane Doe',
},
'mappings': ['npm', 'gemspec'],
'tool': tool2,
'from_revision': self.revision_id_2,
}])
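# Illustrative paging sketch (not part of the test) over this endpoint,
# using only the start/limit/ids_only parameters exercised above;
# process() is a placeholder:
#
#   next_start = 0
#   while True:
#       ids = list(endpoint(start=next_start, limit=100, ids_only=True))
#       if not ids:
#           break
#       process(ids)
#       next_start = max(ids) + 1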
def test_origin_intrinsic_metadata_stats(self):
self._fill_origin_intrinsic_metadata()
result = self.storage.origin_intrinsic_metadata_stats()
self.assertEqual(result, {
'per_mapping': {
'gemspec': 1,
'npm': 2,
'pkg-info': 1,
'codemeta': 0,
'maven': 0,
},
'total': 3,
'non_empty': 2,
})
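# non_empty is 2 because metadata3 only contains '@context' (no actual
# metadata), while per_mapping counts one entry per origin and mapping
# recorded above: npm for origins 1 and 2, gemspec for origin 2,
# pkg-info for origin 3.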
def test_indexer_configuration_add(self):
tool = {
'tool_name': 'some-unknown-tool',
'tool_version': 'some-version',
'tool_configuration': {"debian-package": "some-package"},
}
actual_tool = self.storage.indexer_configuration_get(tool)
self.assertIsNone(actual_tool) # does not exist
# add it
actual_tools = list(self.storage.indexer_configuration_add([tool]))
self.assertEqual(len(actual_tools), 1)
actual_tool = actual_tools[0]
self.assertIsNotNone(actual_tool) # now it exists
new_id = actual_tool.pop('id')
self.assertEqual(actual_tool, tool)
actual_tools2 = list(self.storage.indexer_configuration_add([tool]))
actual_tool2 = actual_tools2[0]
self.assertIsNotNone(actual_tool2) # now it exists
new_id2 = actual_tool2.pop('id')
self.assertEqual(new_id, new_id2)
self.assertEqual(actual_tool, actual_tool2)
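# Illustrative usage sketch (not part of the test): an indexer typically
# registers its tool description once and reuses the returned id as the
# indexer_configuration_id of the rows it later adds, e.g.
#
#   tool = list(storage.indexer_configuration_add([TOOL_DESCRIPTION]))[0]
#   row = {'id': sha1, 'mimetype': 'text/plain', 'encoding': 'utf-8',
#          'indexer_configuration_id': tool['id']}
#   storage.content_mimetype_add([row])
#
# TOOL_DESCRIPTION and sha1 are placeholders for this sketch.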
def test_indexer_configuration_add_multiple(self):
tool = {
'tool_name': 'some-unknown-tool',
'tool_version': 'some-version',
'tool_configuration': {"debian-package": "some-package"},
}
actual_tools = list(self.storage.indexer_configuration_add([tool]))
self.assertEqual(len(actual_tools), 1)
new_tools = [tool, {
'tool_name': 'yet-another-tool',
'tool_version': 'version',
'tool_configuration': {},
}]
actual_tools = list(self.storage.indexer_configuration_add(new_tools))
self.assertEqual(len(actual_tools), 2)
# order not guaranteed, so we iterate over results to check
for tool in actual_tools:
_id = tool.pop('id')
self.assertIsNotNone(_id)
self.assertIn(tool, new_tools)
def test_indexer_configuration_get_missing(self):
tool = {
'tool_name': 'unknown-tool',
'tool_version': '3.1.0rc2-31-ga2cbb8c',
'tool_configuration': {"command_line": "nomossa <filepath>"},
}
actual_tool = self.storage.indexer_configuration_get(tool)
self.assertIsNone(actual_tool)
def test_indexer_configuration_get(self):
tool = {
'tool_name': 'nomos',
'tool_version': '3.1.0rc2-31-ga2cbb8c',
'tool_configuration': {"command_line": "nomossa <filepath>"},
}
self.storage.indexer_configuration_add([tool])
actual_tool = self.storage.indexer_configuration_get(tool)
expected_tool = tool.copy()
del actual_tool['id']
self.assertEqual(expected_tool, actual_tool)
def test_indexer_configuration_metadata_get_missing_context(self):
tool = {
'tool_name': 'swh-metadata-translator',
'tool_version': '0.0.1',
'tool_configuration': {"context": "unknown-context"},
}
actual_tool = self.storage.indexer_configuration_get(tool)
self.assertIsNone(actual_tool)
def test_indexer_configuration_metadata_get(self):
tool = {
'tool_name': 'swh-metadata-translator',
'tool_version': '0.0.1',
'tool_configuration': {"type": "local", "context": "NpmMapping"},
}
self.storage.indexer_configuration_add([tool])
actual_tool = self.storage.indexer_configuration_get(tool)
expected_tool = tool.copy()
expected_tool['id'] = actual_tool['id']
self.assertEqual(expected_tool, actual_tool)
@pytest.mark.property_based
def test_generate_content_mimetype_get_range_limit_none(self):
"""mimetype_get_range call with wrong limit input should fail"""
with self.assertRaises(ValueError) as e:
self.storage.content_mimetype_get_range(
start=None, end=None, indexer_configuration_id=None,
limit=None)
self.assertEqual(e.exception.args, (
'Development error: limit should not be None',))
@pytest.mark.property_based
@given(gen_content_mimetypes(min_size=1, max_size=4))
def test_generate_content_mimetype_get_range_no_limit(self, mimetypes):
"""mimetype_get_range returns mimetypes within range provided"""
self.reset_storage_tables()
# add mimetypes to storage
self.storage.content_mimetype_add(mimetypes)
# All ids from the db
content_ids = sorted([c['id'] for c in mimetypes])
start = content_ids[0]
end = content_ids[-1]
# retrieve mimetypes
tool_id = mimetypes[0]['indexer_configuration_id']
actual_result = self.storage.content_mimetype_get_range(
start, end, indexer_configuration_id=tool_id)
actual_ids = actual_result['ids']
actual_next = actual_result['next']
self.assertEqual(len(mimetypes), len(actual_ids))
self.assertIsNone(actual_next)
self.assertEqual(content_ids, actual_ids)
@pytest.mark.property_based
@given(gen_content_mimetypes(min_size=4, max_size=4))
def test_generate_content_mimetype_get_range_limit(self, mimetypes):
"""mimetype_get_range paginates results if limit exceeded"""
self.reset_storage_tables()
# add mimetypes to storage
self.storage.content_mimetype_add(mimetypes)
# sorted list of the sha1s we just added to storage
content_ids = sorted([c['id'] for c in mimetypes])
start = content_ids[0]
end = content_ids[-1]
# retrieve mimetypes limited to 3 results
limited_results = len(mimetypes) - 1
tool_id = mimetypes[0]['indexer_configuration_id']
actual_result = self.storage.content_mimetype_get_range(
start, end,
indexer_configuration_id=tool_id, limit=limited_results)
actual_ids = actual_result['ids']
actual_next = actual_result['next']
self.assertEqual(limited_results, len(actual_ids))
self.assertIsNotNone(actual_next)
self.assertEqual(actual_next, content_ids[-1])
expected_mimetypes = content_ids[:-1]
self.assertEqual(expected_mimetypes, actual_ids)
# retrieve next part
actual_results2 = self.storage.content_mimetype_get_range(
start=end, end=end, indexer_configuration_id=tool_id)
actual_ids2 = actual_results2['ids']
actual_next2 = actual_results2['next']
self.assertIsNone(actual_next2)
expected_mimetypes2 = [content_ids[-1]]
self.assertEqual(expected_mimetypes2, actual_ids2)
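# Client-side paging sketch (illustrative only), relying on the
# {'ids': [...], 'next': sha1_or_None} contract exercised above; the
# 0x00/0xff bounds and the page size are arbitrary placeholders:
#
#   start, end = b'\x00' * 20, b'\xff' * 20
#   while start is not None:
#       result = storage.content_mimetype_get_range(
#           start, end, indexer_configuration_id=tool_id, limit=1000)
#       process(result['ids'])      # hypothetical callback
#       start = result['next']      # None once the range is exhausted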
@pytest.mark.property_based
def test_generate_content_fossology_license_get_range_limit_none(self):
"""license_get_range call with wrong limit input should fail"""
with self.assertRaises(ValueError) as e:
self.storage.content_fossology_license_get_range(
start=None, end=None, indexer_configuration_id=None,
limit=None)
self.assertEqual(e.exception.args, (
'Development error: limit should not be None',))
def prepare_mimetypes_from(self, fossology_licenses):
"""Fossology license needs some consistent data in db to run.
"""
mimetypes = []
for c in fossology_licenses:
mimetypes.append({
'id': c['id'],
'mimetype': 'text/plain',
'encoding': 'utf-8',
'indexer_configuration_id': c['indexer_configuration_id'],
})
return mimetypes
@pytest.mark.property_based
@given(gen_content_fossology_licenses(min_size=1, max_size=4))
def test_generate_content_fossology_license_get_range_no_limit(
self, fossology_licenses):
"""license_get_range returns licenses within range provided"""
self.reset_storage_tables()
# craft some consistent mimetypes
mimetypes = self.prepare_mimetypes_from(fossology_licenses)
self.storage.content_mimetype_add(mimetypes)
# add fossology_licenses to storage
self.storage.content_fossology_license_add(fossology_licenses)
# All ids from the db
content_ids = sorted([c['id'] for c in fossology_licenses])
start = content_ids[0]
end = content_ids[-1]
# retrieve fossology_licenses
tool_id = fossology_licenses[0]['indexer_configuration_id']
actual_result = self.storage.content_fossology_license_get_range(
start, end, indexer_configuration_id=tool_id)
actual_ids = actual_result['ids']
actual_next = actual_result['next']
self.assertEqual(len(fossology_licenses), len(actual_ids))
self.assertIsNone(actual_next)
self.assertEqual(content_ids, actual_ids)
@pytest.mark.property_based
@given(gen_content_fossology_licenses(min_size=1, max_size=4),
gen_content_mimetypes(min_size=1, max_size=1))
def test_generate_content_fossology_license_get_range_no_limit_with_filter(
self, fossology_licenses, mimetypes):
"""This filters non textual, then returns results within range"""
self.reset_storage_tables()
# craft some consistent mimetypes
_mimetypes = self.prepare_mimetypes_from(fossology_licenses)
# add binary mimetypes, which will get filtered out of the results
for m in mimetypes:
_mimetypes.append({
**m,
'mimetype': 'binary',  # set after **m so it overrides the generated mimetype
})
self.storage.content_mimetype_add(_mimetypes)
# add fossology_licenses to storage
self.storage.content_fossology_license_add(fossology_licenses)
# All ids from the db
content_ids = sorted([c['id'] for c in fossology_licenses])
start = content_ids[0]
end = content_ids[-1]
# retrieve fossology_licenses
tool_id = fossology_licenses[0]['indexer_configuration_id']
actual_result = self.storage.content_fossology_license_get_range(
start, end, indexer_configuration_id=tool_id)
actual_ids = actual_result['ids']
actual_next = actual_result['next']
self.assertEqual(len(fossology_licenses), len(actual_ids))
self.assertIsNone(actual_next)
self.assertEqual(content_ids, actual_ids)
@pytest.mark.property_based
@given(gen_content_fossology_licenses(min_size=4, max_size=4))
def test_generate_fossology_license_get_range_limit(
self, fossology_licenses):
"""fossology_license_get_range paginates results if limit exceeded"""
self.reset_storage_tables()
# craft some consistent mimetypes
mimetypes = self.prepare_mimetypes_from(fossology_licenses)
# add the mimetypes, then the fossology_licenses, to storage
self.storage.content_mimetype_add(mimetypes)
self.storage.content_fossology_license_add(fossology_licenses)
# sorted list of the sha1s we just added to storage
content_ids = sorted([c['id'] for c in fossology_licenses])
start = content_ids[0]
end = content_ids[-1]
# retrieve fossology_licenses limited to 3 results
limited_results = len(fossology_licenses) - 1
tool_id = fossology_licenses[0]['indexer_configuration_id']
actual_result = self.storage.content_fossology_license_get_range(
start, end,
indexer_configuration_id=tool_id, limit=limited_results)
actual_ids = actual_result['ids']
actual_next = actual_result['next']
self.assertEqual(limited_results, len(actual_ids))
self.assertIsNotNone(actual_next)
self.assertEqual(actual_next, content_ids[-1])
expected_fossology_licenses = content_ids[:-1]
self.assertEqual(expected_fossology_licenses, actual_ids)
# retrieve next part
actual_results2 = self.storage.content_fossology_license_get_range(
start=end, end=end, indexer_configuration_id=tool_id)
actual_ids2 = actual_results2['ids']
actual_next2 = actual_results2['next']
self.assertIsNone(actual_next2)
expected_fossology_licenses2 = [content_ids[-1]]
self.assertEqual(expected_fossology_licenses2, actual_ids2)
@pytest.mark.db
class IndexerTestStorage(CommonTestStorage, BasePgTestStorage,
unittest.TestCase):
"""Running the tests locally.
For the client API tests (remote storage), see the
:class:`swh.indexer.storage.test_api_client.TestRemoteStorage` class.
"""
pass
def test_mapping_names():
assert set(MAPPING_NAMES) == {m.name for m in MAPPINGS.values()}
