diff --git a/PKG-INFO b/PKG-INFO index 3440d24..87edb52 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,69 +1,69 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 0.0.167 +Version: 0.0.168 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Description: swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - storing that information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive a batch of ids - retrieve the associated data depending on object type - compute some index for that object - store the result in swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute tags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translates a file into a translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata from the content_metadata table in storage, or runs the content indexer to translate files. 
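The last step of the indexation procedure above goes through the indexer storage API. A minimal sketch of what that storing step amounts to (illustrative only: idx_storage is assumed to be a configured indexer storage client, sha1 a 20-byte content hash, and tool_id the id of a registered indexer_configuration row):

    # store one mimetype result; the returned summary counts inserted rows
    summary = idx_storage.content_mimetype_add(
        [{
            'id': sha1,                           # content identifier (bytes)
            'mimetype': 'text/plain',
            'encoding': 'utf-8',
            'indexer_configuration_id': tool_id,  # tool that computed the result
        }],
        conflict_update=False)                    # don't overwrite duplicates
    assert summary == {'content_mimetype:add': 1}

The count in that summary is exactly what the SQL _add functions changed by this release now return.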
Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/sql/upgrades/132.sql b/sql/upgrades/132.sql new file mode 100644 index 0000000..3d6b759 --- /dev/null +++ b/sql/upgrades/132.sql @@ -0,0 +1,45 @@ +-- SWH Indexer DB schema upgrade +-- from_version: 131 +-- to_version: 132 +-- description: _add function returns the inserted rows + +insert into dbversion(version, release, description) + values(132, now(), 'Work In Progress'); + +create or replace function swh_content_fossology_license_add(conflict_update boolean) + returns bigint + language plpgsql +as $$ +declare + res bigint; +begin + -- insert unknown licenses first + insert into fossology_license (name) + select distinct license from tmp_content_fossology_license tmp + where not exists (select 1 from fossology_license where name=tmp.license) + on conflict(name) do nothing; + + if conflict_update then + insert into content_fossology_license (id, license_id, indexer_configuration_id) + select tcl.id, + (select id from fossology_license where name = tcl.license) as license, + indexer_configuration_id + from tmp_content_fossology_license tcl + on conflict(id, license_id, indexer_configuration_id) + do update set license_id = excluded.license_id; + else + insert into content_fossology_license (id, license_id, indexer_configuration_id) + select tcl.id, + (select id from fossology_license where name = tcl.license) as license, + indexer_configuration_id + from tmp_content_fossology_license tcl + on conflict(id, license_id, indexer_configuration_id) + do nothing; + end if; + + get diagnostics res = ROW_COUNT; + return res; +end +$$; + +comment on function swh_content_fossology_license_add(boolean) IS 'Add new content licenses'; diff --git a/swh.indexer.egg-info/PKG-INFO b/swh.indexer.egg-info/PKG-INFO index 3440d24..87edb52 100644 --- a/swh.indexer.egg-info/PKG-INFO +++ b/swh.indexer.egg-info/PKG-INFO @@ -1,69 +1,69 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 0.0.167 +Version: 0.0.168 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Description: swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - storing that information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive a batch of ids - retrieve the associated data depending on object type - compute some index for that object - store the result in swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue 
swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute tags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translates a file into a translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata from the content_metadata table in storage, or runs the content indexer to translate files. Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.indexer.egg-info/SOURCES.txt b/swh.indexer.egg-info/SOURCES.txt index 0f73f27..92fa9ab 100644 --- a/swh.indexer.egg-info/SOURCES.txt +++ b/swh.indexer.egg-info/SOURCES.txt @@ -1,105 +1,106 @@ MANIFEST.in Makefile README.md requirements-swh.txt requirements-test.txt requirements.txt setup.py version.txt sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/indexer_configuration.tool_configuration.schema.json sql/doc/json/revision_metadata.translated_metadata.json sql/json/.gitignore sql/json/Makefile sql/json/indexer_configuration.tool_configuration.schema.json sql/json/revision_metadata.translated_metadata.json sql/upgrades/115.sql sql/upgrades/116.sql sql/upgrades/117.sql sql/upgrades/118.sql sql/upgrades/119.sql sql/upgrades/120.sql sql/upgrades/121.sql sql/upgrades/122.sql sql/upgrades/123.sql sql/upgrades/124.sql sql/upgrades/125.sql sql/upgrades/126.sql sql/upgrades/127.sql sql/upgrades/128.sql sql/upgrades/129.sql sql/upgrades/130.sql sql/upgrades/131.sql +sql/upgrades/132.sql swh/__init__.py swh.indexer.egg-info/PKG-INFO swh.indexer.egg-info/SOURCES.txt swh.indexer.egg-info/dependency_links.txt swh.indexer.egg-info/entry_points.txt swh.indexer.egg-info/requires.txt swh.indexer.egg-info/top_level.txt swh/indexer/__init__.py swh/indexer/cli.py swh/indexer/codemeta.py swh/indexer/ctags.py swh/indexer/fossology_license.py swh/indexer/indexer.py swh/indexer/journal_client.py swh/indexer/metadata.py swh/indexer/metadata_detector.py swh/indexer/mimetype.py swh/indexer/origin_head.py swh/indexer/py.typed swh/indexer/rehash.py swh/indexer/tasks.py swh/indexer/data/codemeta/CITATION swh/indexer/data/codemeta/LICENSE swh/indexer/data/codemeta/codemeta.jsonld swh/indexer/data/codemeta/crosswalk.csv swh/indexer/metadata_dictionary/__init__.py swh/indexer/metadata_dictionary/base.py swh/indexer/metadata_dictionary/codemeta.py swh/indexer/metadata_dictionary/maven.py swh/indexer/metadata_dictionary/npm.py swh/indexer/metadata_dictionary/python.py swh/indexer/metadata_dictionary/ruby.py swh/indexer/sql/10-swh-init.sql swh/indexer/sql/20-swh-enums.sql swh/indexer/sql/30-swh-schema.sql swh/indexer/sql/40-swh-func.sql swh/indexer/sql/50-swh-data.sql swh/indexer/sql/60-swh-indexes.sql swh/indexer/storage/__init__.py swh/indexer/storage/converters.py swh/indexer/storage/db.py swh/indexer/storage/exc.py swh/indexer/storage/in_memory.py swh/indexer/storage/interface.py swh/indexer/storage/metrics.py swh/indexer/storage/api/__init__.py swh/indexer/storage/api/client.py swh/indexer/storage/api/server.py swh/indexer/tests/__init__.py swh/indexer/tests/conftest.py swh/indexer/tests/tasks.py swh/indexer/tests/test_cli.py 
swh/indexer/tests/test_codemeta.py swh/indexer/tests/test_ctags.py swh/indexer/tests/test_fossology_license.py swh/indexer/tests/test_journal_client.py swh/indexer/tests/test_metadata.py swh/indexer/tests/test_mimetype.py swh/indexer/tests/test_origin_head.py swh/indexer/tests/test_origin_metadata.py swh/indexer/tests/utils.py swh/indexer/tests/storage/__init__.py swh/indexer/tests/storage/conftest.py swh/indexer/tests/storage/generate_data_test.py swh/indexer/tests/storage/test_api_client.py swh/indexer/tests/storage/test_converters.py swh/indexer/tests/storage/test_in_memory.py swh/indexer/tests/storage/test_metrics.py swh/indexer/tests/storage/test_server.py swh/indexer/tests/storage/test_storage.py \ No newline at end of file diff --git a/swh/indexer/mimetype.py b/swh/indexer/mimetype.py index cce9eef..7d47c23 100644 --- a/swh/indexer/mimetype.py +++ b/swh/indexer/mimetype.py @@ -1,154 +1,157 @@ # Copyright (C) 2016-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Optional, Dict, Any, List import magic from .indexer import ContentIndexer, ContentRangeIndexer if not hasattr(magic.Magic, 'from_buffer'): raise ImportError( 'Expected "import magic" to import python-magic, but file_magic ' 'was imported instead.') def compute_mimetype_encoding(raw_content: bytes) -> Dict[str, str]: """Determine mimetype and encoding from the raw content. Args: raw_content: content's raw data Returns: dict: mimetype and encoding keys and corresponding values. """ m = magic.Magic(mime=True, mime_encoding=True) res = m.from_buffer(raw_content) - (mimetype, encoding) = res.split('; charset=') + try: + mimetype, encoding = res.split('; charset=') + except ValueError: + mimetype, encoding = res, '' return { 'mimetype': mimetype, 'encoding': encoding, } class MixinMimetypeIndexer: """Mixin mimetype indexer. See :class:`MimetypeIndexer` and :class:`MimetypeRangeIndexer` """ tool: Dict[str, Any] idx_storage: Any ADDITIONAL_CONFIG = { 'tools': ('dict', { 'name': 'file', 'version': '1:5.30-1+deb9u1', 'configuration': { "type": "library", "debian-package": "python3-magic" }, }), 'write_batch_size': ('int', 1000), } CONFIG_BASE_FILENAME = 'indexer/mimetype' # type: Optional[str] def index(self, id: bytes, data: Optional[bytes] = None, **kwargs) -> Dict[str, Any]: """Index sha1's content and store result. Args: id: content's identifier data: raw content in bytes Returns: dict: content's mimetype; dict keys being - id: content's identifier (sha1) - mimetype: detected mimetype - encoding: detected encoding """ assert data is not None properties = compute_mimetype_encoding(data) properties.update({ 'id': id, 'indexer_configuration_id': self.tool['id'], }) return properties def persist_index_computations( self, results: List[Dict], policy_update: str ) -> Dict[str, int]: """Persist the results in storage. Args: results: list of content's mimetype dicts (see :meth:`.index`) policy_update: either 'update-dups' or 'ignore-dups' to respectively update duplicates or ignore them """ return self.idx_storage.content_mimetype_add( results, conflict_update=(policy_update == 'update-dups')) class MimetypeIndexer(MixinMimetypeIndexer, ContentIndexer): """Mimetype Indexer working on list of content identifiers. It: - (optionally) filters out content already indexed (cf. 
:meth:`.filter`) - reads content from objstorage per the content's id (sha1) - computes {mimetype, encoding} from that content - stores result in storage """ def filter(self, ids): """Filter out known sha1s and return only missing ones. """ yield from self.idx_storage.content_mimetype_missing(( { 'id': sha1, 'indexer_configuration_id': self.tool['id'], } for sha1 in ids )) class MimetypeRangeIndexer(MixinMimetypeIndexer, ContentRangeIndexer): """Mimetype Range Indexer working on range of content identifiers. It: - (optionally) filters out content already indexed (cf :meth:`.indexed_contents_in_range`) - reads content from objstorage per the content's id (sha1) - computes {mimetype, encoding} from that content - stores result in storage """ def indexed_contents_in_range( self, start: bytes, end: bytes ) -> Dict[str, Optional[bytes]]: """Retrieve indexed content id within range [start, end]. Args: start: Starting bound from range identifier end: End range identifier Returns: dict: a dict with keys: - ids: iterable of content ids within the range. - next: The next range of sha1 starts at this sha1 if any """ return self.idx_storage.content_mimetype_get_range( start, end, self.tool['id']) diff --git a/swh/indexer/sql/30-swh-schema.sql b/swh/indexer/sql/30-swh-schema.sql index 44532be..acdf2e0 100644 --- a/swh/indexer/sql/30-swh-schema.sql +++ b/swh/indexer/sql/30-swh-schema.sql @@ -1,145 +1,145 @@ --- --- Software Heritage Indexers Data Model --- -- drop schema if exists swh cascade; -- create schema swh; -- set search_path to swh; create table dbversion ( version int primary key, release timestamptz, description text ); insert into dbversion(version, release, description) - values(131, now(), 'Work In Progress'); + values(132, now(), 'Work In Progress'); -- Computing metadata on sha1's contents -- a SHA1 checksum (not necessarily originating from Git) create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); create table indexer_configuration ( id serial not null, tool_name text not null, tool_version text not null, tool_configuration jsonb ); comment on table indexer_configuration is 'Indexer''s configuration version'; comment on column indexer_configuration.id is 'Tool identifier'; comment on column indexer_configuration.tool_name is 'Tool name'; comment on column indexer_configuration.tool_version is 'Tool version'; comment on column indexer_configuration.tool_configuration is 'Tool configuration: command line, flags, etc...'; -- Properties (mimetype, encoding, etc...) 
create table content_mimetype ( id sha1 not null, mimetype text not null, encoding text not null, indexer_configuration_id bigint not null ); comment on table content_mimetype is 'Metadata associated with a raw content'; comment on column content_mimetype.mimetype is 'Raw content Mimetype'; comment on column content_mimetype.encoding is 'Raw content encoding'; comment on column content_mimetype.indexer_configuration_id is 'Tool used to compute the information'; -- Language metadata create table content_language ( id sha1 not null, lang languages not null, indexer_configuration_id bigint not null ); comment on table content_language is 'Language information on a raw content'; comment on column content_language.lang is 'Language information'; comment on column content_language.indexer_configuration_id is 'Tool used to compute the information'; -- ctags information per content create table content_ctags ( id sha1 not null, name text not null, kind text not null, line bigint not null, lang ctags_languages not null, indexer_configuration_id bigint not null ); comment on table content_ctags is 'Ctags information on a raw content'; comment on column content_ctags.id is 'Content identifier'; comment on column content_ctags.name is 'Symbol name'; comment on column content_ctags.kind is 'Symbol kind (function, class, variable, const...)'; comment on column content_ctags.line is 'Symbol line'; comment on column content_ctags.lang is 'Language information for that content'; comment on column content_ctags.indexer_configuration_id is 'Tool used to compute the information'; create table fossology_license( id smallserial, name text not null ); comment on table fossology_license is 'Possible licenses recognized by the license indexer'; comment on column fossology_license.id is 'License identifier'; comment on column fossology_license.name is 'License name'; create table content_fossology_license ( id sha1 not null, license_id smallserial not null, indexer_configuration_id bigint not null ); comment on table content_fossology_license is 'licenses associated with a raw content'; comment on column content_fossology_license.id is 'Raw content identifier'; comment on column content_fossology_license.license_id is 'One of the content''s license identifier'; comment on column content_fossology_license.indexer_configuration_id is 'Tool used to compute the information'; -- The table content_metadata provides a translation for files -- identified as potentially containing metadata, with a translation tool (indexer_configuration_id) create table content_metadata( id sha1 not null, metadata jsonb not null, indexer_configuration_id bigint not null ); comment on table content_metadata is 'metadata semantically translated from a content file'; comment on column content_metadata.id is 'sha1 of content file'; comment on column content_metadata.metadata is 'result of translation with defined format'; comment on column content_metadata.indexer_configuration_id is 'tool used for translation'; -- The table revision_intrinsic_metadata provides a minimal set of intrinsic -- metadata detected with the detection tool (indexer_configuration_id) and -- aggregated from the content_metadata translation. 
create table revision_intrinsic_metadata( id sha1_git not null, metadata jsonb not null, indexer_configuration_id bigint not null, mappings text array not null ); comment on table revision_intrinsic_metadata is 'metadata semantically detected and translated in a revision'; comment on column revision_intrinsic_metadata.id is 'sha1_git of revision'; comment on column revision_intrinsic_metadata.metadata is 'result of detection and translation with defined format'; comment on column revision_intrinsic_metadata.indexer_configuration_id is 'tool used for detection'; comment on column revision_intrinsic_metadata.mappings is 'type of metadata files used to obtain this metadata (eg. pkg-info, npm)'; create table origin_intrinsic_metadata( id text not null, -- origin url metadata jsonb, indexer_configuration_id bigint not null, from_revision sha1_git not null, metadata_tsvector tsvector, mappings text array not null ); comment on table origin_intrinsic_metadata is 'keeps intrinsic metadata for an origin'; comment on column origin_intrinsic_metadata.id is 'url of the origin'; comment on column origin_intrinsic_metadata.metadata is 'metadata extracted from a revision'; comment on column origin_intrinsic_metadata.indexer_configuration_id is 'tool used to generate this metadata'; comment on column origin_intrinsic_metadata.from_revision is 'sha1 of the revision this metadata was copied from.'; comment on column origin_intrinsic_metadata.mappings is 'type of metadata files used to obtain this metadata (eg. pkg-info, npm)'; diff --git a/swh/indexer/sql/40-swh-func.sql b/swh/indexer/sql/40-swh-func.sql index 81fca5b..e6601d0 100644 --- a/swh/indexer/sql/40-swh-func.sql +++ b/swh/indexer/sql/40-swh-func.sql @@ -1,472 +1,472 @@ -- Postgresql index helper function create or replace function hash_sha1(text) returns text language sql strict immutable as $$ select encode(public.digest($1, 'sha1'), 'hex') $$; comment on function hash_sha1(text) is 'Compute sha1 hash as text'; -- create a temporary table called tmp_TBLNAME, mimicking existing table -- TBLNAME -- -- Args: -- tblname: name of the table to mimic create or replace function swh_mktemp(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table if not exists tmp_%1$I (like %1$I including defaults) on commit delete rows; alter table tmp_%1$I drop column if exists object_id; ', tblname); return; end $$; -- create a temporary table for content_mimetype tmp_content_mimetype, create or replace function swh_mktemp_content_mimetype() returns void language sql as $$ create temporary table if not exists tmp_content_mimetype ( like content_mimetype including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_mimetype() IS 'Helper table to add mimetype information'; -- add tmp_content_mimetype entries to content_mimetype, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_mimetype_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_mimetype), 1. COPY to tmp_content_mimetype, -- 2. 
call this function create or replace function swh_content_mimetype_add(conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin if conflict_update then insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id) select id, mimetype, encoding, indexer_configuration_id from tmp_content_mimetype tcm on conflict(id, indexer_configuration_id) do update set mimetype = excluded.mimetype, encoding = excluded.encoding; else insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id) select id, mimetype, encoding, indexer_configuration_id from tmp_content_mimetype tcm on conflict(id, indexer_configuration_id) do nothing; end if; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_mimetype_add(boolean) IS 'Add new content mimetypes'; -- add tmp_content_language entries to content_language, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_language_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_content_language, 2. call this function create or replace function swh_content_language_add(conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin if conflict_update then insert into content_language (id, lang, indexer_configuration_id) select id, lang, indexer_configuration_id from tmp_content_language tcl on conflict(id, indexer_configuration_id) do update set lang = excluded.lang; else insert into content_language (id, lang, indexer_configuration_id) select id, lang, indexer_configuration_id from tmp_content_language tcl on conflict(id, indexer_configuration_id) do nothing; end if; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_language_add(boolean) IS 'Add new content languages'; -- create a temporary table for retrieving content_language create or replace function swh_mktemp_content_language() returns void language sql as $$ create temporary table if not exists tmp_content_language ( like content_language including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_language() is 'Helper table to add content language'; -- create a temporary table for content_ctags tmp_content_ctags, create or replace function swh_mktemp_content_ctags() returns void language sql as $$ create temporary table if not exists tmp_content_ctags ( like content_ctags including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_ctags() is 'Helper table to add content ctags'; -- add tmp_content_ctags entries to content_ctags, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- operates in bulk: 0. swh_mktemp(content_ctags), 1. COPY to tmp_content_ctags, -- 2. 
call this function create or replace function swh_content_ctags_add(conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin if conflict_update then delete from content_ctags where id in (select tmp.id from tmp_content_ctags tmp inner join indexer_configuration i on i.id=tmp.indexer_configuration_id); end if; insert into content_ctags (id, name, kind, line, lang, indexer_configuration_id) select id, name, kind, line, lang, indexer_configuration_id from tmp_content_ctags tct on conflict(id, hash_sha1(name), kind, line, lang, indexer_configuration_id) do nothing; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_ctags_add(boolean) IS 'Add new ctags symbols per content'; create type content_ctags_signature as ( id sha1, name text, kind text, line bigint, lang ctags_languages, tool_id integer, tool_name text, tool_version text, tool_configuration jsonb ); -- Search within ctags content. -- create or replace function swh_content_ctags_search( expression text, l integer default 10, last_sha1 sha1 default '\x0000000000000000000000000000000000000000') returns setof content_ctags_signature language sql as $$ select c.id, name, kind, line, lang, i.id as tool_id, tool_name, tool_version, tool_configuration from content_ctags c inner join indexer_configuration i on i.id = c.indexer_configuration_id where hash_sha1(name) = hash_sha1(expression) and c.id > last_sha1 order by id limit l; $$; comment on function swh_content_ctags_search(text, integer, sha1) IS 'Equality search through ctags'' symbols'; -- create a temporary table for content_fossology_license tmp_content_fossology_license, create or replace function swh_mktemp_content_fossology_license() returns void language sql as $$ create temporary table if not exists tmp_content_fossology_license ( id sha1, license text, indexer_configuration_id integer ) on commit delete rows; $$; comment on function swh_mktemp_content_fossology_license() is 'Helper table to add content license'; -- add tmp_content_fossology_license entries to content_fossology_license, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- operates in bulk: 0. swh_mktemp(content_fossology_license), 1. COPY to -- tmp_content_fossology_license, 2. 
call this function create or replace function swh_content_fossology_license_add(conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin -- insert unknown licenses first insert into fossology_license (name) select distinct license from tmp_content_fossology_license tmp where not exists (select 1 from fossology_license where name=tmp.license) on conflict(name) do nothing; if conflict_update then insert into content_fossology_license (id, license_id, indexer_configuration_id) select tcl.id, (select id from fossology_license where name = tcl.license) as license, indexer_configuration_id from tmp_content_fossology_license tcl on conflict(id, license_id, indexer_configuration_id) do update set license_id = excluded.license_id; + else + insert into content_fossology_license (id, license_id, indexer_configuration_id) + select tcl.id, + (select id from fossology_license where name = tcl.license) as license, + indexer_configuration_id + from tmp_content_fossology_license tcl + on conflict(id, license_id, indexer_configuration_id) + do nothing; end if; - insert into content_fossology_license (id, license_id, indexer_configuration_id) - select tcl.id, - (select id from fossology_license where name = tcl.license) as license, - indexer_configuration_id - from tmp_content_fossology_license tcl - on conflict(id, license_id, indexer_configuration_id) - do nothing; - get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_fossology_license_add(boolean) IS 'Add new content licenses'; -- content_metadata functions -- add tmp_content_metadata entries to content_metadata, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_content_metadata, 2. call this function create or replace function swh_content_metadata_add(conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin if conflict_update then insert into content_metadata (id, metadata, indexer_configuration_id) select id, metadata, indexer_configuration_id from tmp_content_metadata tcm on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata; else insert into content_metadata (id, metadata, indexer_configuration_id) select id, metadata, indexer_configuration_id from tmp_content_metadata tcm on conflict(id, indexer_configuration_id) do nothing; end if; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_metadata_add(boolean) IS 'Add new content metadata'; -- create a temporary table for retrieving content_metadata create or replace function swh_mktemp_content_metadata() returns void language sql as $$ create temporary table if not exists tmp_content_metadata ( like content_metadata including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_metadata() is 'Helper table to add content metadata'; -- end content_metadata functions -- add tmp_revision_intrinsic_metadata entries to revision_intrinsic_metadata, -- overwriting duplicates if conflict_update is true, skipping duplicates -- otherwise. -- -- If filtering duplicates is in order, the call to -- swh_revision_intrinsic_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_revision_intrinsic_metadata, 2. 
call this function create or replace function swh_revision_intrinsic_metadata_add(conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin if conflict_update then insert into revision_intrinsic_metadata (id, metadata, mappings, indexer_configuration_id) select id, metadata, mappings, indexer_configuration_id from tmp_revision_intrinsic_metadata tcm on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata, mappings = excluded.mappings; else insert into revision_intrinsic_metadata (id, metadata, mappings, indexer_configuration_id) select id, metadata, mappings, indexer_configuration_id from tmp_revision_intrinsic_metadata tcm on conflict(id, indexer_configuration_id) do nothing; end if; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_revision_intrinsic_metadata_add(boolean) IS 'Add new revision intrinsic metadata'; -- create a temporary table for retrieving revision_intrinsic_metadata create or replace function swh_mktemp_revision_intrinsic_metadata() returns void language sql as $$ create temporary table if not exists tmp_revision_intrinsic_metadata ( like revision_intrinsic_metadata including defaults ) on commit delete rows; $$; comment on function swh_mktemp_revision_intrinsic_metadata() is 'Helper table to add revision intrinsic metadata'; -- create a temporary table for retrieving origin_intrinsic_metadata create or replace function swh_mktemp_origin_intrinsic_metadata() returns void language sql as $$ create temporary table if not exists tmp_origin_intrinsic_metadata ( like origin_intrinsic_metadata including defaults ) on commit delete rows; $$; comment on function swh_mktemp_origin_intrinsic_metadata() is 'Helper table to add origin intrinsic metadata'; create or replace function swh_mktemp_indexer_configuration() returns void language sql as $$ create temporary table if not exists tmp_indexer_configuration ( like indexer_configuration including defaults ) on commit delete rows; alter table tmp_indexer_configuration drop column if exists id; $$; -- add tmp_indexer_configuration entries to indexer_configuration, -- skipping duplicates if any. -- -- operates in bulk: 0. create temporary tmp_indexer_configuration, 1. COPY to -- it, 2. call this function to insert and filtering out duplicates create or replace function swh_indexer_configuration_add() returns setof indexer_configuration language plpgsql as $$ begin insert into indexer_configuration(tool_name, tool_version, tool_configuration) select tool_name, tool_version, tool_configuration from tmp_indexer_configuration tmp on conflict(tool_name, tool_version, tool_configuration) do nothing; return query select id, tool_name, tool_version, tool_configuration from tmp_indexer_configuration join indexer_configuration using(tool_name, tool_version, tool_configuration); return; end $$; -- add tmp_origin_intrinsic_metadata entries to origin_intrinsic_metadata, -- overwriting duplicates if conflict_update is true, skipping duplicates -- otherwise. -- -- If filtering duplicates is in order, the call to -- swh_origin_intrinsic_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_origin_intrinsic_metadata, 2. 
call this function create or replace function swh_origin_intrinsic_metadata_add( conflict_update boolean) returns bigint language plpgsql as $$ declare res bigint; begin perform swh_origin_intrinsic_metadata_compute_tsvector(); if conflict_update then insert into origin_intrinsic_metadata (id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings) select id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings from tmp_origin_intrinsic_metadata on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata, metadata_tsvector = excluded.metadata_tsvector, mappings = excluded.mappings, from_revision = excluded.from_revision; else insert into origin_intrinsic_metadata (id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings) select id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings from tmp_origin_intrinsic_metadata on conflict(id, indexer_configuration_id) do nothing; end if; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_origin_intrinsic_metadata_add(boolean) IS 'Add new origin intrinsic metadata'; -- Compute the metadata_tsvector column in tmp_origin_intrinsic_metadata. -- -- It uses the "pg_catalog.simple" dictionary, as it has no stopword, -- so it should be suitable for proper names and non-English text. create or replace function swh_origin_intrinsic_metadata_compute_tsvector() returns void language plpgsql as $$ begin update tmp_origin_intrinsic_metadata set metadata_tsvector = to_tsvector('pg_catalog.simple', metadata); end $$; diff --git a/swh/indexer/tests/storage/test_converters.py b/swh/indexer/tests/storage/test_converters.py index 0a5a0f7..0aa5d6a 100644 --- a/swh/indexer/tests/storage/test_converters.py +++ b/swh/indexer/tests/storage/test_converters.py @@ -1,188 +1,187 @@ -# Copyright (C) 2015-2019 The Software Heritage developers +# Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import unittest - from swh.indexer.storage import converters -class TestConverters(unittest.TestCase): - def setUp(self): - self.maxDiff = None - - def test_ctags_to_db(self): - input_ctag = { - 'id': b'some-id', - 'indexer_configuration_id': 100, - 'ctags': [ - { - 'name': 'some-name', - 'kind': 'some-kind', - 'line': 10, - 'lang': 'Yaml', - }, { - 'name': 'main', - 'kind': 'function', - 'line': 12, - 'lang': 'Yaml', - }, - ] - } - - expected_ctags = [ +def test_ctags_to_db(): + input_ctag = { + 'id': b'some-id', + 'indexer_configuration_id': 100, + 'ctags': [ { - 'id': b'some-id', 'name': 'some-name', 'kind': 'some-kind', 'line': 10, 'lang': 'Yaml', - 'indexer_configuration_id': 100, }, { - 'id': b'some-id', 'name': 'main', 'kind': 'function', 'line': 12, 'lang': 'Yaml', - 'indexer_configuration_id': 100, - }] + }, + ] + } - # when - actual_ctags = list(converters.ctags_to_db(input_ctag)) - - # then - self.assertEqual(actual_ctags, expected_ctags) - - def test_db_to_ctags(self): - input_ctags = { + expected_ctags = [ + { 'id': b'some-id', 'name': 'some-name', 'kind': 'some-kind', 'line': 10, 'lang': 'Yaml', - 'tool_id': 200, - 'tool_name': 'some-toolname', - 'tool_version': 'some-toolversion', - 'tool_configuration': {} - } - expected_ctags = { + 'indexer_configuration_id': 100, + }, { 'id': b'some-id', - 'name': 'some-name', - 'kind': 
'some-kind', - 'line': 10, + 'name': 'main', + 'kind': 'function', + 'line': 12, 'lang': 'Yaml', - 'tool': { - 'id': 200, - 'name': 'some-toolname', - 'version': 'some-toolversion', - 'configuration': {}, - } - } - - # when - actual_ctags = converters.db_to_ctags(input_ctags) - - # then - self.assertEqual(actual_ctags, expected_ctags) - - def test_db_to_mimetype(self): - input_mimetype = { - 'id': b'some-id', - 'tool_id': 10, - 'tool_name': 'some-toolname', - 'tool_version': 'some-toolversion', - 'tool_configuration': {}, - 'encoding': b'ascii', - 'mimetype': b'text/plain', - } - - expected_mimetype = { - 'id': b'some-id', - 'encoding': b'ascii', - 'mimetype': b'text/plain', - 'tool': { - 'id': 10, - 'name': 'some-toolname', - 'version': 'some-toolversion', - 'configuration': {}, - } - } - - actual_mimetype = converters.db_to_mimetype(input_mimetype) - - self.assertEqual(actual_mimetype, expected_mimetype) - - def test_db_to_language(self): - input_language = { - 'id': b'some-id', - 'tool_id': 20, - 'tool_name': 'some-toolname', - 'tool_version': 'some-toolversion', - 'tool_configuration': {}, - 'lang': b'css', - } - - expected_language = { - 'id': b'some-id', - 'lang': b'css', - 'tool': { - 'id': 20, - 'name': 'some-toolname', - 'version': 'some-toolversion', - 'configuration': {}, - } + 'indexer_configuration_id': 100, + }] + + # when + actual_ctags = list(converters.ctags_to_db(input_ctag)) + + # then + assert actual_ctags == expected_ctags + + +def test_db_to_ctags(): + input_ctags = { + 'id': b'some-id', + 'name': 'some-name', + 'kind': 'some-kind', + 'line': 10, + 'lang': 'Yaml', + 'tool_id': 200, + 'tool_name': 'some-toolname', + 'tool_version': 'some-toolversion', + 'tool_configuration': {} + } + expected_ctags = { + 'id': b'some-id', + 'name': 'some-name', + 'kind': 'some-kind', + 'line': 10, + 'lang': 'Yaml', + 'tool': { + 'id': 200, + 'name': 'some-toolname', + 'version': 'some-toolversion', + 'configuration': {}, } - - actual_language = converters.db_to_language(input_language) - - self.assertEqual(actual_language, expected_language) - - def test_db_to_fossology_license(self): - input_license = { - 'id': b'some-id', - 'tool_id': 20, - 'tool_name': 'nomossa', - 'tool_version': '5.22', - 'tool_configuration': {}, - 'licenses': ['GPL2.0'], + } + + # when + actual_ctags = converters.db_to_ctags(input_ctags) + + # then + assert actual_ctags == expected_ctags + + +def test_db_to_mimetype(): + input_mimetype = { + 'id': b'some-id', + 'tool_id': 10, + 'tool_name': 'some-toolname', + 'tool_version': 'some-toolversion', + 'tool_configuration': {}, + 'encoding': b'ascii', + 'mimetype': b'text/plain', + } + + expected_mimetype = { + 'id': b'some-id', + 'encoding': b'ascii', + 'mimetype': b'text/plain', + 'tool': { + 'id': 10, + 'name': 'some-toolname', + 'version': 'some-toolversion', + 'configuration': {}, } - - expected_license = { - 'licenses': ['GPL2.0'], - 'tool': { - 'id': 20, - 'name': 'nomossa', - 'version': '5.22', - 'configuration': {}, - } + } + + actual_mimetype = converters.db_to_mimetype(input_mimetype) + + assert actual_mimetype == expected_mimetype + + +def test_db_to_language(): + input_language = { + 'id': b'some-id', + 'tool_id': 20, + 'tool_name': 'some-toolname', + 'tool_version': 'some-toolversion', + 'tool_configuration': {}, + 'lang': b'css', + } + + expected_language = { + 'id': b'some-id', + 'lang': b'css', + 'tool': { + 'id': 20, + 'name': 'some-toolname', + 'version': 'some-toolversion', + 'configuration': {}, } - - actual_license = 
converters.db_to_fossology_license(input_license) - - self.assertEqual(actual_license, expected_license) - - def test_db_to_metadata(self): - input_metadata = { - 'id': b'some-id', - 'tool_id': 20, - 'tool_name': 'some-toolname', - 'tool_version': 'some-toolversion', - 'tool_configuration': {}, - 'metadata': b'metadata', + } + + actual_language = converters.db_to_language(input_language) + + assert actual_language == expected_language + + +def test_db_to_fossology_license(): + input_license = { + 'id': b'some-id', + 'tool_id': 20, + 'tool_name': 'nomossa', + 'tool_version': '5.22', + 'tool_configuration': {}, + 'licenses': ['GPL2.0'], + } + + expected_license = { + 'licenses': ['GPL2.0'], + 'tool': { + 'id': 20, + 'name': 'nomossa', + 'version': '5.22', + 'configuration': {}, } - - expected_metadata = { - 'id': b'some-id', - 'metadata': b'metadata', - 'tool': { - 'id': 20, - 'name': 'some-toolname', - 'version': 'some-toolversion', - 'configuration': {}, - } + } + + actual_license = converters.db_to_fossology_license(input_license) + + assert actual_license == expected_license + + +def test_db_to_metadata(): + input_metadata = { + 'id': b'some-id', + 'tool_id': 20, + 'tool_name': 'some-toolname', + 'tool_version': 'some-toolversion', + 'tool_configuration': {}, + 'metadata': b'metadata', + } + + expected_metadata = { + 'id': b'some-id', + 'metadata': b'metadata', + 'tool': { + 'id': 20, + 'name': 'some-toolname', + 'version': 'some-toolversion', + 'configuration': {}, } + } - actual_metadata = converters.db_to_metadata(input_metadata) + actual_metadata = converters.db_to_metadata(input_metadata) - self.assertEqual(actual_metadata, expected_metadata) + assert actual_metadata == expected_metadata diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py index 8c3d994..ab11855 100644 --- a/swh/indexer/tests/storage/test_storage.py +++ b/swh/indexer/tests/storage/test_storage.py @@ -1,1933 +1,1929 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import inspect import threading from typing import Dict import pytest from swh.model.hashutil import hash_to_bytes from swh.indexer.storage.exc import ( IndexerStorageArgumentException, DuplicateId, ) from swh.indexer.storage.interface import IndexerStorageInterface def prepare_mimetypes_from(fossology_licenses): """Fossology license needs some consistent data in db to run. 
""" mimetypes = [] for c in fossology_licenses: mimetypes.append({ 'id': c['id'], 'mimetype': 'text/plain', 'encoding': 'utf-8', 'indexer_configuration_id': c['indexer_configuration_id'], }) return mimetypes def endpoint_name(etype: str, ename: str) -> str: """Compute the storage's endpoint's name >>> endpoint_name('content_mimetype', 'add') 'content_mimetype_add' >>> endpoint_name('content_fosso_license', 'delete') 'content_fosso_license_delete' """ return f'{etype}_{ename}' def endpoint(storage, etype: str, ename: str): return getattr(storage, endpoint_name(etype, ename)) def expected_summary( count: int, etype: str, ename: str = 'add') -> Dict[str, int]: """Compute the expected summary The key is determine according to etype and ename >>> expected_summary(10, 'content_mimetype', 'add') {'content_mimetype:add': 10} >>> expected_summary(9, 'origin_intrinsic_metadata', 'delete') {'origin_intrinsic_metadata:del': 9} """ pattern = ename[0:3] key = endpoint_name(etype, ename).replace(f'_{ename}', f':{pattern}') return { key: count } +def test_check_config(swh_indexer_storage): + assert swh_indexer_storage.check_config(check_write=True) + assert swh_indexer_storage.check_config(check_write=False) + + +def test_types(swh_indexer_storage): + """Checks all methods of StorageInterface are implemented by this + backend, and that they have the same signature.""" + # Create an instance of the protocol (which cannot be instantiated + # directly, so this creates a subclass, then instantiates it) + interface = type('_', (IndexerStorageInterface,), {})() + + assert 'content_mimetype_add' in dir(interface) + + missing_methods = [] + + for meth_name in dir(interface): + if meth_name.startswith('_'): + continue + interface_meth = getattr(interface, meth_name) + try: + concrete_meth = getattr(swh_indexer_storage, meth_name) + except AttributeError: + missing_methods.append(meth_name) + continue + + expected_signature = inspect.signature(interface_meth) + actual_signature = inspect.signature(concrete_meth) + + assert expected_signature == actual_signature, meth_name + + assert missing_methods == [] + + class StorageETypeTester: """Base class for testing a series of common behaviour between a bunch of endpoint types supported by an IndexerStorage. This is supposed to be inherited with the following class attributes: - endpoint_type - tool_name - example_data See below for example usage. 
""" def test_missing(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool_id = data.tools[self.tool_name]['id'] # given 2 (hopefully) unknown objects query = [ { 'id': data.sha1_1, 'indexer_configuration_id': tool_id, }, { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, }] # we expect these are both returned by the xxx_missing endpoint actual_missing = endpoint(storage, etype, 'missing')(query) assert list(actual_missing) == [ data.sha1_1, data.sha1_2, ] # now, when we add one of them summary = endpoint(storage, etype, 'add')([{ 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool_id, }]) assert summary == expected_summary(1, etype) # we expect only the other one returned actual_missing = endpoint(storage, etype, 'missing')(query) assert list(actual_missing) == [data.sha1_1] def test_add__drop_duplicate(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool_id = data.tools[self.tool_name]['id'] # add the first object data_v1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool_id, } summary = endpoint(storage, etype, 'add')([data_v1]) assert summary == expected_summary(1, etype) # should be able to retrieve it actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v1 = [{ 'id': data.sha1_2, **self.example_data[0], 'tool': data.tools[self.tool_name], }] assert actual_data == expected_data_v1 # now if we add a modified version of the same object (same id) data_v2 = data_v1.copy() data_v2.update(self.example_data[1]) summary2 = endpoint(storage, etype, 'add')([data_v2]) assert summary2 == expected_summary(0, etype) # not added # we expect to retrieve the original data, not the modified one actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) assert actual_data == expected_data_v1 def test_add__update_in_place_duplicate( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_v1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool['id'], } # given summary = endpoint(storage, etype, 'add')([data_v1]) assert summary == expected_summary(1, etype) # not added # when actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v1 = [{ 'id': data.sha1_2, **self.example_data[0], 'tool': tool, }] # then assert actual_data == expected_data_v1 # given data_v2 = data_v1.copy() data_v2.update(self.example_data[1]) endpoint(storage, etype, 'add')([data_v2], conflict_update=True) assert summary == expected_summary(1, etype) # modified so counted actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v2 = [{ 'id': data.sha1_2, **self.example_data[1], 'tool': tool, }] # data did change as the v2 was used to overwrite v1 assert actual_data == expected_data_v2 def test_add__update_in_place_deadlock( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] hashes = [ hash_to_bytes( '34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}'.format(i)) for i in range(1000)] data_v1 = [ { 'id': hash_, **self.example_data[0], 'indexer_configuration_id': tool['id'], } for hash_ in hashes ] data_v2 = [ { 'id': hash_, **self.example_data[1], 'indexer_configuration_id': tool['id'], } for hash_ in hashes ] # Remove one item from each, so that both queries have to 
succeed for # all items to be in the DB. data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given endpoint(storage, etype, 'add')(data_v1) # when actual_data = list(endpoint(storage, etype, 'get')(hashes)) expected_data_v1 = [ { 'id': hash_, **self.example_data[0], 'tool': tool, } for hash_ in hashes ] # then assert actual_data == expected_data_v1 # given def f1(): endpoint(storage, etype, 'add')(data_v2a, conflict_update=True) def f2(): endpoint(storage, etype, 'add')(data_v2b, conflict_update=True) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = sorted(endpoint(storage, etype, 'get')(hashes), key=lambda x: x['id']) expected_data_v2 = [ { 'id': hash_, **self.example_data[1], 'tool': tool, } for hash_ in hashes ] assert actual_data == expected_data_v2 def test_add__duplicate_twice(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_rev1 = { 'id': data.revision_id_2, **self.example_data[0], 'indexer_configuration_id': tool['id'] } data_rev2 = { 'id': data.revision_id_2, **self.example_data[1], 'indexer_configuration_id': tool['id'] } # when summary = endpoint(storage, etype, 'add')([data_rev1]) assert summary == expected_summary(1, etype) with pytest.raises(DuplicateId): endpoint(storage, etype, 'add')( [data_rev2, data_rev2], conflict_update=True) # then actual_data = list(endpoint(storage, etype, 'get')( [data.revision_id_2, data.revision_id_1])) expected_data = [{ 'id': data.revision_id_2, **self.example_data[0], 'tool': tool, }] assert actual_data == expected_data def test_get(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] query = [data.sha1_2, data.sha1_1] data1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool['id'], } # when summary = endpoint(storage, etype, 'add')([data1]) assert summary == expected_summary(1, etype) # then actual_data = list(endpoint(storage, etype, 'get')(query)) # then expected_data = [{ 'id': data.sha1_2, **self.example_data[0], 'tool': tool, }] assert actual_data == expected_data class TestIndexerStorageContentMimetypes(StorageETypeTester): """Test Indexer Storage content_mimetype related methods """ endpoint_type = 'content_mimetype' tool_name = 'file' example_data = [ { 'mimetype': 'text/plain', 'encoding': 'utf-8', }, { 'mimetype': 'text/html', 'encoding': 'us-ascii', }, ] def test_generate_content_mimetype_get_range_limit_none( self, swh_indexer_storage): """mimetype_get_range call with wrong limit input should fail""" storage = swh_indexer_storage with pytest.raises(IndexerStorageArgumentException) as e: storage.content_mimetype_get_range( start=None, end=None, indexer_configuration_id=None, limit=None) assert e.value.args == ('limit should not be None',) def test_generate_content_mimetype_get_range_no_limit( self, swh_indexer_storage_with_data): """mimetype_get_range returns mimetypes within range provided""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes # All ids from the db content_ids = sorted([c['id'] for c in mimetypes]) start = content_ids[0] end = content_ids[-1] # retrieve mimetypes tool_id = mimetypes[0]['indexer_configuration_id'] actual_result = storage.content_mimetype_get_range( start, end, indexer_configuration_id=tool_id) actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert 
len(mimetypes) == len(actual_ids) assert actual_next is None assert content_ids == actual_ids def test_generate_content_mimetype_get_range_limit( self, swh_indexer_storage_with_data): """mimetype_get_range paginates results if limit exceeded""" storage, data = swh_indexer_storage_with_data indexer_configuration_id = data.tools['file']['id'] # input the list of sha1s we want from storage content_ids = sorted( [c['id'] for c in data.mimetypes]) mimetypes = list(storage.content_mimetype_get(content_ids)) assert len(mimetypes) == len(data.mimetypes) start = content_ids[0] end = content_ids[-1] # retrieve mimetypes limited to 10 results actual_result = storage.content_mimetype_get_range( start, end, indexer_configuration_id=indexer_configuration_id, limit=10) assert actual_result assert set(actual_result.keys()) == {'ids', 'next'} actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert len(actual_ids) == 10 assert actual_next is not None assert actual_next == content_ids[10] expected_mimetypes = content_ids[:10] assert expected_mimetypes == actual_ids # retrieve next part actual_result = storage.content_mimetype_get_range( start=end, end=end, indexer_configuration_id=indexer_configuration_id) assert set(actual_result.keys()) == {'ids', 'next'} actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert actual_next is None expected_mimetypes = [content_ids[-1]] assert expected_mimetypes == actual_ids class TestIndexerStorageContentLanguage(StorageETypeTester): """Test Indexer Storage content_language related methods """ endpoint_type = 'content_language' tool_name = 'pygments' example_data = [ { 'lang': 'haskell', }, { 'lang': 'common-lisp', }, ] class TestIndexerStorageContentCTags(StorageETypeTester): """Test Indexer Storage content_ctags related methods """ endpoint_type = 'content_ctags' tool_name = 'universal-ctags' example_data = [ { 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 119, 'lang': 'OCaml', }] }, { 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Python', }, { 'name': 'main', 'kind': 'function', 'line': 119, 'lang': 'Python', }] }, ] # the following tests are disabled because CTAGS behaves differently @pytest.mark.skip def test_add__drop_duplicate(self): pass @pytest.mark.skip def test_add__update_in_place_duplicate(self): pass @pytest.mark.skip def test_add__update_in_place_deadlock(self): pass @pytest.mark.skip def test_add__duplicate_twice(self): pass @pytest.mark.skip def test_get(self): pass def test_content_ctags_search(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # 1. given tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag1 = { 'id': data.sha1_1, 'indexer_configuration_id': tool_id, 'ctags': [ { 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }, { 'name': 'hello', 'kind': 'variable', 'line': 210, 'lang': 'Python', }, ] } ctag2 = { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [ { 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, { 'name': 'result', 'kind': 'variable', 'line': 120, 'lang': 'C', }, ] } storage.content_ctags_add([ctag1, ctag2]) # 1. when actual_ctags = list(storage.content_ctags_search('hello', limit=1)) # 1. then assert actual_ctags == [ { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', } ] # 2. 
when actual_ctags = list(storage.content_ctags_search( 'hello', limit=1, last_sha1=ctag1['id'])) # 2. then assert actual_ctags == [ { 'id': ctag2['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', } ] # 3. when actual_ctags = list(storage.content_ctags_search('hello')) # 3. then assert actual_ctags == [ { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 210, 'lang': 'Python', }, { 'id': ctag2['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, ] # 4. when actual_ctags = list(storage.content_ctags_search('counter')) # then assert actual_ctags == [{ 'id': ctag1['id'], 'tool': tool, 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }] # 5. when actual_ctags = list(storage.content_ctags_search('result', limit=1)) # then assert actual_ctags == [{ 'id': ctag2['id'], 'tool': tool, 'name': 'result', 'kind': 'variable', 'line': 120, 'lang': 'C', }] def test_content_ctags_search_no_result(self, swh_indexer_storage): storage = swh_indexer_storage actual_ctags = list(storage.content_ctags_search('counter')) assert not actual_ctags def test_content_ctags_add__add_new_ctags_added( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag_v1 = { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given storage.content_ctags_add([ctag_v1]) storage.content_ctags_add([ctag_v1]) # conflict does nothing # when actual_ctags = list(storage.content_ctags_get([data.sha1_2])) # then expected_ctags = [{ 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }] assert actual_ctags == expected_ctags # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) storage.content_ctags_add([ctag_v2]) expected_ctags = [ { 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }, { 'id': data.sha1_2, 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': tool, } ] actual_ctags = list(storage.content_ctags_get( [data.sha1_2])) assert actual_ctags == expected_ctags def test_content_ctags_add__update_in_place( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag_v1 = { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given storage.content_ctags_add([ctag_v1]) # when actual_ctags = list(storage.content_ctags_get( [data.sha1_2])) # then expected_ctags = [ { 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool } ] assert actual_ctags == expected_ctags # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }, { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) storage.content_ctags_add([ctag_v2], conflict_update=True) actual_ctags = list(storage.content_ctags_get( [data.sha1_2])) # ctag did change as the v2 was used to overwrite v1 expected_ctags = [ { 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 
    def test_content_ctags_add__update_in_place(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool = data.tools['universal-ctags']
        tool_id = tool['id']

        ctag_v1 = {
            'id': data.sha1_2,
            'indexer_configuration_id': tool_id,
            'ctags': [{
                'name': 'done',
                'kind': 'variable',
                'line': 100,
                'lang': 'Scheme',
            }]
        }

        # given
        storage.content_ctags_add([ctag_v1])

        # when
        actual_ctags = list(storage.content_ctags_get(
            [data.sha1_2]))

        # then
        expected_ctags = [
            {
                'id': data.sha1_2,
                'name': 'done',
                'kind': 'variable',
                'line': 100,
                'lang': 'Scheme',
                'tool': tool
            }
        ]
        assert actual_ctags == expected_ctags

        # given
        ctag_v2 = ctag_v1.copy()
        ctag_v2.update({
            'ctags': [
                {
                    'name': 'done',
                    'kind': 'variable',
                    'line': 100,
                    'lang': 'Scheme',
                },
                {
                    'name': 'defn',
                    'kind': 'function',
                    'line': 120,
                    'lang': 'Scheme',
                }
            ]
        })

        storage.content_ctags_add([ctag_v2], conflict_update=True)

        actual_ctags = list(storage.content_ctags_get(
            [data.sha1_2]))

        # ctag did change as the v2 was used to overwrite v1
        expected_ctags = [
            {
                'id': data.sha1_2,
                'name': 'done',
                'kind': 'variable',
                'line': 100,
                'lang': 'Scheme',
                'tool': tool,
            },
            {
                'id': data.sha1_2,
                'name': 'defn',
                'kind': 'function',
                'line': 120,
                'lang': 'Scheme',
                'tool': tool,
            }
        ]
        assert actual_ctags == expected_ctags
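# The metadata test classes below mostly just declare endpoint_type, tool_name
# and example_data: the generic add/get/conflict scenarios are inherited from
# StorageETypeTester.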
class TestIndexerStorageContentMetadata(StorageETypeTester):
    """Test Indexer Storage content_metadata related methods
    """
    tool_name = 'swh-metadata-detector'
    endpoint_type = 'content_metadata'
    example_data = [
        {
            'metadata': {
                'other': {},
                'codeRepository': {
                    'type': 'git',
                    'url': 'https://github.com/moranegg/metadata_test'
                },
                'description': 'Simple package.json test for indexer',
                'name': 'test_metadata',
                'version': '0.0.1'
            },
        },
        {
            'metadata': {
                'other': {},
                'name': 'test_metadata',
                'version': '0.0.1'
            },
        },
    ]


class TestIndexerStorageRevisionIntrinsicMetadata(StorageETypeTester):
    """Test Indexer Storage revision_intrinsic_metadata related methods
    """
    tool_name = 'swh-metadata-detector'
    endpoint_type = 'revision_intrinsic_metadata'
    example_data = [
        {
            'metadata': {
                'other': {},
                'codeRepository': {
                    'type': 'git',
                    'url': 'https://github.com/moranegg/metadata_test'
                },
                'description': 'Simple package.json test for indexer',
                'name': 'test_metadata',
                'version': '0.0.1'
            },
            'mappings': ['mapping1'],
        },
        {
            'metadata': {
                'other': {},
                'name': 'test_metadata',
                'version': '0.0.1'
            },
            'mappings': ['mapping2'],
        },
    ]

    def test_revision_intrinsic_metadata_delete(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        etype = self.endpoint_type
        tool = data.tools[self.tool_name]

        query = [data.sha1_2, data.sha1_1]
        data1 = {
            'id': data.sha1_2,
            **self.example_data[0],
            'indexer_configuration_id': tool['id'],
        }

        # when
        summary = endpoint(storage, etype, 'add')([data1])
        assert summary == expected_summary(1, etype)

        summary2 = endpoint(storage, etype, 'delete')([
            {
                'id': data.sha1_2,
                'indexer_configuration_id': tool['id'],
            }
        ])
        assert summary2 == expected_summary(1, etype, 'del')

        # then
        actual_data = list(endpoint(storage, etype, 'get')(query))

        # then
        assert not actual_data

    def test_revision_intrinsic_metadata_delete_nonexisting(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        etype = self.endpoint_type
        tool = data.tools[self.tool_name]
        endpoint(storage, etype, 'delete')([
            {
                'id': data.sha1_2,
                'indexer_configuration_id': tool['id'],
            }
        ])


class TestIndexerStorageContentFossologyLicense:
    def test_content_fossology_license_add__new_license_added(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool = data.tools['nomos']
        tool_id = tool['id']

        license_v1 = {
            'id': data.sha1_1,
            'licenses': ['Apache-2.0'],
            'indexer_configuration_id': tool_id,
        }

        # given
        storage.content_fossology_license_add([license_v1])
        # conflict does nothing
        storage.content_fossology_license_add([license_v1])

        # when
        actual_licenses = list(storage.content_fossology_license_get(
            [data.sha1_1]))

        # then
        expected_license = {
            data.sha1_1: [{
                'licenses': ['Apache-2.0'],
                'tool': tool,
            }]
        }
        assert actual_licenses == [expected_license]

        # given
        license_v2 = license_v1.copy()
        license_v2.update({
            'licenses': ['BSD-2-Clause'],
        })

        storage.content_fossology_license_add([license_v2])

        actual_licenses = list(storage.content_fossology_license_get(
            [data.sha1_1]))

        expected_license = {
            data.sha1_1: [{
                'licenses': ['Apache-2.0', 'BSD-2-Clause'],
                'tool': tool
            }]
        }

        # the v2 license was appended to the existing one: plain adds
        # accumulate licenses for a given content instead of replacing them
        assert actual_licenses == [expected_license]

    def test_generate_content_fossology_license_get_range_limit_none(
            self, swh_indexer_storage_with_data):
        """license_get_range call with wrong limit input should fail"""
        storage, data = swh_indexer_storage_with_data
        with pytest.raises(IndexerStorageArgumentException) as e:
            storage.content_fossology_license_get_range(
                start=None, end=None, indexer_configuration_id=None,
                limit=None)

        assert e.value.args == ('limit should not be None',)

    def test_generate_content_fossology_license_get_range_no_limit(
            self, swh_indexer_storage_with_data):
        """license_get_range returns licenses within range provided"""
        storage, data = swh_indexer_storage_with_data
        # craft some consistent mimetypes
        fossology_licenses = data.fossology_licenses
        mimetypes = prepare_mimetypes_from(fossology_licenses)

        storage.content_mimetype_add(mimetypes, conflict_update=True)
        # add fossology_licenses to storage
        storage.content_fossology_license_add(fossology_licenses)

        # All ids from the db
        content_ids = sorted([c['id'] for c in fossology_licenses])

        start = content_ids[0]
        end = content_ids[-1]

        # retrieve fossology_licenses
        tool_id = fossology_licenses[0]['indexer_configuration_id']
        actual_result = storage.content_fossology_license_get_range(
            start, end, indexer_configuration_id=tool_id)

        actual_ids = actual_result['ids']
        actual_next = actual_result['next']

        assert len(fossology_licenses) == len(actual_ids)
        assert actual_next is None
        assert content_ids == actual_ids

    def test_generate_content_fossology_license_get_range_no_limit_with_filter(
            self, swh_indexer_storage_with_data):
        """Filters out non-textual contents, then returns results in range"""
        storage, data = swh_indexer_storage_with_data
        fossology_licenses = data.fossology_licenses
        mimetypes = data.mimetypes

        # craft some consistent mimetypes
        _mimetypes = prepare_mimetypes_from(fossology_licenses)
        # add binary mimetypes which will get filtered out in results
        for m in mimetypes:
            _mimetypes.append({
                'mimetype': 'binary',
                **m,
            })

        storage.content_mimetype_add(_mimetypes, conflict_update=True)
        # add fossology_licenses to storage
        storage.content_fossology_license_add(fossology_licenses)

        # All ids from the db
        content_ids = sorted([c['id'] for c in fossology_licenses])

        start = content_ids[0]
        end = content_ids[-1]

        # retrieve fossology_licenses
        tool_id = fossology_licenses[0]['indexer_configuration_id']
        actual_result = storage.content_fossology_license_get_range(
            start, end, indexer_configuration_id=tool_id)

        actual_ids = actual_result['ids']
        actual_next = actual_result['next']

        assert len(fossology_licenses) == len(actual_ids)
        assert actual_next is None
        assert content_ids == actual_ids
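    # The test below pins down the *_get_range pagination contract: 'next' is
    # the first id that did not fit in the page (None once the range is
    # exhausted), and callers feed it back as the new `start`.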
    def test_generate_fossology_license_get_range_limit(
            self, swh_indexer_storage_with_data):
        """fossology_license_get_range paginates results if limit exceeded"""
        storage, data = swh_indexer_storage_with_data
        fossology_licenses = data.fossology_licenses

        # craft some consistent mimetypes
        mimetypes = prepare_mimetypes_from(fossology_licenses)

        # add fossology_licenses to storage
        storage.content_mimetype_add(mimetypes, conflict_update=True)
        storage.content_fossology_license_add(fossology_licenses)

        # input the list of sha1s we want from storage
        content_ids = sorted([c['id'] for c in fossology_licenses])

        start = content_ids[0]
        end = content_ids[-1]

        # retrieve fossology_licenses limited to one less than the total
        limited_results = len(fossology_licenses) - 1
        tool_id = fossology_licenses[0]['indexer_configuration_id']
        actual_result = storage.content_fossology_license_get_range(
            start, end,
            indexer_configuration_id=tool_id, limit=limited_results)

        actual_ids = actual_result['ids']
        actual_next = actual_result['next']

        assert limited_results == len(actual_ids)
        assert actual_next is not None
        assert actual_next == content_ids[-1]

        expected_fossology_licenses = content_ids[:-1]
        assert expected_fossology_licenses == actual_ids

        # retrieve next part
        actual_results2 = storage.content_fossology_license_get_range(
            start=end, end=end, indexer_configuration_id=tool_id)
        actual_ids2 = actual_results2['ids']
        actual_next2 = actual_results2['next']

        assert actual_next2 is None
        expected_fossology_licenses2 = [content_ids[-1]]
        assert expected_fossology_licenses2 == actual_ids2


class TestIndexerStorageOriginIntrinsicMetadata:
    def test_origin_intrinsic_metadata_get(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        metadata = {
            'version': None,
            'name': None,
        }
        metadata_rev = {
            'id': data.revision_id_2,
            'metadata': metadata,
            'mappings': ['mapping1'],
            'indexer_configuration_id': tool_id,
        }
        metadata_origin = {
            'id': data.origin_url_1,
            'metadata': metadata,
            'indexer_configuration_id': tool_id,
            'mappings': ['mapping1'],
            'from_revision': data.revision_id_2,
        }

        # when
        storage.revision_intrinsic_metadata_add([metadata_rev])
        storage.origin_intrinsic_metadata_add([metadata_origin])

        # then
        actual_metadata = list(storage.origin_intrinsic_metadata_get(
            [data.origin_url_1, 'no://where']))

        expected_metadata = [{
            'id': data.origin_url_1,
            'metadata': metadata,
            'tool': data.tools['swh-metadata-detector'],
            'from_revision': data.revision_id_2,
            'mappings': ['mapping1'],
        }]
        assert actual_metadata == expected_metadata

    def test_origin_intrinsic_metadata_delete(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        metadata = {
            'version': None,
            'name': None,
        }
        metadata_rev = {
            'id': data.revision_id_2,
            'metadata': metadata,
            'mappings': ['mapping1'],
            'indexer_configuration_id': tool_id,
        }
        metadata_origin = {
            'id': data.origin_url_1,
            'metadata': metadata,
            'indexer_configuration_id': tool_id,
            'mappings': ['mapping1'],
            'from_revision': data.revision_id_2,
        }
        metadata_origin2 = metadata_origin.copy()
        metadata_origin2['id'] = data.origin_url_2

        # when
        storage.revision_intrinsic_metadata_add([metadata_rev])
        storage.origin_intrinsic_metadata_add([
            metadata_origin, metadata_origin2])

        storage.origin_intrinsic_metadata_delete([
            {
                'id': data.origin_url_1,
                'indexer_configuration_id': tool_id
            }
        ])

        # then
        actual_metadata = list(storage.origin_intrinsic_metadata_get(
            [data.origin_url_1, data.origin_url_2, 'no://where']))
        for item in actual_metadata:
            item['indexer_configuration_id'] = item.pop('tool')['id']
        assert actual_metadata == [metadata_origin2]

    def test_origin_intrinsic_metadata_delete_nonexisting(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool_id = data.tools['swh-metadata-detector']['id']
        storage.origin_intrinsic_metadata_delete([
            {
                'id': data.origin_url_1,
                'indexer_configuration_id': tool_id
            }
        ])
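    # The next two tests pin down the conflict_update flag: without it a
    # second add for an already-indexed origin is silently dropped, while
    # conflict_update=True overwrites the stored record in place.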
    def test_origin_intrinsic_metadata_add_drop_duplicate(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        metadata_v1 = {
            'version': None,
            'name': None,
        }
        metadata_rev_v1 = {
            'id': data.revision_id_1,
            'metadata': metadata_v1.copy(),
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }
        metadata_origin_v1 = {
            'id': data.origin_url_1,
            'metadata': metadata_v1.copy(),
            'indexer_configuration_id': tool_id,
            'mappings': [],
            'from_revision': data.revision_id_1,
        }

        # given
        storage.revision_intrinsic_metadata_add([metadata_rev_v1])
        storage.origin_intrinsic_metadata_add([metadata_origin_v1])

        # when
        actual_metadata = list(storage.origin_intrinsic_metadata_get(
            [data.origin_url_1, 'no://where']))

        expected_metadata_v1 = [{
            'id': data.origin_url_1,
            'metadata': metadata_v1,
            'tool': data.tools['swh-metadata-detector'],
            'from_revision': data.revision_id_1,
            'mappings': [],
        }]
        assert actual_metadata == expected_metadata_v1

        # given
        metadata_v2 = metadata_v1.copy()
        metadata_v2.update({
            'name': 'test_metadata',
            'author': 'MG',
        })
        metadata_rev_v2 = metadata_rev_v1.copy()
        metadata_origin_v2 = metadata_origin_v1.copy()
        metadata_rev_v2['metadata'] = metadata_v2
        metadata_origin_v2['metadata'] = metadata_v2

        storage.revision_intrinsic_metadata_add([metadata_rev_v2])
        storage.origin_intrinsic_metadata_add([metadata_origin_v2])

        # then
        actual_metadata = list(storage.origin_intrinsic_metadata_get(
            [data.origin_url_1]))

        # metadata did not change as the v2 was dropped.
        assert actual_metadata == expected_metadata_v1

    def test_origin_intrinsic_metadata_add_update_in_place_duplicate(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        metadata_v1 = {
            'version': None,
            'name': None,
        }
        metadata_rev_v1 = {
            'id': data.revision_id_2,
            'metadata': metadata_v1,
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }
        metadata_origin_v1 = {
            'id': data.origin_url_1,
            'metadata': metadata_v1.copy(),
            'indexer_configuration_id': tool_id,
            'mappings': [],
            'from_revision': data.revision_id_2,
        }

        # given
        storage.revision_intrinsic_metadata_add([metadata_rev_v1])
        storage.origin_intrinsic_metadata_add([metadata_origin_v1])

        # when
        actual_metadata = list(storage.origin_intrinsic_metadata_get(
            [data.origin_url_1]))

        # then
        expected_metadata_v1 = [{
            'id': data.origin_url_1,
            'metadata': metadata_v1,
            'tool': data.tools['swh-metadata-detector'],
            'from_revision': data.revision_id_2,
            'mappings': [],
        }]
        assert actual_metadata == expected_metadata_v1

        # given
        metadata_v2 = metadata_v1.copy()
        metadata_v2.update({
            'name': 'test_update_duplicated_metadata',
            'author': 'MG',
        })
        metadata_rev_v2 = metadata_rev_v1.copy()
        metadata_origin_v2 = metadata_origin_v1.copy()
        metadata_rev_v2['metadata'] = metadata_v2
        metadata_origin_v2 = {
            'id': data.origin_url_1,
            'metadata': metadata_v2.copy(),
            'indexer_configuration_id': tool_id,
            'mappings': ['npm'],
            'from_revision': data.revision_id_1,
        }

        storage.revision_intrinsic_metadata_add(
            [metadata_rev_v2], conflict_update=True)
        storage.origin_intrinsic_metadata_add(
            [metadata_origin_v2], conflict_update=True)

        actual_metadata = list(storage.origin_intrinsic_metadata_get(
            [data.origin_url_1]))

        expected_metadata_v2 = [{
            'id': data.origin_url_1,
            'metadata': metadata_v2,
            'tool': data.tools['swh-metadata-detector'],
            'from_revision': data.revision_id_1,
            'mappings': ['npm'],
        }]

        # metadata did change as the v2 was used to overwrite v1
        assert actual_metadata == expected_metadata_v2
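    # The deadlock test below drives two concurrent upserts over overlapping
    # rows submitted in opposite orders; unless the backend orders its
    # in-place updates consistently, the opposite lock-acquisition orders
    # could deadlock.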
    def test_origin_intrinsic_metadata_add__update_in_place_deadlock(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        ids = list(range(10))

        example_data1 = {
            'metadata': {
                'version': None,
                'name': None,
            },
            'mappings': [],
        }
        example_data2 = {
            'metadata': {
                'version': 'v1.1.1',
                'name': 'foo',
            },
            'mappings': [],
        }

        metadata_rev_v1 = {
            'id': data.revision_id_2,
            'metadata': {
                'version': None,
                'name': None,
            },
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }

        data_v1 = [
            {
                'id': 'file:///tmp/origin%d' % id_,
                'from_revision': data.revision_id_2,
                **example_data1,
                'indexer_configuration_id': tool_id,
            }
            for id_ in ids
        ]
        data_v2 = [
            {
                'id': 'file:///tmp/origin%d' % id_,
                'from_revision': data.revision_id_2,
                **example_data2,
                'indexer_configuration_id': tool_id,
            }
            for id_ in ids
        ]

        # Remove one item from each, so that both queries have to succeed for
        # all items to be in the DB.
        data_v2a = data_v2[1:]
        data_v2b = list(reversed(data_v2[0:-1]))

        # given
        storage.revision_intrinsic_metadata_add([metadata_rev_v1])
        storage.origin_intrinsic_metadata_add(data_v1)

        # when
        origins = ['file:///tmp/origin%d' % i for i in ids]
        actual_data = list(storage.origin_intrinsic_metadata_get(origins))

        expected_data_v1 = [
            {
                'id': 'file:///tmp/origin%d' % id_,
                'from_revision': data.revision_id_2,
                **example_data1,
                'tool': data.tools['swh-metadata-detector'],
            }
            for id_ in ids
        ]

        # then
        assert actual_data == expected_data_v1

        # given
        def f1():
            storage.origin_intrinsic_metadata_add(
                data_v2a, conflict_update=True)

        def f2():
            storage.origin_intrinsic_metadata_add(
                data_v2b, conflict_update=True)

        t1 = threading.Thread(target=f1)
        t2 = threading.Thread(target=f2)
        t2.start()
        t1.start()

        t1.join()
        t2.join()

        actual_data = list(storage.origin_intrinsic_metadata_get(origins))

        expected_data_v2 = [
            {
                'id': 'file:///tmp/origin%d' % id_,
                'from_revision': data.revision_id_2,
                **example_data2,
                'tool': data.tools['swh-metadata-detector'],
            }
            for id_ in ids
        ]

        assert len(actual_data) == len(expected_data_v2)
        assert sorted(actual_data, key=lambda x: x['id']) == expected_data_v2

    def test_origin_intrinsic_metadata_add__duplicate_twice(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        metadata = {
            'developmentStatus': None,
            'name': None,
        }
        metadata_rev = {
            'id': data.revision_id_2,
            'metadata': metadata,
            'mappings': ['mapping1'],
            'indexer_configuration_id': tool_id,
        }
        metadata_origin = {
            'id': data.origin_url_1,
            'metadata': metadata,
            'indexer_configuration_id': tool_id,
            'mappings': ['mapping1'],
            'from_revision': data.revision_id_2,
        }

        # when
        storage.revision_intrinsic_metadata_add([metadata_rev])

        with pytest.raises(DuplicateId):
            storage.origin_intrinsic_metadata_add([
                metadata_origin, metadata_origin])
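    # Fulltext search terms are conjunctive: an origin matches only if its
    # metadata contains every term, as the last assertion below shows
    # (no single origin here mentions both 'John' and 'Jane').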
    def test_origin_intrinsic_metadata_search_fulltext(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        metadata1 = {
            'author': 'John Doe',
        }
        metadata1_rev = {
            'id': data.revision_id_1,
            'metadata': metadata1,
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }
        metadata1_origin = {
            'id': data.origin_url_1,
            'metadata': metadata1,
            'mappings': [],
            'indexer_configuration_id': tool_id,
            'from_revision': data.revision_id_1,
        }
        metadata2 = {
            'author': 'Jane Doe',
        }
        metadata2_rev = {
            'id': data.revision_id_2,
            'metadata': metadata2,
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }
        metadata2_origin = {
            'id': data.origin_url_2,
            'metadata': metadata2,
            'mappings': [],
            'indexer_configuration_id': tool_id,
            'from_revision': data.revision_id_2,
        }

        # when
        storage.revision_intrinsic_metadata_add([metadata1_rev])
        storage.origin_intrinsic_metadata_add([metadata1_origin])
        storage.revision_intrinsic_metadata_add([metadata2_rev])
        storage.origin_intrinsic_metadata_add([metadata2_origin])

        # then
        search = storage.origin_intrinsic_metadata_search_fulltext
        assert set([res['id'] for res in search(['Doe'])]) \
            == set([data.origin_url_1, data.origin_url_2])
        assert [res['id'] for res in search(['John', 'Doe'])] \
            == [data.origin_url_1]
        assert [res['id'] for res in search(['John'])] \
            == [data.origin_url_1]
        assert not list(search(['John', 'Jane']))

    def test_origin_intrinsic_metadata_search_fulltext_rank(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        # given
        tool_id = data.tools['swh-metadata-detector']['id']

        # The following authors have "Random Person" to add some more content
        # to the JSON data, to work around normalization quirks when there
        # are few words (rank/(1+ln(nb_words)) is very sensitive to nb_words
        # for small values of nb_words: the divisor grows from ~2.10 at three
        # words to ~2.61 at five, a swing of roughly 25%).
        metadata1 = {
            'author': [
                'Random Person',
                'John Doe',
                'Jane Doe',
            ]
        }
        metadata1_rev = {
            'id': data.revision_id_1,
            'metadata': metadata1,
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }
        metadata1_origin = {
            'id': data.origin_url_1,
            'metadata': metadata1,
            'mappings': [],
            'indexer_configuration_id': tool_id,
            'from_revision': data.revision_id_1,
        }
        metadata2 = {
            'author': [
                'Random Person',
                'Jane Doe',
            ]
        }
        metadata2_rev = {
            'id': data.revision_id_2,
            'metadata': metadata2,
            'mappings': [],
            'indexer_configuration_id': tool_id,
        }
        metadata2_origin = {
            'id': data.origin_url_2,
            'metadata': metadata2,
            'mappings': [],
            'indexer_configuration_id': tool_id,
            'from_revision': data.revision_id_2,
        }

        # when
        storage.revision_intrinsic_metadata_add([metadata1_rev])
        storage.origin_intrinsic_metadata_add([metadata1_origin])
        storage.revision_intrinsic_metadata_add([metadata2_rev])
        storage.origin_intrinsic_metadata_add([metadata2_origin])

        # then
        search = storage.origin_intrinsic_metadata_search_fulltext
        assert [res['id'] for res in search(['Doe'])] \
            == [data.origin_url_1, data.origin_url_2]
        assert [res['id'] for res in search(['Doe'], limit=1)] \
            == [data.origin_url_1]
        assert [res['id'] for res in search(['John'])] \
            == [data.origin_url_1]
        assert [res['id'] for res in search(['Jane'])] \
            == [data.origin_url_2, data.origin_url_1]
        assert [res['id'] for res in search(['John', 'Jane'])] \
            == [data.origin_url_1]
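    # Helper shared by the producer-search and stats tests below: it indexes
    # three origins, produced by two different tools and mapping sets.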
    def _fill_origin_intrinsic_metadata(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool1_id = data.tools['swh-metadata-detector']['id']
        tool2_id = data.tools['swh-metadata-detector2']['id']

        metadata1 = {
            '@context': 'foo',
            'author': 'John Doe',
        }
        metadata1_rev = {
            'id': data.revision_id_1,
            'metadata': metadata1,
            'mappings': ['npm'],
            'indexer_configuration_id': tool1_id,
        }
        metadata1_origin = {
            'id': data.origin_url_1,
            'metadata': metadata1,
            'mappings': ['npm'],
            'indexer_configuration_id': tool1_id,
            'from_revision': data.revision_id_1,
        }
        metadata2 = {
            '@context': 'foo',
            'author': 'Jane Doe',
        }
        metadata2_rev = {
            'id': data.revision_id_2,
            'metadata': metadata2,
            'mappings': ['npm', 'gemspec'],
            'indexer_configuration_id': tool2_id,
        }
        metadata2_origin = {
            'id': data.origin_url_2,
            'metadata': metadata2,
            'mappings': ['npm', 'gemspec'],
            'indexer_configuration_id': tool2_id,
            'from_revision': data.revision_id_2,
        }
        metadata3 = {
            '@context': 'foo',
        }
        metadata3_rev = {
            'id': data.revision_id_3,
            'metadata': metadata3,
            'mappings': ['npm', 'gemspec'],
            'indexer_configuration_id': tool2_id,
        }
        metadata3_origin = {
            'id': data.origin_url_3,
            'metadata': metadata3,
            'mappings': ['pkg-info'],
            'indexer_configuration_id': tool2_id,
            'from_revision': data.revision_id_3,
        }

        storage.revision_intrinsic_metadata_add([metadata1_rev])
        storage.origin_intrinsic_metadata_add([metadata1_origin])
        storage.revision_intrinsic_metadata_add([metadata2_rev])
        storage.origin_intrinsic_metadata_add([metadata2_origin])
        storage.revision_intrinsic_metadata_add([metadata3_rev])
        storage.origin_intrinsic_metadata_add([metadata3_origin])

    def test_origin_intrinsic_metadata_search_by_producer(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        self._fill_origin_intrinsic_metadata(
            swh_indexer_storage_with_data)
        tool1 = data.tools['swh-metadata-detector']
        tool2 = data.tools['swh-metadata-detector2']
        endpoint = storage.origin_intrinsic_metadata_search_by_producer

        # test pagination
        # no 'page_token' param, return all origins
        result = endpoint(ids_only=True)
        assert result['origins'] \
            == [data.origin_url_1, data.origin_url_2, data.origin_url_3]
        assert 'next_page_token' not in result

        # 'page_token' is < than origin_1, return everything
        result = endpoint(page_token=data.origin_url_1[:-1], ids_only=True)
        assert result['origins'] \
            == [data.origin_url_1, data.origin_url_2, data.origin_url_3]
        assert 'next_page_token' not in result

        # 'page_token' is origin_3, return nothing
        result = endpoint(page_token=data.origin_url_3, ids_only=True)
        assert not result['origins']
        assert 'next_page_token' not in result

        # test limit argument
        result = endpoint(page_token=data.origin_url_1[:-1],
                          limit=2, ids_only=True)
        assert result['origins'] == [data.origin_url_1, data.origin_url_2]
        assert result['next_page_token'] == result['origins'][-1]

        result = endpoint(page_token=data.origin_url_1,
                          limit=2, ids_only=True)
        assert result['origins'] == [data.origin_url_2, data.origin_url_3]
        assert 'next_page_token' not in result

        result = endpoint(page_token=data.origin_url_2,
                          limit=2, ids_only=True)
        assert result['origins'] == [data.origin_url_3]
        assert 'next_page_token' not in result

        # test mappings filtering
        result = endpoint(mappings=['npm'], ids_only=True)
        assert result['origins'] == [data.origin_url_1, data.origin_url_2]
        assert 'next_page_token' not in result

        result = endpoint(mappings=['npm', 'gemspec'], ids_only=True)
        assert result['origins'] == [data.origin_url_1, data.origin_url_2]
        assert 'next_page_token' not in result

        result = endpoint(mappings=['gemspec'], ids_only=True)
        assert result['origins'] == [data.origin_url_2]
        assert 'next_page_token' not in result

        result = endpoint(mappings=['pkg-info'], ids_only=True)
        assert result['origins'] == [data.origin_url_3]
        assert 'next_page_token' not in result

        result = endpoint(mappings=['foobar'], ids_only=True)
        assert not result['origins']
        assert 'next_page_token' not in result

        # test pagination + mappings
        result = endpoint(mappings=['npm'], limit=1, ids_only=True)
        assert result['origins'] == [data.origin_url_1]
        assert result['next_page_token'] == result['origins'][-1]

        # test tool filtering
        result = endpoint(tool_ids=[tool1['id']], ids_only=True)
        assert result['origins'] == [data.origin_url_1]
        assert 'next_page_token' not in result

        result = endpoint(tool_ids=[tool2['id']], ids_only=True)
        assert sorted(result['origins']) \
            == [data.origin_url_2, data.origin_url_3]
        assert 'next_page_token' not in result

        result = endpoint(tool_ids=[tool1['id'], tool2['id']], ids_only=True)
        assert sorted(result['origins']) \
            == [data.origin_url_1, data.origin_url_2, data.origin_url_3]
        assert 'next_page_token' not in result
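        # With ids_only=False the endpoint returns full records (metadata,
        # mappings, tool, from_revision) rather than bare origin URLs.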
        # test ids_only=False
        assert endpoint(mappings=['gemspec'])['origins'] \
            == [{
                'id': data.origin_url_2,
                'metadata': {
                    '@context': 'foo',
                    'author': 'Jane Doe',
                },
                'mappings': ['npm', 'gemspec'],
                'tool': tool2,
                'from_revision': data.revision_id_2,
            }]

    def test_origin_intrinsic_metadata_stats(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        self._fill_origin_intrinsic_metadata(
            swh_indexer_storage_with_data)

        result = storage.origin_intrinsic_metadata_stats()
        assert result == {
            'per_mapping': {
                'gemspec': 1,
                'npm': 2,
                'pkg-info': 1,
                'codemeta': 0,
                'maven': 0,
            },
            'total': 3,
            'non_empty': 2,
        }


class TestIndexerStorageIndexerConfiguration:
    def test_indexer_configuration_add(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'some-unknown-tool',
            'tool_version': 'some-version',
            'tool_configuration': {"debian-package": "some-package"},
        }

        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool is None  # does not exist

        # add it
        actual_tools = list(storage.indexer_configuration_add([tool]))

        assert len(actual_tools) == 1
        actual_tool = actual_tools[0]
        assert actual_tool is not None  # now it exists
        new_id = actual_tool.pop('id')
        assert actual_tool == tool

        actual_tools2 = list(storage.indexer_configuration_add([tool]))
        actual_tool2 = actual_tools2[0]
        assert actual_tool2 is not None  # now it exists
        new_id2 = actual_tool2.pop('id')

        assert new_id == new_id2
        assert actual_tool == actual_tool2

    def test_indexer_configuration_add_multiple(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'some-unknown-tool',
            'tool_version': 'some-version',
            'tool_configuration': {"debian-package": "some-package"},
        }
        actual_tools = list(storage.indexer_configuration_add([tool]))
        assert len(actual_tools) == 1

        new_tools = [tool, {
            'tool_name': 'yet-another-tool',
            'tool_version': 'version',
            'tool_configuration': {},
        }]

        actual_tools = list(storage.indexer_configuration_add(new_tools))
        assert len(actual_tools) == 2

        # order not guaranteed, so we iterate over results to check
        for tool in actual_tools:
            _id = tool.pop('id')
            assert _id is not None
            assert tool in new_tools

    def test_indexer_configuration_get_missing(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'unknown-tool',
            'tool_version': '3.1.0rc2-31-ga2cbb8c',
            'tool_configuration': {"command_line": "nomossa <filepath>"},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool is None

    def test_indexer_configuration_get(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'nomos',
            'tool_version': '3.1.0rc2-31-ga2cbb8c',
            'tool_configuration': {"command_line": "nomossa <filepath>"},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool

        expected_tool = tool.copy()
        del actual_tool['id']

        assert expected_tool == actual_tool

    def test_indexer_configuration_metadata_get_missing_context(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'swh-metadata-translator',
            'tool_version': '0.0.1',
            'tool_configuration': {"context": "unknown-context"},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool is None
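    # tool_configuration is part of a tool's identity: the same name and
    # version with a different configuration is a distinct tool, which is
    # why the unknown-context lookup above finds nothing.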
    def test_indexer_configuration_metadata_get(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'swh-metadata-translator',
            'tool_version': '0.0.1',
            'tool_configuration': {"type": "local", "context": "NpmMapping"},
        }
        storage.indexer_configuration_add([tool])
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool

        expected_tool = tool.copy()
        expected_tool['id'] = actual_tool['id']

        assert expected_tool == actual_tool
-
-
-class TestIndexerStorageMisc:
-    """Misc endpoints tests for the IndexerStorage.
-    """
-
-    def test_check_config(self, swh_indexer_storage):
-        storage = swh_indexer_storage
-        assert storage.check_config(check_write=True)
-        assert storage.check_config(check_write=False)
-
-    def test_types(self, swh_indexer_storage):
-        """Checks all methods of StorageInterface are implemented by this
-        backend, and that they have the same signature."""
-        # Create an instance of the protocol (which cannot be instantiated
-        # directly, so this creates a subclass, then instantiates it)
-        interface = type('_', (IndexerStorageInterface,), {})()
-
-        assert 'content_mimetype_add' in dir(interface)
-
-        missing_methods = []
-
-        for meth_name in dir(interface):
-            if meth_name.startswith('_'):
-                continue
-            interface_meth = getattr(interface, meth_name)
-            try:
-                concrete_meth = getattr(swh_indexer_storage, meth_name)
-            except AttributeError:
-                missing_methods.append(meth_name)
-                continue
-
-            expected_signature = inspect.signature(interface_meth)
-            actual_signature = inspect.signature(concrete_meth)
-
-            assert expected_signature == actual_signature, meth_name
-
-        assert missing_methods == []
diff --git a/swh/indexer/tests/test_mimetype.py b/swh/indexer/tests/test_mimetype.py
index cf39bc3..91895c9 100644
--- a/swh/indexer/tests/test_mimetype.py
+++ b/swh/indexer/tests/test_mimetype.py
@@ -1,147 +1,148 @@
-# Copyright (C) 2017-2018  The Software Heritage developers
+# Copyright (C) 2017-2020  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import pytest
import unittest

from typing import Any, Dict

from swh.indexer.mimetype import (
    MimetypeIndexer, MimetypeRangeIndexer, compute_mimetype_encoding
)

from swh.indexer.tests.utils import (
    CommonContentIndexerTest, CommonContentIndexerRangeTest,
    BASE_TEST_CONFIG, fill_storage, fill_obj_storage, filter_dict,
)


-class BasicTest(unittest.TestCase):
-    def test_compute_mimetype_encoding(self):
-        """Compute mimetype encoding should return results"""
-        for _input, _mimetype, _encoding in [
-                ('du français'.encode(), 'text/plain', 'utf-8'),
-                (b'def __init__(self):', 'text/x-python', 'us-ascii')]:
-
-            actual_result = compute_mimetype_encoding(_input)
-            self.assertEqual(actual_result, {
-                'mimetype': _mimetype,
-                'encoding': _encoding
-            })
+def test_compute_mimetype_encoding():
+    """Compute mimetype encoding should return results"""
+    for _input, _mimetype, _encoding in [
+            ('du français'.encode(), 'text/plain', 'utf-8'),
+            (b'def __init__(self):', 'text/x-python', 'us-ascii'),
+            (b'\xff\xfe\x00\x00\x00\x00\xff\xfe\xff\xff',
+             'application/octet-stream', '')
+    ]:
+        actual_result = compute_mimetype_encoding(_input)
+        assert actual_result == {
+            'mimetype': _mimetype,
+            'encoding': _encoding
+        }


CONFIG = {
    **BASE_TEST_CONFIG,
    'tools': {
        'name': 'file',
        'version': '1:5.30-1+deb9u1',
        'configuration': {
            "type": "library",
            "debian-package": "python3-magic"
        },
    },
}  # type: Dict[str, Any]


class TestMimetypeIndexer(CommonContentIndexerTest, unittest.TestCase):
    """Mimetype indexer test scenarios:

    - Known sha1s in the input list have their data indexed
    - Unknown sha1s in the input list are not indexed

    """

    legacy_get_format = True

    def get_indexer_results(self, ids):
        yield from self.idx_storage.content_mimetype_get(ids)

    def setUp(self):
        self.indexer = MimetypeIndexer(config=CONFIG)
        self.indexer.catch_exceptions = False
        self.idx_storage = self.indexer.idx_storage
        fill_storage(self.indexer.storage)
        fill_obj_storage(self.indexer.objstorage)

        self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5'
        self.id1 = '688a5ef812c53907562fe379d4b3851e69c7cb15'
        self.id2 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'

        tool = {k.replace('tool_', ''): v
                for (k, v) in self.indexer.tool.items()}

        self.expected_results = {
            self.id0: {
                'id': self.id0,
                'tool': tool,
                'mimetype': 'text/plain',
                'encoding': 'us-ascii',
            },
            self.id1: {
                'id': self.id1,
                'tool': tool,
                'mimetype': 'text/plain',
                'encoding': 'us-ascii',
            },
            self.id2: {
                'id': self.id2,
                'tool': tool,
                'mimetype': 'application/x-empty',
                'encoding': 'binary',
            }
        }


RANGE_CONFIG = dict(list(CONFIG.items()) + [('write_batch_size', 100)])


class TestMimetypeRangeIndexer(
        CommonContentIndexerRangeTest, unittest.TestCase):
    """Range Mimetype Indexer tests.

    - new data within range are indexed
    - no data outside a range are indexed
    - with filtering existing indexed data prior to compute new index
    - without filtering existing indexed data prior to compute new index

    """

    def setUp(self):
        super().setUp()
        self.indexer = MimetypeRangeIndexer(config=RANGE_CONFIG)
        self.indexer.catch_exceptions = False
        fill_storage(self.indexer.storage)
        fill_obj_storage(self.indexer.objstorage)

        self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5'
        self.id1 = '02fb2c89e14f7fab46701478c83779c7beb7b069'
        self.id2 = '103bc087db1d26afc3a0283f38663d081e9b01e6'
        tool_id = self.indexer.tool['id']

        self.expected_results = {
            self.id0: {
                'encoding': 'us-ascii',
                'id': self.id0,
                'indexer_configuration_id': tool_id,
                'mimetype': 'text/plain'},
            self.id1: {
                'encoding': 'us-ascii',
                'id': self.id1,
                'indexer_configuration_id': tool_id,
                'mimetype': 'text/x-python'},
            self.id2: {
                'encoding': 'us-ascii',
                'id': self.id2,
                'indexer_configuration_id': tool_id,
                'mimetype': 'text/plain'}
        }


def test_mimetype_w_no_tool():
    with pytest.raises(ValueError):
        MimetypeIndexer(config=filter_dict(CONFIG, 'tools'))


def test_mimetype_range_w_no_tool():
    with pytest.raises(ValueError):
        MimetypeRangeIndexer(config=filter_dict(CONFIG, 'tools'))
diff --git a/version.txt b/version.txt
index eb0bb85..a14f0a3 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-v0.0.167-0-g28b03c5
\ No newline at end of file
+v0.0.168-0-g54ac740
\ No newline at end of file