diff --git a/PKG-INFO b/PKG-INFO
index d4fdd5c70..807cbc165 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,10 +1,10 @@
 Metadata-Version: 1.0
 Name: swh.storage
-Version: 0.0.79
+Version: 0.0.80
 Summary: Software Heritage storage manager
 Home-page: https://forge.softwareheritage.org/diffusion/DSTO/
 Author: Software Heritage developers
 Author-email: swh-devel@inria.fr
 License: UNKNOWN
 Description: UNKNOWN
 Platform: UNKNOWN
diff --git a/README.dev b/README.dev
index 76b29f94c..ba833e99f 100644
--- a/README.dev
+++ b/README.dev
@@ -1,19 +1,77 @@
-# A test server should be running for tests
+README.dev
+==========
-Sample configuration (e.g. ~/.config/swh/storage.ini):
+A test server can be run locally for tests.
-    [main]
-    db=dbname=softwareheritage-dev
-    storage_base=/tmp/swh/storage/
+# Sample configuration
-Note: `storage_base` entry referenced should exist.
+In either /etc/softwareheritage/storage/storage.yml,
+~/.config/swh/storage.yml or ~/.swh/storage.yml:
-# Start
+```
+storage:
+  cls: local
+  args:
+    db: "dbname=softwareheritage-dev user="
+    objstorage:
+      cls: pathslicing
+      args:
+        root: /home/storage/swh-storage/
+        slicing: 0:2/2:4/4:6
+```
-local only:
+This configuration uses:
-    python3 -m swh.storage.api.server ~/.config/swh/storage.ini
+- a local storage instance whose db connection points to the local
+  softwareheritage-dev instance
-local only but accessible from other machine:
+- a local objstorage instance whose:
-    python3 -m swh.storage.api.server ~/.config/swh/storage.ini 0.0.0.0
+  - root path is /home/storage/swh-storage/
+
+  - slicing scheme is 0:2/2:4/4:6. This means the content identifier
+    (sha1) is sliced to build the on-disk path: the first level uses
+    the first 2 hex characters, the second level the next 2, and the
+    third level the next 2; the complete hash is then used as the
+    name of the file holding the raw content. For example,
+    00062f8bd330715c4f819373653d97b3cd34394c will be stored at
+    00/06/2f/00062f8bd330715c4f819373653d97b3cd34394c
+
+Note that the 'root' path should exist on disk.
+
+
+# Run server
+
+Command:
+```
+python3 -m swh.storage.api.server ~/.config/swh/storage.yml
+```
+
+This runs a local swh-storage API on port 5002.
+
+
+# And then what?
+
+In your upper layer (loader-git, loader-svn, etc.), you can define a
+remote storage with this snippet of yaml configuration.
+ +``` +storage: + cls: remote + args: + url: http://localhost:5002/ +``` + +You could directly define a local storage with the following snippet: + +``` +storage: + cls: local + args: + db: service=swh-dev + objstorage: + cls: pathslicing + args: + root: /home/storage/swh-storage/ + slicing: 0:2/2:4/4:6 +``` diff --git a/debian/control b/debian/control index 45595a4e6..4a39407d1 100644 --- a/debian/control +++ b/debian/control @@ -1,54 +1,54 @@ Source: swh-storage Maintainer: Software Heritage developers Section: python Priority: optional Build-Depends: debhelper (>= 9), dh-python, python3-all, python3-click, python3-dateutil, python3-flask, python3-nose, python3-psycopg2, python3-requests, python3-setuptools, python3-swh.core (>= 0.0.28~), python3-swh.model (>= 0.0.13~), python3-swh.objstorage (>= 0.0.17~), - python3-swh.scheduler, + python3-swh.scheduler (>= 0.0.11~), python3-vcversioner Standards-Version: 3.9.6 Homepage: https://forge.softwareheritage.org/diffusion/DSTO/ Package: python3-swh.storage Architecture: all Depends: python3-swh.core (>= 0.0.28~), python3-swh.model (>= 0.0.13~), python3-swh.objstorage (>= 0.0.17~), ${misc:Depends}, ${python3:Depends} Description: Software Heritage storage utilities Package: python3-swh.storage.listener Architecture: all Depends: python3-kafka (>= 1.3.1~), python3-swh.storage (= ${binary:Version}), ${misc:Depends}, ${python3:Depends} Description: Software Heritage storage listener Package: python3-swh.storage.archiver Architecture: all -Depends: python3-swh.scheduler, +Depends: python3-swh.scheduler (>= 0.0.11~), python3-swh.storage (= ${binary:Version}), ${misc:Depends}, ${python3:Depends} Description: Software Heritage storage Archiver Package: python3-swh.storage.provenance Architecture: all -Depends: python3-swh.scheduler, +Depends: python3-swh.scheduler (>= 0.0.11~), python3-swh.storage (= ${binary:Version}), ${misc:Depends}, ${python3:Depends} Description: Software Heritage storage Provenance diff --git a/requirements-swh.txt b/requirements-swh.txt index 97cfac1e1..9fb3c4d82 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,4 +1,4 @@ swh.core >= 0.0.28 swh.model >= 0.0.13 swh.objstorage >= 0.0.17 -swh.scheduler +swh.scheduler >= 0.0.11 diff --git a/sql/archiver/swh-archiver-func.sql b/sql/archiver/swh-archiver-func.sql index 3290ffe8a..625ed28bb 100644 --- a/sql/archiver/swh-archiver-func.sql +++ b/sql/archiver/swh-archiver-func.sql @@ -1,92 +1,127 @@ create or replace function swh_mktemp_content_archive() returns void language sql as $$ create temporary table tmp_content_archive ( like content_archive including defaults ) on commit drop; alter table tmp_content_archive drop column copies; alter table tmp_content_archive drop column num_present; $$; COMMENT ON FUNCTION swh_mktemp_content_archive() IS 'Create temporary table content_archive'; create or replace function swh_content_archive_missing(backend_name text) returns setof sha1 language plpgsql as $$ begin return query select content_id from tmp_content_archive tmp where exists ( select 1 from content_archive c where tmp.content_id = c.content_id and (not c.copies ? 
backend_name or c.copies @> jsonb_build_object(backend_name, '{"status": "missing"}'::jsonb)) ); end $$; COMMENT ON FUNCTION swh_content_archive_missing(text) IS 'Filter missing data from a specific backend'; create or replace function swh_content_archive_unknown() returns setof sha1 language plpgsql as $$ begin return query select content_id from tmp_content_archive tmp where not exists ( select 1 from content_archive c where tmp.content_id = c.content_id ); end $$; COMMENT ON FUNCTION swh_content_archive_unknown() IS 'Retrieve list of unknown sha1s'; CREATE OR REPLACE FUNCTION count_copies(from_id bytea, to_id bytea) returns void language sql as $$ with sample as ( select content_id, copies from content_archive where content_id > from_id and content_id <= to_id ), data as ( select substring(content_id from 19) as bucket, jbe.key as archive from sample join lateral jsonb_each(copies) jbe on true where jbe.value->>'status' = 'present' ), bucketed as ( select bucket, archive, count(*) as count from data group by bucket, archive ) update content_archive_counts cac set count = cac.count + bucketed.count from bucketed where cac.archive = bucketed.archive and cac.bucket = bucketed.bucket; $$; comment on function count_copies(bytea, bytea) is 'Count the objects between from_id and to_id, add the results to content_archive_counts'; CREATE OR REPLACE FUNCTION init_content_archive_counts() returns void language sql as $$ insert into content_archive_counts ( select id, decode(lpad(to_hex(bucket), 4, '0'), 'hex')::bucket as bucket, 0 as count from archive join lateral generate_series(0, 65535) bucket on true ) on conflict (archive, bucket) do nothing; $$; comment on function init_content_archive_counts() is 'Initialize the content archive counts for the registered archives'; create type content_archive_count as ( archive text, count bigint ); create or replace function get_content_archive_counts() returns setof content_archive_count language sql as $$ select archive, sum(count)::bigint from content_archive_counts group by archive order by archive; $$; comment on function get_content_archive_counts() is 'Get count for each archive'; + +-- create a temporary table called tmp_TBLNAME, mimicking existing table +-- TBLNAME +create or replace function swh_mktemp(tblname regclass) + returns void + language plpgsql +as $$ +begin + execute format(' + create temporary table tmp_%1$I + (like %1$I including defaults) + on commit drop; + ', tblname); + return; +end +$$; + +comment on function swh_mktemp(regclass) is 'Helper function to create a temporary table mimicking the existing one'; + +-- Helper function to insert new entries in content_archive from a +-- temporary table skipping duplicates. +create or replace function swh_content_archive_add() + returns void + language plpgsql +as $$ +begin + insert into content_archive (content_id, copies, num_present) + select distinct content_id, copies, num_present + from tmp_content_archive + on conflict(content_id) do nothing; + return; +end +$$; + +comment on function swh_content_archive_add() is 'Helper function to insert new entry in content_archive'; diff --git a/sql/archiver/swh-archiver-schema.sql b/sql/archiver/swh-archiver-schema.sql index d6f7f1647..1514d3e63 100644 --- a/sql/archiver/swh-archiver-schema.sql +++ b/sql/archiver/swh-archiver-schema.sql @@ -1,121 +1,121 @@ -- In order to archive the content of the object storage, add -- some tables to keep trace of what have already been archived. 
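-- As an illustrative sketch (the archive names and mtime here are
-- hypothetical), a row of the content_archive table defined below
-- might look like:
--
--   content_id   \x00062f8bd330715c4f819373653d97b3cd34394c
--   copies       {"archive1": {"status": "present", "mtime": 1474806433},
--                 "archive2": {"status": "missing"}}
--   num_present  1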
create table dbversion ( version int primary key, release timestamptz, description text ); comment on table dbversion is 'Schema update tracking'; INSERT INTO dbversion(version, release, description) -VALUES(7, now(), 'Work In Progress'); +VALUES(9, now(), 'Work In Progress'); CREATE TABLE archive ( id text PRIMARY KEY ); comment on table archive is 'Possible archives'; comment on column archive.id is 'Short identifier for the archive'; CREATE TYPE archive_status AS ENUM ( 'missing', 'ongoing', 'present', 'corrupted' ); comment on type archive_status is 'Status of a given archive'; -- a SHA1 checksum (not necessarily originating from Git) CREATE DOMAIN sha1 AS bytea CHECK (LENGTH(VALUE) = 20); -- a bucket for which we count items CREATE DOMAIN bucket AS bytea CHECK (LENGTH(VALUE) = 2); CREATE TABLE content_archive ( content_id sha1 primary key, copies jsonb, num_present int default null ); create index on content_archive(num_present); comment on table content_archive is 'Referencing the status and whereabouts of a content'; comment on column content_archive.content_id is 'content identifier'; comment on column content_archive.copies is 'map archive_id -> { "status": archive_status, "mtime": epoch timestamp }'; comment on column content_archive.num_present is 'Number of copies marked as present (cache updated via trigger)'; CREATE TABLE content_archive_counts ( archive text not null references archive(id), bucket bucket not null, count bigint, primary key (archive, bucket) ); comment on table content_archive_counts is 'Bucketed count of archive contents'; comment on column content_archive_counts.archive is 'the archive for which we''re counting'; comment on column content_archive_counts.bucket is 'the bucket of items we''re counting'; comment on column content_archive_counts.count is 'the number of items counted in the given bucket'; -- Keep the num_copies cache updated CREATE FUNCTION update_num_present() RETURNS TRIGGER AS $$ BEGIN NEW.num_present := (select count(*) from jsonb_each(NEW.copies) where value->>'status' = 'present'); RETURN new; END; $$ LANGUAGE PLPGSQL; CREATE TRIGGER update_num_present BEFORE INSERT OR UPDATE OF copies ON content_archive FOR EACH ROW EXECUTE PROCEDURE update_num_present(); -- keep the content_archive_counts updated -CREATE FUNCTION update_content_archive_counts() RETURNS TRIGGER LANGUAGE PLPGSQL AS $$ +CREATE OR REPLACE FUNCTION update_content_archive_counts() RETURNS TRIGGER LANGUAGE PLPGSQL AS $$ DECLARE content_id sha1; content_bucket bucket; copies record; old_row content_archive; new_row content_archive; BEGIN -- default values for old or new row depending on trigger type if tg_op = 'INSERT' then old_row := (null::sha1, '{}'::jsonb, 0); else old_row := old; end if; if tg_op = 'DELETE' then new_row := (null::sha1, '{}'::jsonb, 0); else new_row := new; end if; -- get the content bucket content_id := coalesce(old_row.content_id, new_row.content_id); content_bucket := substring(content_id from 19)::bucket; -- compare copies present in old and new row for each archive type FOR copies IN select coalesce(o.key, n.key) as archive, o.value->>'status' as old_status, n.value->>'status' as new_status from jsonb_each(old_row.copies) o full outer join lateral jsonb_each(new_row.copies) n on o.key = n.key LOOP -- the count didn't change - CONTINUE WHEN copies.old_status is distinct from copies.new_status OR + CONTINUE WHEN copies.old_status is not distinct from copies.new_status OR (copies.old_status != 'present' AND copies.new_status != 'present'); update 
content_archive_counts cac set count = count + (case when copies.old_status = 'present' then -1 else 1 end) where archive = copies.archive and bucket = content_bucket; END LOOP; return null; END; $$; create trigger update_content_archive_counts AFTER INSERT OR UPDATE OR DELETE ON content_archive FOR EACH ROW EXECUTE PROCEDURE update_content_archive_counts(); diff --git a/sql/archiver/upgrades/008.sql b/sql/archiver/upgrades/008.sql new file mode 100644 index 000000000..6527aca69 --- /dev/null +++ b/sql/archiver/upgrades/008.sql @@ -0,0 +1,49 @@ +-- SWH DB schema upgrade +-- from_version: 7 +-- to_version: 8 +-- description: Fix silly bug in update_content_archive_counts + +INSERT INTO dbversion(version, release, description) +VALUES(8, now(), 'Work In Progress'); + +-- keep the content_archive_counts updated +CREATE OR REPLACE FUNCTION update_content_archive_counts() RETURNS TRIGGER LANGUAGE PLPGSQL AS $$ + DECLARE + content_id sha1; + content_bucket bucket; + copies record; + old_row content_archive; + new_row content_archive; + BEGIN + -- default values for old or new row depending on trigger type + if tg_op = 'INSERT' then + old_row := (null::sha1, '{}'::jsonb, 0); + else + old_row := old; + end if; + if tg_op = 'DELETE' then + new_row := (null::sha1, '{}'::jsonb, 0); + else + new_row := new; + end if; + + -- get the content bucket + content_id := coalesce(old_row.content_id, new_row.content_id); + content_bucket := substring(content_id from 19)::bucket; + + -- compare copies present in old and new row for each archive type + FOR copies IN + select coalesce(o.key, n.key) as archive, o.value->>'status' as old_status, n.value->>'status' as new_status + from jsonb_each(old_row.copies) o full outer join lateral jsonb_each(new_row.copies) n on o.key = n.key + LOOP + -- the count didn't change + CONTINUE WHEN copies.old_status is not distinct from copies.new_status OR + (copies.old_status != 'present' AND copies.new_status != 'present'); + + update content_archive_counts cac + set count = count + (case when copies.old_status = 'present' then -1 else 1 end) + where archive = copies.archive and bucket = content_bucket; + END LOOP; + return null; + END; +$$; diff --git a/sql/archiver/upgrades/009.sql b/sql/archiver/upgrades/009.sql new file mode 100644 index 000000000..5a3133bab --- /dev/null +++ b/sql/archiver/upgrades/009.sql @@ -0,0 +1,42 @@ +-- SWH Archiver DB schema upgrade +-- from_version: 8 +-- to_version: 9 +-- description: Add helper functions to create temporary table and insert new entries in content_archive table + +insert into dbversion(version, release, description) +values(9, now(), 'Work In Progress'); + +-- create a temporary table called tmp_TBLNAME, mimicking existing +-- table TBLNAME +create or replace function swh_mktemp(tblname regclass) + returns void + language plpgsql +as $$ +begin + execute format(' + create temporary table tmp_%1$I + (like %1$I including defaults) + on commit drop; + ', tblname); + return; +end +$$; + +comment on function swh_mktemp(regclass) is 'Helper function to create a temporary table mimicking the existing one'; + +-- Helper function to insert new entries in content_archive from a +-- temporary table skipping duplicates. 
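+--
+-- A typical bulk-insert session might look like this (a sketch; the
+-- COPY payload is hypothetical):
+--
+--   select swh_mktemp('content_archive');
+--   copy tmp_content_archive (content_id, copies, num_present) from stdin;
+--   select swh_content_archive_add();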
+create or replace function swh_content_archive_add() + returns void + language plpgsql +as $$ +begin + insert into content_archive (content_id, copies, num_present) + select distinct content_id, copies, num_present + from tmp_content_archive + on conflict(content_id) do nothing; + return; +end +$$; + +comment on function swh_content_archive_add() is 'Helper function to insert new entry in content_archive'; diff --git a/sql/swh-func.sql b/sql/swh-func.sql index fec23fa2a..2beeff1b4 100644 --- a/sql/swh-func.sql +++ b/sql/swh-func.sql @@ -1,2043 +1,2072 @@ -- create a temporary table called tmp_TBLNAME, mimicking existing table -- TBLNAME -- -- Args: -- tblname: name of the table to mimick create or replace function swh_mktemp(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table tmp_%1$I (like %1$I including defaults) on commit drop; alter table tmp_%1$I drop column if exists object_id; ', tblname); return; end $$; -- create a temporary table for directory entries called tmp_TBLNAME, -- mimicking existing table TBLNAME with an extra dir_id (sha1_git) -- column, and dropping the id column. -- -- This is used to create the tmp_directory_entry_ tables. -- -- Args: -- tblname: name of the table to mimick create or replace function swh_mktemp_dir_entry(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table tmp_%1$I (like %1$I including defaults, dir_id sha1_git) on commit drop; alter table tmp_%1$I drop column id; ', tblname); return; end $$; -- create a temporary table for revisions called tmp_revisions, -- mimicking existing table revision, replacing the foreign keys to -- people with an email and name field -- create or replace function swh_mktemp_revision() returns void language sql as $$ create temporary table tmp_revision ( like revision including defaults, author_fullname bytea, author_name bytea, author_email bytea, committer_fullname bytea, committer_name bytea, committer_email bytea ) on commit drop; alter table tmp_revision drop column author; alter table tmp_revision drop column committer; alter table tmp_revision drop column object_id; $$; -- create a temporary table for releases called tmp_release, -- mimicking existing table release, replacing the foreign keys to -- people with an email and name field -- create or replace function swh_mktemp_release() returns void language sql as $$ create temporary table tmp_release ( like release including defaults, author_fullname bytea, author_name bytea, author_email bytea ) on commit drop; alter table tmp_release drop column author; alter table tmp_release drop column object_id; $$; -- create a temporary table with a single "bytea" column for fast object lookup. 
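--
-- The calling pattern (sketched; the COPY payload is hypothetical)
-- follows the same bulk protocol used throughout this file:
--
--   select swh_mktemp_bytea();
--   copy tmp_bytea (id) from stdin;
--   select * from swh_content_missing_per_sha1();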
create or replace function swh_mktemp_bytea() returns void language sql as $$ create temporary table tmp_bytea ( id bytea ) on commit drop; $$; -- create a temporary table for occurrence_history create or replace function swh_mktemp_occurrence_history() returns void language sql as $$ create temporary table tmp_occurrence_history( like occurrence_history including defaults, visit bigint not null ) on commit drop; alter table tmp_occurrence_history drop column visits, drop column object_id; $$; -- create a temporary table for entity_history, sans id create or replace function swh_mktemp_entity_history() returns void language sql as $$ create temporary table tmp_entity_history ( like entity_history including defaults) on commit drop; alter table tmp_entity_history drop column id; $$; -- create a temporary table for entities called tmp_entity_lister, -- with only the columns necessary for retrieving the uuid of a listed -- entity. create or replace function swh_mktemp_entity_lister() returns void language sql as $$ create temporary table tmp_entity_lister ( id bigint, lister_metadata jsonb ) on commit drop; $$; -- a content signature is a set of cryptographic checksums that we use to -- uniquely identify content, for the purpose of verifying if we already have -- some content or not during content injection create type content_signature as ( sha1 sha1, sha1_git sha1_git, sha256 sha256 ); -- check which entries of tmp_content are missing from content -- -- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content, -- 2. call this function create or replace function swh_content_missing() returns setof content_signature language plpgsql as $$ begin -- This query is critical for (single-algorithm) hash collision detection, -- so we cannot rely only on the fact that a single hash (e.g., sha1) is -- missing from the table content to conclude that a given content is -- missing. Ideally, we would want to (try to) add to content all entries -- in tmp_content that, when considering all columns together, are missing -- from content. -- -- But doing that naively would require a *compound* index on all checksum -- columns; that index would not be significantly smaller than the content -- table itself, and therefore won't be used. Therefore we union together -- all contents that differ on at least one column from what is already -- available. If there is a collision on some (but not all) columns, the -- relevant tmp_content entry will be included in the set of content to be -- added, causing a downstream violation of unicity constraint. return query (select sha1, sha1_git, sha256 from tmp_content as tmp where not exists (select 1 from content as c where c.sha1 = tmp.sha1)) union (select sha1, sha1_git, sha256 from tmp_content as tmp where not exists (select 1 from content as c where c.sha1_git = tmp.sha1_git)) union (select sha1, sha1_git, sha256 from tmp_content as tmp where not exists (select 1 from content as c where c.sha256 = tmp.sha256)); return; end $$; -- check which entries of tmp_content_sha1 are missing from content -- -- operates in bulk: 0. swh_mktemp_content_sha1(), 1. COPY to tmp_content_sha1, -- 2. call this function create or replace function swh_content_missing_per_sha1() returns setof sha1 language plpgsql as $$ begin return query (select id::sha1 from tmp_bytea as tmp where not exists (select 1 from content as c where c.sha1=tmp.id)); end $$; -- check which entries of tmp_skipped_content are missing from skipped_content -- -- operates in bulk: 0. swh_mktemp(skipped_content), 1. 
COPY to tmp_skipped_content, -- 2. call this function create or replace function swh_skipped_content_missing() returns setof content_signature language plpgsql as $$ begin return query select sha1, sha1_git, sha256 from tmp_skipped_content t where not exists (select 1 from skipped_content s where s.sha1 is not distinct from t.sha1 and s.sha1_git is not distinct from t.sha1_git and s.sha256 is not distinct from t.sha256); return; end $$; -- Look up content based on one or several different checksums. Return all -- content information if the content is found; a NULL row otherwise. -- -- At least one checksum should be not NULL. If several are not NULL, they will -- be AND-ed together in the lookup query. -- -- Note: this function is meant to be used to look up individual contents -- (e.g., for the web app), for batch lookup of missing content (e.g., to be -- added) see swh_content_missing create or replace function swh_content_find( sha1 sha1 default NULL, sha1_git sha1_git default NULL, sha256 sha256 default NULL ) returns content language plpgsql as $$ declare con content; filters text[] := array[] :: text[]; -- AND-clauses used to filter content q text; begin if sha1 is not null then filters := filters || format('sha1 = %L', sha1); end if; if sha1_git is not null then filters := filters || format('sha1_git = %L', sha1_git); end if; if sha256 is not null then filters := filters || format('sha256 = %L', sha256); end if; if cardinality(filters) = 0 then return null; else q = format('select * from content where %s', array_to_string(filters, ' and ')); execute q into con; return con; end if; end $$; -- add tmp_content entries to content, skipping duplicates -- -- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content, -- 2. call this function create or replace function swh_content_add() returns void language plpgsql as $$ begin insert into content (sha1, sha1_git, sha256, length, status) select distinct sha1, sha1_git, sha256, length, status from tmp_content where (sha1, sha1_git, sha256) in (select * from swh_content_missing()); -- TODO XXX use postgres 9.5 "UPSERT" support here, when available. -- Specifically, using "INSERT .. ON CONFLICT IGNORE" we can avoid -- the extra swh_content_missing() query here. return; end $$; -- add tmp_skipped_content entries to skipped_content, skipping duplicates -- -- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content, -- 2. call this function create or replace function swh_skipped_content_add() returns void language plpgsql as $$ begin insert into skipped_content (sha1, sha1_git, sha256, length, status, reason, origin) select distinct sha1, sha1_git, sha256, length, status, reason, origin from tmp_skipped_content where (coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')) in (select coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '') from swh_skipped_content_missing()); -- TODO XXX use postgres 9.5 "UPSERT" support here, when available. -- Specifically, using "INSERT .. ON CONFLICT IGNORE" we can avoid -- the extra swh_content_missing() query here. return; end $$; +-- Update content entries from temporary table. 
+-- (columns are potential new columns added to the schema, this cannot be empty) +-- +create or replace function swh_content_update(columns_update text[]) + returns void + language plpgsql +as $$ +declare + query text; + tmp_array text[]; +begin + if array_length(columns_update, 1) = 0 then + raise exception 'Please, provide the list of column names to update.'; + end if; + + tmp_array := array(select format('%1$s=t.%1$s', unnest) from unnest(columns_update)); + + query = format('update content set %s + from tmp_content t where t.sha1 = content.sha1', + array_to_string(tmp_array, ', ')); + + execute query; + + return; +end +$$; + +comment on function swh_content_update(text[]) IS 'Update existing content''s columns'; + -- check which entries of tmp_directory are missing from directory -- -- operates in bulk: 0. swh_mktemp(directory), 1. COPY to tmp_directory, -- 2. call this function create or replace function swh_directory_missing() returns setof sha1_git language plpgsql as $$ begin return query select id from tmp_directory t where not exists ( select 1 from directory d where d.id = t.id); return; end $$; -- Retrieve information on directory from temporary table create or replace function swh_directory_get() returns setof directory language plpgsql as $$ begin return query select d.* from tmp_directory t inner join directory d on t.id = d.id; return; end $$; create type directory_entry_type as enum('file', 'dir', 'rev'); -- Add tmp_directory_entry_* entries to directory_entry_* and directory, -- skipping duplicates in directory_entry_*. This is a generic function that -- works on all kind of directory entries. -- -- operates in bulk: 0. swh_mktemp_dir_entry('directory_entry_*'), 1 COPY to -- tmp_directory_entry_*, 2. call this function -- -- Assumption: this function is used in the same transaction that inserts the -- context directory in table "directory". create or replace function swh_directory_entry_add(typ directory_entry_type) returns void language plpgsql as $$ begin execute format(' insert into directory_entry_%1$s (target, name, perms) select distinct t.target, t.name, t.perms from tmp_directory_entry_%1$s t where not exists ( select 1 from directory_entry_%1$s i where t.target = i.target and t.name = i.name and t.perms = i.perms) ', typ); execute format(' with new_entries as ( select t.dir_id, array_agg(i.id) as entries from tmp_directory_entry_%1$s t inner join directory_entry_%1$s i using (target, name, perms) group by t.dir_id ) update tmp_directory as d set %1$s_entries = new_entries.entries from new_entries where d.id = new_entries.dir_id ', typ); return; end $$; -- Insert the data from tmp_directory, tmp_directory_entry_file, -- tmp_directory_entry_dir, tmp_directory_entry_rev into their final -- tables. -- -- Prerequisites: -- directory ids in tmp_directory -- entries in tmp_directory_entry_{file,dir,rev} -- create or replace function swh_directory_add() returns void language plpgsql as $$ begin perform swh_directory_entry_add('file'); perform swh_directory_entry_add('dir'); perform swh_directory_entry_add('rev'); insert into directory select * from tmp_directory t where not exists ( select 1 from directory d where d.id = t.id); return; end $$; -- a directory listing entry with all the metadata -- -- can be used to list a directory, and retrieve all the data in one go. 
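--
-- For example (a sketch; the directory id is hypothetical), one level
-- of a directory can be listed as rows of this type with:
--
--   select type, name, target
--   from swh_directory_walk_one('\x...'::sha1_git);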
create type directory_entry as ( dir_id sha1_git, -- id of the parent directory type directory_entry_type, -- type of entry target sha1_git, -- id of target name unix_path, -- path name, relative to containing dir perms file_perms, -- unix-like permissions status content_status, -- visible or absent sha1 sha1, -- content if sha1 if type is not dir sha1_git sha1_git, -- content's sha1 git if type is not dir sha256 sha256 -- content's sha256 if type is not dir ); -- List a single level of directory walked_dir_id -- FIXME: order by name is not correct. For git, we need to order by -- lexicographic order but as if a trailing / is present in directory -- name create or replace function swh_directory_walk_one(walked_dir_id sha1_git) returns setof directory_entry language sql stable as $$ with dir as ( select id as dir_id, dir_entries, file_entries, rev_entries from directory where id = walked_dir_id), ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir), ls_f as (select dir_id, unnest(file_entries) as entry_id from dir), ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir) (select dir_id, 'dir'::directory_entry_type as type, e.target, e.name, e.perms, NULL::content_status, NULL::sha1, NULL::sha1_git, NULL::sha256 from ls_d left join directory_entry_dir e on ls_d.entry_id = e.id) union (select dir_id, 'file'::directory_entry_type as type, e.target, e.name, e.perms, c.status, c.sha1, c.sha1_git, c.sha256 from ls_f left join directory_entry_file e on ls_f.entry_id = e.id left join content c on e.target = c.sha1_git) union (select dir_id, 'rev'::directory_entry_type as type, e.target, e.name, e.perms, NULL::content_status, NULL::sha1, NULL::sha1_git, NULL::sha256 from ls_r left join directory_entry_rev e on ls_r.entry_id = e.id) order by name; $$; -- List recursively the revision directory arborescence create or replace function swh_directory_walk(walked_dir_id sha1_git) returns setof directory_entry language sql stable as $$ with recursive entries as ( select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256 from swh_directory_walk_one(walked_dir_id) union all select dir_id, type, target, (dirname || '/' || name)::unix_path as name, perms, status, sha1, sha1_git, sha256 from (select (swh_directory_walk_one(dirs.target)).*, dirs.name as dirname from (select target, name from entries where type = 'dir') as dirs) as with_parent ) select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256 from entries $$; create or replace function swh_revision_walk(revision_id sha1_git) returns setof directory_entry language sql stable as $$ select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256 from swh_directory_walk((select directory from revision where id=revision_id)) $$; COMMENT ON FUNCTION swh_revision_walk(sha1_git) IS 'Recursively list the revision targeted directory arborescence'; -- Find a directory entry by its path create or replace function swh_find_directory_entry_by_path( walked_dir_id sha1_git, dir_or_content_path bytea[]) returns directory_entry language plpgsql as $$ declare end_index integer; paths bytea default ''; path bytea; res bytea[]; r record; begin end_index := array_upper(dir_or_content_path, 1); res[1] := walked_dir_id; for i in 1..end_index loop path := dir_or_content_path[i]; -- concatenate path for patching the name in the result record (if we found it) if i = 1 then paths = path; else paths := paths || '/' || path; -- concatenate paths end if; if i <> end_index then select * from swh_directory_walk_one(res[i] :: 
sha1_git) where name=path and type = 'dir' limit 1 into r; else select * from swh_directory_walk_one(res[i] :: sha1_git) where name=path limit 1 into r; end if; -- find the path if r is null then return null; else -- store the next dir to lookup the next local path from res[i+1] := r.target; end if; end loop; -- at this moment, r is the result. Patch its 'name' with the full path before returning it. r.name := paths; return r; end $$; -- List all revision IDs starting from a given revision, going back in time -- -- TODO ordering: should be breadth-first right now (what do we want?) -- TODO ordering: ORDER BY parent_rank somewhere? create or replace function swh_revision_list(root_revisions bytea[], num_revs bigint default NULL) returns table (id sha1_git, parents bytea[]) language sql stable as $$ with recursive full_rev_list(id) as ( (select id from revision where id = ANY(root_revisions)) union (select h.parent_id from revision_history as h join full_rev_list on h.id = full_rev_list.id) ), rev_list as (select id from full_rev_list limit num_revs) select rev_list.id as id, array(select rh.parent_id::bytea from revision_history rh where rh.id = rev_list.id order by rh.parent_rank ) as parent from rev_list; $$; -- List all the children of a given revision create or replace function swh_revision_list_children(root_revisions bytea[], num_revs bigint default NULL) returns table (id sha1_git, parents bytea[]) language sql stable as $$ with recursive full_rev_list(id) as ( (select id from revision where id = ANY(root_revisions)) union (select h.id from revision_history as h join full_rev_list on h.parent_id = full_rev_list.id) ), rev_list as (select id from full_rev_list limit num_revs) select rev_list.id as id, array(select rh.parent_id::bytea from revision_history rh where rh.id = rev_list.id order by rh.parent_rank ) as parent from rev_list; $$; -- Detailed entry for a revision create type revision_entry as ( id sha1_git, date timestamptz, date_offset smallint, date_neg_utc_offset boolean, committer_date timestamptz, committer_date_offset smallint, committer_date_neg_utc_offset boolean, type revision_type, directory sha1_git, message bytea, author_id bigint, author_fullname bytea, author_name bytea, author_email bytea, committer_id bigint, committer_fullname bytea, committer_name bytea, committer_email bytea, metadata jsonb, synthetic boolean, parents bytea[], object_id bigint ); -- "git style" revision log. 
Similar to swh_revision_list(), but returning all -- information associated to each revision, and expanding authors/committers create or replace function swh_revision_log(root_revisions bytea[], num_revs bigint default NULL) returns setof revision_entry language sql stable as $$ select t.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, t.parents, r.object_id from swh_revision_list(root_revisions, num_revs) as t left join revision r on t.id = r.id left join person a on a.id = r.author left join person c on c.id = r.committer; $$; -- Retrieve revisions from tmp_bytea in bulk create or replace function swh_revision_get() returns setof revision_entry language plpgsql as $$ begin return query select r.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, array(select rh.parent_id::bytea from revision_history rh where rh.id = t.id order by rh.parent_rank) as parents, r.object_id from tmp_bytea t left join revision r on t.id = r.id left join person a on a.id = r.author left join person c on c.id = r.committer; return; end $$; -- List missing revisions from tmp_bytea create or replace function swh_revision_missing() returns setof sha1_git language plpgsql as $$ begin return query select id::sha1_git from tmp_bytea t where not exists ( select 1 from revision r where r.id = t.id); return; end $$; -- Detailed entry for a release create type release_entry as ( id sha1_git, target sha1_git, target_type object_type, date timestamptz, date_offset smallint, date_neg_utc_offset boolean, name bytea, comment bytea, synthetic boolean, author_id bigint, author_fullname bytea, author_name bytea, author_email bytea, object_id bigint ); -- Detailed entry for release create or replace function swh_release_get() returns setof release_entry language plpgsql as $$ begin return query select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment, r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id from tmp_bytea t inner join release r on t.id = r.id inner join person p on p.id = r.author; return; end $$; -- Create entries in person from tmp_revision create or replace function swh_person_add_from_revision() returns void language plpgsql as $$ begin with t as ( select author_fullname as fullname, author_name as name, author_email as email from tmp_revision union select committer_fullname as fullname, committer_name as name, committer_email as email from tmp_revision ) insert into person (fullname, name, email) select distinct fullname, name, email from t where not exists ( select 1 from person p where t.fullname = p.fullname ); return; end $$; -- Create entries in revision from tmp_revision create or replace function swh_revision_add() returns void language plpgsql as $$ begin perform swh_person_add_from_revision(); insert into revision (id, date, date_offset, date_neg_utc_offset, committer_date, committer_date_offset, committer_date_neg_utc_offset, type, directory, message, author, committer, metadata, synthetic) select t.id, t.date, t.date_offset, t.date_neg_utc_offset, t.committer_date, t.committer_date_offset, 
t.committer_date_neg_utc_offset, t.type, t.directory, t.message, a.id, c.id, t.metadata, t.synthetic from tmp_revision t left join person a on a.fullname = t.author_fullname left join person c on c.fullname = t.committer_fullname; return; end $$; -- List missing releases from tmp_bytea create or replace function swh_release_missing() returns setof sha1_git language plpgsql as $$ begin return query select id::sha1_git from tmp_bytea t where not exists ( select 1 from release r where r.id = t.id); end $$; -- Create entries in person from tmp_release create or replace function swh_person_add_from_release() returns void language plpgsql as $$ begin with t as ( select distinct author_fullname as fullname, author_name as name, author_email as email from tmp_release ) insert into person (fullname, name, email) select fullname, name, email from t where not exists ( select 1 from person p where t.fullname = p.fullname ); return; end $$; -- Create entries in release from tmp_release create or replace function swh_release_add() returns void language plpgsql as $$ begin perform swh_person_add_from_release(); insert into release (id, target, target_type, date, date_offset, date_neg_utc_offset, name, comment, author, synthetic) select t.id, t.target, t.target_type, t.date, t.date_offset, t.date_neg_utc_offset, t.name, t.comment, a.id, t.synthetic from tmp_release t left join person a on a.fullname = t.author_fullname; return; end $$; create or replace function swh_occurrence_update_for_origin(origin_id bigint) returns void language sql as $$ delete from occurrence where origin = origin_id; insert into occurrence (origin, branch, target, target_type) select origin, branch, target, target_type from occurrence_history where origin = origin_id and (select visit from origin_visit where origin = origin_id order by date desc limit 1) = any(visits); $$; create or replace function swh_occurrence_update_all() returns void language plpgsql as $$ declare origin_id origin.id%type; begin for origin_id in select distinct id from origin loop perform swh_occurrence_update_for_origin(origin_id); end loop; return; end; $$; -- add a new origin_visit for origin origin_id at date. -- -- Returns the new visit id. create or replace function swh_origin_visit_add(origin_id bigint, date timestamptz) returns bigint language sql as $$ with last_known_visit as ( select coalesce(max(visit), 0) as visit from origin_visit where origin = origin_id ) insert into origin_visit (origin, date, visit, status) values (origin_id, date, (select visit from last_known_visit) + 1, 'ongoing') returning visit; $$; -- add tmp_occurrence_history entries to occurrence_history -- -- operates in bulk: 0. swh_mktemp(occurrence_history), 1. COPY to tmp_occurrence_history, -- 2. 
call this function create or replace function swh_occurrence_history_add() returns void language plpgsql as $$ declare origin_id origin.id%type; begin -- Create or update occurrence_history with occurrence_history_id_visit as ( select tmp_occurrence_history.*, object_id, visits from tmp_occurrence_history left join occurrence_history using(origin, branch, target, target_type) ), occurrences_to_update as ( select object_id, visit from occurrence_history_id_visit where object_id is not null ), update_occurrences as ( update occurrence_history set visits = array(select unnest(occurrence_history.visits) as e union select occurrences_to_update.visit as e order by e) from occurrences_to_update where occurrence_history.object_id = occurrences_to_update.object_id ) insert into occurrence_history (origin, branch, target, target_type, visits) select origin, branch, target, target_type, ARRAY[visit] from occurrence_history_id_visit where object_id is null; -- update occurrence for origin_id in select distinct origin from tmp_occurrence_history loop perform swh_occurrence_update_for_origin(origin_id); end loop; return; end $$; -- Absolute path: directory reference + complete path relative to it create type content_dir as ( directory sha1_git, path unix_path ); -- Find the containing directory of a given content, specified by sha1 -- (note: *not* sha1_git). -- -- Return a pair (dir_it, path) where path is a UNIX path that, from the -- directory root, reach down to a file with the desired content. Return NULL -- if no match is found. -- -- In case of multiple paths (i.e., pretty much always), an arbitrary one is -- chosen. create or replace function swh_content_find_directory(content_id sha1) returns content_dir language sql stable as $$ with recursive path as ( -- Recursively build a path from the requested content to a root -- directory. Each iteration returns a pair (dir_id, filename) where -- filename is relative to dir_id. Stops when no parent directory can -- be found. (select dir.id as dir_id, dir_entry_f.name as name, 0 as depth from directory_entry_file as dir_entry_f join content on content.sha1_git = dir_entry_f.target join directory as dir on dir.file_entries @> array[dir_entry_f.id] where content.sha1 = content_id limit 1) union all (select dir.id as dir_id, (dir_entry_d.name || '/' || path.name)::unix_path as name, path.depth + 1 from path join directory_entry_dir as dir_entry_d on dir_entry_d.target = path.dir_id join directory as dir on dir.dir_entries @> array[dir_entry_d.id] limit 1) ) select dir_id, name from path order by depth desc limit 1; $$; -- Walk the revision history starting from a given revision, until a matching -- occurrence is found. Return all occurrence information if one is found, NULL -- otherwise. 
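--
-- Note that this relies on swh_revision_list_children, i.e. it explores
-- the descendants of the given revision. Usage sketch (the revision id
-- is hypothetical):
--
--   select * from swh_revision_find_occurrence('\x...'::sha1_git);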
create or replace function swh_revision_find_occurrence(revision_id sha1_git) returns occurrence language sql stable as $$ select origin, branch, target, target_type from swh_revision_list_children(ARRAY[revision_id] :: bytea[]) as rev_list left join occurrence_history occ_hist on rev_list.id = occ_hist.target where occ_hist.origin is not null and occ_hist.target_type = 'revision' limit 1; $$; -- Find the visit of origin id closest to date visit_date create or replace function swh_visit_find_by_date(origin bigint, visit_date timestamptz default NOW()) returns origin_visit language sql stable as $$ with closest_two_visits as (( select ov, (date - visit_date) as interval from origin_visit ov where ov.origin = origin and ov.date >= visit_date order by ov.date asc limit 1 ) union ( select ov, (visit_date - date) as interval from origin_visit ov where ov.origin = origin and ov.date < visit_date order by ov.date desc limit 1 )) select (ov).* from closest_two_visits order by interval limit 1 $$; -- Find the visit of origin id closest to date visit_date create or replace function swh_visit_get(origin bigint) returns origin_visit language sql stable as $$ select * from origin_visit where origin=origin order by date desc $$; -- Retrieve occurrence by filtering on origin_id and optionally on -- branch_name and/or validity range create or replace function swh_occurrence_get_by( origin_id bigint, branch_name bytea default NULL, date timestamptz default NULL) returns setof occurrence_history language plpgsql as $$ declare filters text[] := array[] :: text[]; -- AND-clauses used to filter content visit_id bigint; q text; begin if origin_id is null then raise exception 'Needs an origin_id to get an occurrence.'; end if; filters := filters || format('origin = %L', origin_id); if branch_name is not null then filters := filters || format('branch = %L', branch_name); end if; if date is not null then select visit from swh_visit_find_by_date(origin_id, date) into visit_id; else select visit from origin_visit where origin = origin_id order by origin_visit.date desc limit 1 into visit_id; end if; if visit_id is null then return; end if; filters := filters || format('%L = any(visits)', visit_id); q = format('select * from occurrence_history where %s', array_to_string(filters, ' and ')); return query execute q; end $$; -- Retrieve revisions by occurrence criterion filtering create or replace function swh_revision_get_by( origin_id bigint, branch_name bytea default NULL, date timestamptz default NULL) returns setof revision_entry language sql stable as $$ select r.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank ) as parents, r.object_id from swh_occurrence_get_by(origin_id, branch_name, date) as occ inner join revision r on occ.target = r.id left join person a on a.id = r.author left join person c on c.id = r.committer; $$; -- Retrieve a release by occurrence criterion create or replace function swh_release_get_by( origin_id bigint) returns setof release_entry language sql stable as $$ select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment, r.synthetic, a.id as author_id, a.fullname as author_fullname, a.name as author_name, a.email as author_email, r.object_id from 
release r inner join occurrence_history occ on occ.target = r.target left join person a on a.id = r.author where occ.origin = origin_id and occ.target_type = 'revision' and r.target_type = 'revision'; $$; create type content_provenance as ( content sha1_git, revision sha1_git, origin bigint, visit bigint, path unix_path ); COMMENT ON TYPE content_provenance IS 'Provenance information on content'; create or replace function swh_content_find_provenance(content_id sha1_git) returns setof content_provenance language sql as $$ with subscripted_paths as ( select content, revision_paths, generate_subscripts(revision_paths, 1) as s from cache_content_revision where content = content_id ), cleaned_up_contents as ( select content, revision_paths[s][1]::sha1_git as revision, revision_paths[s][2]::unix_path as path from subscripted_paths ) select cuc.content, cuc.revision, cro.origin, cro.visit, cuc.path from cleaned_up_contents cuc inner join cache_revision_origin cro using(revision) $$; COMMENT ON FUNCTION swh_content_find_provenance(sha1_git) IS 'Given a content, provide provenance information on it'; create type object_found as ( sha1_git sha1_git, type object_type, id bytea, -- sha1 or sha1_git depending on object_type object_id bigint ); -- Find objects by sha1_git, return their type and their main identifier create or replace function swh_object_find_by_sha1_git() returns setof object_found language plpgsql as $$ begin return query with known_objects as (( select id as sha1_git, 'release'::object_type as type, id, object_id from release r where exists (select 1 from tmp_bytea t where t.id = r.id) ) union all ( select id as sha1_git, 'revision'::object_type as type, id, object_id from revision r where exists (select 1 from tmp_bytea t where t.id = r.id) ) union all ( select id as sha1_git, 'directory'::object_type as type, id, object_id from directory d where exists (select 1 from tmp_bytea t where t.id = d.id) ) union all ( select sha1_git as sha1_git, 'content'::object_type as type, sha1 as id, object_id from content c where exists (select 1 from tmp_bytea t where t.id = c.sha1_git) )) select t.id::sha1_git as sha1_git, k.type, k.id, k.object_id from tmp_bytea t left join known_objects k on t.id = k.sha1_git; end $$; -- Create entries in entity_history from tmp_entity_history -- -- TODO: do something smarter to compress the entries if the data -- didn't change. 
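--
-- Bulk pattern (a sketch; the column list is abbreviated and the COPY
-- payload hypothetical):
--
--   select swh_mktemp_entity_history();
--   copy tmp_entity_history (uuid, parent, name, ...) from stdin;
--   select swh_entity_history_add();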
create or replace function swh_entity_history_add() returns void language plpgsql as $$ begin insert into entity_history ( uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, validity ) select * from tmp_entity_history; return; end $$; create or replace function swh_update_entity_from_entity_history() returns trigger language plpgsql as $$ begin insert into entity (uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, last_seen, last_id) select uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, unnest(validity), id from entity_history where uuid = NEW.uuid order by unnest(validity) desc limit 1 on conflict (uuid) do update set parent = EXCLUDED.parent, name = EXCLUDED.name, type = EXCLUDED.type, description = EXCLUDED.description, homepage = EXCLUDED.homepage, active = EXCLUDED.active, generated = EXCLUDED.generated, lister_metadata = EXCLUDED.lister_metadata, metadata = EXCLUDED.metadata, last_seen = EXCLUDED.last_seen, last_id = EXCLUDED.last_id; return null; end $$; create trigger update_entity after insert or update on entity_history for each row execute procedure swh_update_entity_from_entity_history(); -- map an id of tmp_entity_lister to a full entity create type entity_id as ( id bigint, uuid uuid, parent uuid, name text, type entity_type, description text, homepage text, active boolean, generated boolean, lister_metadata jsonb, metadata jsonb, last_seen timestamptz, last_id bigint ); -- find out the uuid of the entries of entity with the metadata -- contained in tmp_entity_lister create or replace function swh_entity_from_tmp_entity_lister() returns setof entity_id language plpgsql as $$ begin return query select t.id, e.* from tmp_entity_lister t left join entity e on e.lister_metadata @> t.lister_metadata; return; end $$; create or replace function swh_entity_get(entity_uuid uuid) returns setof entity language sql stable as $$ with recursive entity_hierarchy as ( select e.* from entity e where uuid = entity_uuid union select p.* from entity_hierarchy e join entity p on e.parent = p.uuid ) select * from entity_hierarchy; $$; -- Object listing by object_id create or replace function swh_content_list_by_object_id( min_excl bigint, max_incl bigint ) returns setof content language sql stable as $$ select * from content where object_id > min_excl and object_id <= max_incl order by object_id; $$; create or replace function swh_revision_list_by_object_id( min_excl bigint, max_incl bigint ) returns setof revision_entry language sql stable as $$ with revs as ( select * from revision where object_id > min_excl and object_id <= max_incl ) select r.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank) as parents, r.object_id from revs r left join person a on a.id = r.author left join person c on c.id = r.committer order by r.object_id; $$; create or replace function swh_release_list_by_object_id( min_excl bigint, max_incl bigint ) returns setof release_entry language sql stable as $$ with rels as ( select * from release where object_id > min_excl and object_id <= max_incl ) select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, 
r.comment, r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id from rels r left join person p on p.id = r.author order by r.object_id; $$; create or replace function swh_cache_content_revision_add() returns void language plpgsql as $$ declare cnt bigint; d sha1_git; begin delete from tmp_bytea t where exists (select 1 from cache_content_revision_processed ccrp where t.id = ccrp.revision); select count(*) from tmp_bytea into cnt; if cnt <> 0 then create temporary table tmp_ccr ( content sha1_git, directory sha1_git, path unix_path ) on commit drop; create temporary table tmp_ccrd ( directory sha1_git, revision sha1_git ) on commit drop; insert into tmp_ccrd select directory, id as revision from tmp_bytea inner join revision using(id); insert into cache_content_revision_processed select distinct id from tmp_bytea order by id; for d in select distinct directory from tmp_ccrd loop insert into tmp_ccr select sha1_git as content, d as directory, name as path from swh_directory_walk(d) where type='file'; end loop; with revision_contents as ( select content, false as blacklisted, array_agg(ARRAY[revision::bytea, path::bytea]) as revision_paths from tmp_ccr inner join tmp_ccrd using (directory) group by content order by content ), updated_cache_entries as ( update cache_content_revision ccr set revision_paths = ccr.revision_paths || rc.revision_paths from revision_contents rc where ccr.content = rc.content and ccr.blacklisted = false returning ccr.content ) insert into cache_content_revision select * from revision_contents rc where not exists (select 1 from updated_cache_entries uce where uce.content = rc.content) order by rc.content on conflict (content) do update set revision_paths = cache_content_revision.revision_paths || EXCLUDED.revision_paths where cache_content_revision.blacklisted = false; return; else return; end if; end $$; COMMENT ON FUNCTION swh_cache_content_revision_add() IS 'Cache the revisions from tmp_bytea into cache_content_revision'; create or replace function swh_occurrence_by_origin_visit(origin_id bigint, visit_id bigint) returns setof occurrence language sql stable as $$ select origin, branch, target, target_type from occurrence_history where origin = origin_id and visit_id = ANY(visits); $$; create type cache_content_signature as ( sha1 sha1, sha1_git sha1_git, sha256 sha256, revision_paths bytea[][] ); create or replace function swh_cache_content_get_all() returns setof cache_content_signature language sql stable as $$ SELECT c.sha1, c.sha1_git, c.sha256, ccr.revision_paths FROM cache_content_revision ccr INNER JOIN content as c ON ccr.content = c.sha1_git $$; COMMENT ON FUNCTION swh_cache_content_get_all() IS 'Retrieve batch of contents'; create or replace function swh_cache_content_get(target sha1_git) returns setof cache_content_signature language sql stable as $$ SELECT c.sha1, c.sha1_git, c.sha256, ccr.revision_paths FROM cache_content_revision ccr INNER JOIN content as c ON ccr.content = c.sha1_git where ccr.content = target $$; COMMENT ON FUNCTION swh_cache_content_get(sha1_git) IS 'Retrieve cache content information'; create or replace function swh_revision_from_target(target sha1_git, target_type object_type) returns sha1_git language plpgsql as $$ #variable_conflict use_variable begin while target_type = 'release' loop select r.target, r.target_type from release r where r.id = target into target, target_type; end loop; if target_type = 'revision' then return target; else return null; end if; 
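  -- note: releases may point at other releases; the loop above peels
  -- the chain until a non-release target is reached, so by this point
  -- we have already returned either a revision id or null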
end $$; create or replace function swh_cache_revision_origin_add(origin_id bigint, visit_id bigint) returns setof sha1_git language plpgsql as $$ declare visit_exists bool; begin select true from origin_visit where origin = origin_id and visit = visit_id into visit_exists; if not visit_exists then return; end if; visit_exists := null; select true from cache_revision_origin where origin = origin_id and visit = visit_id limit 1 into visit_exists; if visit_exists then return; end if; return query with new_pointed_revs as ( select swh_revision_from_target(target, target_type) as id from swh_occurrence_by_origin_visit(origin_id, visit_id) ), old_pointed_revs as ( select swh_revision_from_target(target, target_type) as id from swh_occurrence_by_origin_visit(origin_id, (select visit from origin_visit where origin = origin_id and visit < visit_id order by visit desc limit 1)) ), new_revs as ( select distinct id from swh_revision_list(array(select id::bytea from new_pointed_revs where id is not null)) ), old_revs as ( select distinct id from swh_revision_list(array(select id::bytea from old_pointed_revs where id is not null)) ) insert into cache_revision_origin (revision, origin, visit) select n.id as revision, origin_id, visit_id from new_revs n where not exists ( select 1 from old_revs o where o.id = n.id) returning revision; end $$; -- create a temporary table for content_ctags tmp_content_mimetype_missing, create or replace function swh_mktemp_content_mimetype_missing() returns void language sql as $$ create temporary table tmp_content_mimetype_missing ( id sha1, tool_name text, tool_version text ) on commit drop; $$; comment on function swh_mktemp_content_mimetype_missing() IS 'Helper table to filter existing mimetype information'; -- check which entries of tmp_bytea are missing from content_mimetype -- -- operates in bulk: 0. swh_mktemp_bytea(), 1. COPY to tmp_bytea, -- 2. call this function create or replace function swh_content_mimetype_missing() returns setof sha1 language plpgsql as $$ begin return query (select id::sha1 from tmp_content_mimetype_missing as tmp where not exists (select 1 from content_mimetype as c inner join indexer_configuration i on (tmp.tool_name = i.tool_name and tmp.tool_version = i.tool_version) where c.id = tmp.id)); return; end $$; comment on function swh_content_mimetype_missing() is 'Filter existing mimetype information'; -- create a temporary table for content_ctags tmp_content_mimetype, create or replace function swh_mktemp_content_mimetype() returns void language sql as $$ create temporary table tmp_content_mimetype ( like content_mimetype including defaults ) on commit drop; alter table tmp_content_mimetype drop column indexer_configuration_id, add column tool_name text, add column tool_version text; $$; comment on function swh_mktemp_content_mimetype() IS 'Helper table to add mimetype information'; -- add tmp_content_mimetype entries to content_mimetype, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_mimetype_missing must take place before calling this -- function. -- -- -- operates in bulk: 0. swh_mktemp(content_mimetype), 1. COPY to tmp_content_mimetype, -- 2. 
-- add tmp_content_mimetype entries to content_mimetype, overwriting
-- duplicates if conflict_update is true, skipping duplicates otherwise.
--
-- If filtering duplicates is in order, the call to
-- swh_content_mimetype_missing must take place before calling this
-- function.
--
-- operates in bulk: 0. swh_mktemp(content_mimetype), 1. COPY to
-- tmp_content_mimetype, 2. call this function
create or replace function swh_content_mimetype_add(conflict_update boolean)
    returns void
    language plpgsql
as $$
begin
    if conflict_update then
        insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id)
        select id, mimetype, encoding,
               (select id from indexer_configuration
                where tool_name=tcm.tool_name and tool_version=tcm.tool_version)
        from tmp_content_mimetype tcm
            on conflict(id, indexer_configuration_id)
                do update set mimetype = excluded.mimetype,
                              encoding = excluded.encoding;
    else
        insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id)
        select id, mimetype, encoding,
               (select id from indexer_configuration
                where tool_name=tcm.tool_name and tool_version=tcm.tool_version)
        from tmp_content_mimetype tcm
            on conflict(id, indexer_configuration_id) do nothing;
    end if;
    return;
end
$$;

comment on function swh_content_mimetype_add(boolean) IS 'Add new content mimetypes';

create type content_mimetype_signature as(
    id sha1,
    mimetype bytea,
    encoding bytea,
    tool_name text,
    tool_version text
);

-- Retrieve list of content mimetype from the temporary table.
--
-- operates in bulk: 0. mktemp(tmp_bytea), 1. COPY to tmp_bytea,
-- 2. call this function
create or replace function swh_content_mimetype_get()
    returns setof content_mimetype_signature
    language plpgsql
as $$
begin
    return query
        select c.id, mimetype, encoding, tool_name, tool_version
        from tmp_bytea t
        inner join content_mimetype c on c.id=t.id
        inner join indexer_configuration i on c.indexer_configuration_id=i.id;
    return;
end
$$;

comment on function swh_content_mimetype_get() IS 'List content''s mimetypes';

-- create a temporary table for content_language tmp_content_language_missing,
create or replace function swh_mktemp_content_language_missing()
    returns void
    language sql
as $$
  create temporary table tmp_content_language_missing (
    id sha1,
    lang languages,
    tool_name text,
    tool_version text
  ) on commit drop;
$$;

comment on function swh_mktemp_content_language_missing() is 'Helper table to filter missing language';

-- check which entries of tmp_content_language_missing are missing from content_language
--
-- operates in bulk: 0. swh_mktemp_content_language_missing(),
-- 1. COPY to tmp_content_language_missing, 2. call this function
create or replace function swh_content_language_missing()
    returns setof sha1
    language plpgsql
as $$
begin
    return query
        select id::sha1 from tmp_content_language_missing as tmp
        where not exists
            (select 1 from content_language as c
             inner join indexer_configuration i
             on (tmp.tool_name = i.tool_name and tmp.tool_version = i.tool_version)
             where c.id = tmp.id);
    return;
end
$$;

comment on function swh_content_language_missing() IS 'Filter missing content languages';

-- add tmp_content_language entries to content_language, overwriting
-- duplicates if conflict_update is true, skipping duplicates otherwise.
--
-- If filtering duplicates is in order, the call to
-- swh_content_language_missing must take place before calling this
-- function.
--
-- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to
-- tmp_content_language, 2.
call this function create or replace function swh_content_language_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then insert into content_language (id, lang, indexer_configuration_id) select id, lang, (select id from indexer_configuration where tool_name=tcl.tool_name and tool_version=tcl.tool_version) from tmp_content_language tcl on conflict(id, indexer_configuration_id) do update set lang = excluded.lang; else insert into content_language (id, lang, indexer_configuration_id) select id, lang, (select id from indexer_configuration where tool_name=tcl.tool_name and tool_version=tcl.tool_version) from tmp_content_language tcl on conflict(id, indexer_configuration_id) do nothing; end if; return; end $$; comment on function swh_content_language_add(boolean) IS 'Add new content languages'; -- create a temporary table for retrieving content_language create or replace function swh_mktemp_content_language() returns void language sql as $$ create temporary table tmp_content_language ( like content_language including defaults ) on commit drop; alter table tmp_content_language drop column indexer_configuration_id, add column tool_name text, add column tool_version text; $$; comment on function swh_mktemp_content_language() is 'Helper table to add content language'; create type content_language_signature as ( id sha1, lang languages, tool_name text, tool_version text ); -- Retrieve list of content language from the temporary table. -- -- operates in bulk: 0. mktemp(tmp_bytea), 1. COPY to tmp_bytea, 2. call this function create or replace function swh_content_language_get() returns setof content_language_signature language plpgsql as $$ begin return query select c.id, lang, tool_name, tool_version from tmp_bytea t inner join content_language c on c.id = t.id inner join indexer_configuration i on i.id=c.indexer_configuration_id; return; end $$; comment on function swh_content_language_get() is 'List content''s language'; -- create a temporary table for content_ctags tmp_content_ctags, create or replace function swh_mktemp_content_ctags() returns void language sql as $$ create temporary table tmp_content_ctags ( like content_ctags including defaults ) on commit drop; alter table tmp_content_ctags drop column indexer_configuration_id, add column tool_name text, add column tool_version text; $$; comment on function swh_mktemp_content_ctags() is 'Helper table to add content ctags'; -- add tmp_content_ctags entries to content_ctags, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- operates in bulk: 0. swh_mktemp(content_ctags), 1. COPY to tmp_content_ctags, -- 2. 
call this function create or replace function swh_content_ctags_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then delete from content_ctags where id in (select tmp.id from tmp_content_ctags tmp inner join indexer_configuration i on (i.tool_name=tmp.tool_name and i.tool_version = tmp.tool_version)); end if; insert into content_ctags (id, name, kind, line, lang, indexer_configuration_id) select id, name, kind, line, lang, (select id from indexer_configuration where tool_name=tct.tool_name and tool_version=tct.tool_version) from tmp_content_ctags tct on conflict(id, hash_sha1(name), kind, line, lang, indexer_configuration_id) do nothing; return; end $$; comment on function swh_content_ctags_add(boolean) IS 'Add new ctags symbols per content'; -- create a temporary table for content_ctags missing routine create or replace function swh_mktemp_content_ctags_missing() returns void language sql as $$ create temporary table tmp_content_ctags_missing ( id sha1, tool_name text, tool_version text ) on commit drop; $$; comment on function swh_mktemp_content_ctags_missing() is 'Helper table to filter missing content ctags'; -- check which entries of tmp_bytea are missing from content_ctags -- -- operates in bulk: 0. swh_mktemp_bytea(), 1. COPY to tmp_bytea, -- 2. call this function create or replace function swh_content_ctags_missing() returns setof sha1 language plpgsql as $$ begin return query (select id::sha1 from tmp_content_ctags_missing as tmp where not exists (select 1 from content_ctags as c inner join indexer_configuration i on (tmp.tool_name = i.tool_name and tmp.tool_version = i.tool_version) where c.id = tmp.id limit 1)); return; end $$; comment on function swh_content_ctags_missing() IS 'Filter missing content ctags'; create type content_ctags_signature as ( id sha1, name text, kind text, line bigint, lang ctags_languages, tool_name text, tool_version text ); -- Retrieve list of content ctags from the temporary table. -- -- operates in bulk: 0. mktemp(tmp_bytea), 1. COPY to tmp_bytea, 2. call this function create or replace function swh_content_ctags_get() returns setof content_ctags_signature language plpgsql as $$ begin return query select c.id, c.name, c.kind, c.line, c.lang, i.tool_name, i.tool_version from tmp_bytea t inner join content_ctags c using(id) inner join indexer_configuration i on i.id = c.indexer_configuration_id order by line; return; end $$; comment on function swh_content_ctags_get() IS 'List content ctags'; -- Search within ctags content. 
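--
-- For illustration (a hedged sketch), an equality search for the symbol
-- 'main' can be paginated by passing the last sha1 of the previous batch:
--
--   select * from swh_content_ctags_search('main', 10);
--   select * from swh_content_ctags_search('main', 10, <last sha1 returned>);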
--
create or replace function swh_content_ctags_search(
       expression text,
       l integer default 10,
       last_sha1 sha1 default '\x0000000000000000000000000000000000000000')
    returns setof content_ctags_signature
    language sql
as $$
    select c.id, name, kind, line, lang, tool_name, tool_version
    from content_ctags c
    inner join indexer_configuration i on i.id = c.indexer_configuration_id
    where hash_sha1(name) = hash_sha1(expression)
    and c.id > last_sha1
    order by id
    limit l;
$$;

comment on function swh_content_ctags_search(text, integer, sha1) IS 'Equality search through ctags'' symbols';

-- create a temporary table for content_fossology_license_missing
create or replace function swh_mktemp_content_fossology_license_missing()
    returns void
    language sql
as $$
  create temporary table tmp_content_fossology_license_missing (
    id bytea,
    tool_name text,
    tool_version text
  ) on commit drop;
$$;

comment on function swh_mktemp_content_fossology_license_missing() is 'Helper table to filter missing content licenses';

-- check which entries of tmp_content_fossology_license_missing are missing from content_fossology_license
create or replace function swh_content_fossology_license_missing()
    returns setof sha1
    language plpgsql
as $$
begin
    return query
        (select id::sha1 from tmp_content_fossology_license_missing as tmp
         where not exists
             (select 1 from content_fossology_license as c
              inner join indexer_configuration i on i.id=c.indexer_configuration_id
              where c.id = tmp.id));
    return;
end
$$;

comment on function swh_content_fossology_license_missing() IS 'Filter missing content licenses';

-- create a temporary table for content_fossology_license tmp_content_fossology_license,
create or replace function swh_mktemp_content_fossology_license()
    returns void
    language sql
as $$
  create temporary table tmp_content_fossology_license (
    id           sha1,
    tool_name    text,
    tool_version text,
    license      text
  ) on commit drop;
$$;

comment on function swh_mktemp_content_fossology_license() is 'Helper table to add content license';

-- add tmp_content_fossology_license entries to content_fossology_license, overwriting
-- duplicates if conflict_update is true, skipping duplicates otherwise.
--
-- If filtering duplicates is in order, the call to
-- swh_content_fossology_license_missing must take place before calling this
-- function.
--
-- operates in bulk: 0. swh_mktemp(content_fossology_license), 1. COPY to
-- tmp_content_fossology_license, 2.
call this function create or replace function swh_content_fossology_license_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then -- delete from content_fossology_license c -- using tmp_content_fossology_license tmp, indexer_configuration i -- where c.id = tmp.id and i.tool_name = tmp.tool_name and i.tool_version = tmp.tool_version; delete from content_fossology_license where id in (select tmp.id from tmp_content_fossology_license tmp inner join indexer_configuration i on (i.tool_name=tmp.tool_name and i.tool_version = tmp.tool_version)); end if; insert into content_fossology_license (id, license_id, indexer_configuration_id) select tcl.id, (select id from fossology_license where name = tcl.license) as license, (select id from indexer_configuration where tool_name = tcl.tool_name and tool_version = tcl.tool_version) as indexer_configuration_id from tmp_content_fossology_license tcl on conflict(id, license_id, indexer_configuration_id) do nothing; return; end $$; comment on function swh_content_fossology_license_add(boolean) IS 'Add new content licenses'; create or replace function swh_content_fossology_license_unknown() returns setof text language plpgsql as $$ begin return query select name from tmp_content_fossology_license_unknown t where not exists ( select 1 from fossology_license where name=t.name ); end $$; comment on function swh_content_fossology_license_unknown() IS 'List unknown licenses'; -- create a temporary table for checking licenses' name create or replace function swh_mktemp_content_fossology_license_unknown() returns void language sql as $$ create temporary table tmp_content_fossology_license_unknown ( name text not null ) on commit drop; $$; comment on function swh_mktemp_content_fossology_license_unknown() is 'Helper table to list unknown licenses'; create type content_fossology_license_signature as ( id sha1, tool_name text, tool_version text, licenses text[] ); -- Retrieve list of content license from the temporary table. -- -- operates in bulk: 0. mktemp(tmp_bytea), 1. COPY to tmp_bytea, -- 2. 
call this function create or replace function swh_content_fossology_license_get() returns setof content_fossology_license_signature language plpgsql as $$ begin return query select cl.id, ic.tool_name, ic.tool_version, array(select name from fossology_license where id = ANY(array_agg(cl.license_id))) as licenses from tmp_bytea tcl inner join content_fossology_license cl using(id) inner join indexer_configuration ic on ic.id=cl.indexer_configuration_id group by cl.id, ic.tool_name, ic.tool_version; return; end $$; comment on function swh_content_fossology_license_get() IS 'List content licenses'; -- simple counter mapping a textual label to an integer value create type counter as ( label text, value bigint ); -- return statistics about the number of tuples in various SWH tables -- -- Note: the returned values are based on postgres internal statistics -- (pg_class table), which are only updated daily (by autovacuum) or so create or replace function swh_stat_counters() returns setof counter language sql stable as $$ select relname::text as label, reltuples::bigint as value from pg_class where oid in ( 'public.content'::regclass, 'public.directory'::regclass, 'public.directory_entry_dir'::regclass, 'public.directory_entry_file'::regclass, 'public.directory_entry_rev'::regclass, 'public.occurrence'::regclass, 'public.occurrence_history'::regclass, 'public.origin'::regclass, 'public.person'::regclass, 'public.entity'::regclass, 'public.entity_history'::regclass, 'public.release'::regclass, 'public.revision'::regclass, 'public.revision_history'::regclass, 'public.skipped_content'::regclass ); $$; diff --git a/sql/swh-schema.sql b/sql/swh-schema.sql index 4067bce00..22fcbbabd 100644 --- a/sql/swh-schema.sql +++ b/sql/swh-schema.sql @@ -1,460 +1,460 @@ --- --- Software Heritage Data Model --- -- drop schema if exists swh cascade; -- create schema swh; -- set search_path to swh; create table dbversion ( version int primary key, release timestamptz, description text ); insert into dbversion(version, release, description) - values(100, now(), 'Work In Progress'); + values(101, now(), 'Work In Progress'); -- a SHA1 checksum (not necessarily originating from Git) create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); -- a SHA256 checksum create domain sha256 as bytea check (length(value) = 32); -- UNIX path (absolute, relative, individual path component, etc.) create domain unix_path as bytea; -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod create domain file_perms as int; -- Checksums about actual file content. Note that the content itself is not -- stored in the DB, but on external (key-value) storage. A single checksum is -- used as key there, but the other can be used to verify that we do not inject -- content collisions not knowingly. create table content ( sha1 sha1 not null, sha1_git sha1_git not null, sha256 sha256 not null, length bigint not null, ctime timestamptz not null default now(), -- creation time, i.e. time of (first) injection into the storage status content_status not null default 'visible', object_id bigserial ); -- Entities constitute a typed hierarchy of organization, hosting -- facilities, groups, people and software projects. -- -- Examples of entities: Software Heritage, Debian, GNU, GitHub, -- Apache, The Linux Foundation, the Debian Python Modules Team, the -- torvalds GitHub user, the torvalds/linux GitHub project. 
--
-- The data model is hierarchical (via the parent attribute) and might
-- store sub-branches of existing entities. The key feature of an
-- entity is that it might be *listed* (if it is available in
-- listable_entity) to retrieve information about its content, i.e.:
-- sub-entities, projects, origins.

-- The history of entities. Allows us to keep historical metadata
-- about entities. The temporal invariant is the uuid. Root
-- organization uuids are manually generated (and available in
-- swh-data.sql).
--
-- For generated entities (generated = true), we can provide
-- generation_metadata to allow listers to retrieve the uuids of previous
-- iterations of the entity.
--
-- Inactive entities that have been active in the past (active =
-- false) should register the timestamp at which we saw them
-- deactivate, in a new entry of entity_history.
create table entity_history
(
  id               bigserial not null,
  uuid             uuid,
  parent           uuid,             -- should reference entity_history(uuid)
  name             text not null,
  type             entity_type not null,
  description      text,
  homepage         text,
  active           boolean not null, -- whether the entity was seen on the last listing
  generated        boolean not null, -- whether this entity has been generated by a lister
  lister_metadata  jsonb,            -- lister-specific metadata, used for queries
  metadata         jsonb,
  validity         timestamptz[]     -- timestamps at which we have seen this entity
);

-- The entity table provides a view of the latest information on a
-- given entity. It is updated via a trigger on entity_history.
create table entity
(
  uuid             uuid not null,
  parent           uuid,
  name             text not null,
  type             entity_type not null,
  description      text,
  homepage         text,
  active           boolean not null, -- whether the entity was seen on the last listing
  generated        boolean not null, -- whether this entity has been generated by a lister
  lister_metadata  jsonb,            -- lister-specific metadata, used for queries
  metadata         jsonb,
  last_seen        timestamptz,      -- last listing time or disappearance time for active=false
  last_id          bigint            -- last listing id
);

-- Register the equivalence between two entities. Allows sideways
-- navigation in the entity table
create table entity_equivalence
(
  entity1 uuid,
  entity2 uuid
);

-- Register a lister for a specific entity.
create table listable_entity
(
  uuid         uuid,
  enabled      boolean not null default true, -- do we list this entity automatically?
  list_engine  text,  -- crawler to be used to list entity's content
  list_url     text,  -- root URL to start the listing
  list_params  jsonb, -- org-specific listing parameter
  latest_list  timestamptz -- last time the entity's content has been listed
);

-- Log of all entity listings (i.e., entity crawling) that have been
-- done in the past, or are still ongoing.
create table list_history
(
  id        bigserial not null,
  date      timestamptz not null,
  status    boolean,  -- true if and only if the listing has been successful
  result    jsonb,    -- more detailed return value, depending on status
  stdout    text,
  stderr    text,
  duration  interval, -- fetch duration, or NULL if still ongoing
  entity    uuid
);
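
-- Illustrative query, not part of the schema: assuming list_history is
-- populated, the most recent successful listing per entity is
--
--   select entity, max(date) from list_history where status group by entity;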
-- An origin is a place, identified by an URL, where software can be found. We
-- support different kinds of origins, e.g., git and other VCS repositories,
-- web pages that list tarballs URLs (e.g., http://www.kernel.org), indirect
-- tarball URLs (e.g., http://www.example.org/latest.tar.gz), etc. The key
-- feature of an origin is that it can be *fetched* (wget, git clone, svn
-- checkout, etc.) to retrieve all the contained software.
create table origin
(
  id       bigserial not null,
  type     text, -- TODO use an enum here (?)
  url      text not null,
  lister   uuid,
  project  uuid
);

-- Content we have seen but skipped for some reason. This table is
-- separate from the content table as we might not have the sha1
-- checksum of that data (for instance when we inject git
-- repositories, objects that are too big will be skipped here, and we
-- will only know their sha1_git). 'reason' contains the reason the
-- content was skipped. origin is a nullable column allowing to find
-- out which origin contains that skipped content.
create table skipped_content
(
  sha1       sha1,
  sha1_git   sha1_git,
  sha256     sha256,
  length     bigint not null,
  ctime      timestamptz not null default now(),
  status     content_status not null default 'absent',
  reason     text not null,
  origin     bigint,
  object_id  bigserial
);

-- Log of all origin fetches (i.e., origin crawling) that have been done in the
-- past, or are still ongoing. Similar to list_history, but for origins.
create table fetch_history
(
  id        bigserial,
  origin    bigint,
  date      timestamptz not null,
  status    boolean,  -- true if and only if the fetch has been successful
  result    jsonb,    -- more detailed returned values, times, etc...
  stdout    text,
  stderr    text,     -- null when status is true, filled otherwise
  duration  interval  -- fetch duration, or NULL if still ongoing
);

-- A file-system directory. A directory is a list of directory entries (see
-- tables: directory_entry_{dir,file}).
--
-- To list the contents of a directory:
-- 1. list the contained directory_entry_dir using array dir_entries
-- 2. list the contained directory_entry_file using array file_entries
-- 3. list the contained directory_entry_rev using array rev_entries
-- 4. UNION
--
-- Synonyms/mappings:
-- * git: tree
create table directory
(
  id            sha1_git,
  dir_entries   bigint[],  -- sub-directories, reference directory_entry_dir
  file_entries  bigint[],  -- contained files, reference directory_entry_file
  rev_entries   bigint[],  -- mounted revisions, reference directory_entry_rev
  object_id     bigserial  -- short object identifier
);

-- A directory entry pointing to a sub-directory.
create table directory_entry_dir
(
  id      bigserial,
  target  sha1_git,   -- id of target directory
  name    unix_path,  -- path name, relative to containing dir
  perms   file_perms  -- unix-like permissions
);

-- A directory entry pointing to a file.
create table directory_entry_file
(
  id      bigserial,
  target  sha1_git,   -- id of target file
  name    unix_path,  -- path name, relative to containing dir
  perms   file_perms  -- unix-like permissions
);

-- A directory entry pointing to a revision.
create table directory_entry_rev
(
  id      bigserial,
  target  sha1_git,   -- id of target revision
  name    unix_path,  -- path name, relative to containing dir
  perms   file_perms  -- unix-like permissions
);

create table person
(
  id        bigserial,
  name      bytea,          -- advisory: not null if we managed to parse a name
  email     bytea,          -- advisory: not null if we managed to parse an email
  fullname  bytea not null  -- freeform specification; what is actually used in the checksums
                            -- will usually be of the form 'name <email>'
);

-- A snapshot of a software project at a specific point in time.
--
-- Synonyms/mappings:
-- * git / subversion / etc: commit
-- * tarball: a specific tarball
--
-- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in
-- case of merges) parent revisions. Each revision points to a directory, i.e.,
-- a file-system tree containing files and directories.
create table revision ( id sha1_git, date timestamptz, date_offset smallint, committer_date timestamptz, committer_date_offset smallint, type revision_type not null, directory sha1_git, -- file-system tree message bytea, author bigint, committer bigint, synthetic boolean not null default false, -- true if synthetic (cf. swh-loader-tar) metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...) object_id bigserial, date_neg_utc_offset boolean, committer_date_neg_utc_offset boolean ); -- either this table or the sha1_git[] column on the revision table create table revision_history ( id sha1_git, parent_id sha1_git, parent_rank int not null default 0 -- parent position in merge commits, 0-based ); -- The timestamps at which Software Heritage has made a visit of the given origin. create table origin_visit ( origin bigint not null, visit bigint not null, date timestamptz not null, status origin_visit_status not null, metadata jsonb ); comment on column origin_visit.origin is 'Visited origin'; comment on column origin_visit.visit is 'Visit number the visit occurred for that origin'; comment on column origin_visit.date is 'Visit date for that origin'; comment on column origin_visit.status is 'Visit status for that origin'; comment on column origin_visit.metadata is 'Metadata associated with the visit'; -- The content of software origins is indexed starting from top-level pointers -- called "branches". Every time we fetch some origin we store in this table -- where the branches pointed to at fetch time. -- -- Synonyms/mappings: -- * git: ref (in the "git update-ref" sense) create table occurrence_history ( origin bigint not null, branch bytea not null, -- e.g., b"master" (for VCS), or b"sid" (for Debian) target sha1_git not null, -- ref target, e.g., commit id target_type object_type not null, -- ref target type visits bigint[] not null, -- the visits where that occurrence was valid. References -- origin_visit(visit), where o_h.origin = origin_visit.origin. object_id bigserial not null -- short object identifier ); -- Materialized view of occurrence_history, storing the *current* value of each -- branch, as last seen by SWH. create table occurrence ( origin bigint, branch bytea not null, target sha1_git not null, target_type object_type not null ); -- A "memorable" point in the development history of a project. -- -- Synonyms/mappings: -- * git: tag (of the annotated kind, otherwise they are just references) -- * tarball: the release version number create table release ( id sha1_git not null, target sha1_git, date timestamptz, date_offset smallint, name bytea, comment bytea, author bigint, synthetic boolean not null default false, -- true if synthetic (cf. 
swh-loader-tar)
  object_id    bigserial,
  target_type  object_type not null,
  date_neg_utc_offset  boolean
);

-- Content provenance information caches
-- https://forge.softwareheritage.org/T547
--
-- Those tables aren't expected to be exhaustive, and get filled on a case by
-- case basis: absence of data doesn't mean the data is not there

-- content <-> revision mapping cache
--
-- semantics: "we have seen the content with given id in the given path inside
-- the given revision"
create table cache_content_revision
(
  content         sha1_git not null,
  blacklisted     boolean default false,
  revision_paths  bytea[][]
);

create table cache_content_revision_processed
(
  revision  sha1_git not null
);

-- revision <-> origin_visit mapping cache
--
-- semantics: "we have seen the given revision in the given origin during the
-- given visit"
create table cache_revision_origin
(
  revision  sha1_git not null,
  origin    bigint not null,
  visit     bigint not null
);

-- Computing metadata on sha1's contents
create table indexer_configuration (
  id serial not null,
  tool_name text not null,
  tool_version text not null,
  tool_configuration jsonb
);

comment on table indexer_configuration is 'Indexer''s configuration version';
comment on column indexer_configuration.id is 'Tool identifier';
comment on column indexer_configuration.tool_name is 'Tool name';
comment on column indexer_configuration.tool_version is 'Tool version';
comment on column indexer_configuration.tool_configuration is 'Tool configuration: command line, flags, etc...';

-- Properties (mimetype, encoding, etc...)
create table content_mimetype (
  id sha1 not null,
  mimetype bytea not null,
  encoding bytea not null,
  indexer_configuration_id bigint not null
);

comment on table content_mimetype is 'Metadata associated to a raw content';
comment on column content_mimetype.mimetype is 'Raw content Mimetype';
comment on column content_mimetype.encoding is 'Raw content encoding';
comment on column content_mimetype.indexer_configuration_id is 'Tool used to compute the information';

-- Language metadata
create table content_language (
  id sha1 not null,
  lang languages not null,
  indexer_configuration_id bigint not null
);

comment on table content_language is 'Language information on a raw content';
comment on column content_language.lang is 'Language information';
comment on column content_language.indexer_configuration_id is 'Tool used to compute the information';

-- ctags information per content
create table content_ctags (
  id sha1 not null,
  name text not null,
  kind text not null,
  line bigint not null,
  lang ctags_languages not null,
  indexer_configuration_id bigint not null
);

comment on table content_ctags is 'Ctags information on a raw content';
comment on column content_ctags.id is 'Content identifier';
comment on column content_ctags.name is 'Symbol name';
comment on column content_ctags.kind is 'Symbol kind (function, class, variable, const...)';
comment on column content_ctags.line is 'Symbol line';
comment on column content_ctags.lang is 'Language information for that content';
comment on column content_ctags.indexer_configuration_id is 'Tool used to compute the information';

create table fossology_license(
  id smallserial,
  name text not null
);

comment on table fossology_license is 'Possible license recognized by license indexer';
comment on column fossology_license.id is 'License identifier';
comment on column fossology_license.name is 'License name';

create table content_fossology_license (
  id sha1 not null,
  license_id smallserial not null,
  indexer_configuration_id bigint not null
);
comment on table content_fossology_license is 'license associated to a raw content';
comment on column content_fossology_license.id is 'Raw content identifier';
comment on column content_fossology_license.license_id is 'One of the content''s license identifier';
comment on column content_fossology_license.indexer_configuration_id is 'Tool used to compute the information';
diff --git a/sql/upgrades/101.sql b/sql/upgrades/101.sql
new file mode 100644
index 000000000..b9ab4cfaf
--- /dev/null
+++ b/sql/upgrades/101.sql
@@ -0,0 +1,36 @@
+-- SWH DB schema upgrade
+-- from_version: 100
+-- to_version: 101
+-- description: Open swh_content_update function
+
+insert into dbversion(version, release, description)
+  values(101, now(), 'Work In Progress');
+
+-- Update content entries from temporary table.
+-- (columns are potential new columns added to the schema, this cannot be empty)
+--
+create or replace function swh_content_update(columns_update text[])
+    returns void
+    language plpgsql
+as $$
+declare
+   query text;
+   tmp_array text[];
+begin
+    if array_length(columns_update, 1) = 0 then
+        raise exception 'Please, provide the list of column names to update.';
+    end if;
+
+    tmp_array := array(select format('%1$s=t.%1$s', unnest) from unnest(columns_update));
+
+    query = format('update content set %s
+                    from tmp_content t where t.sha1 = content.sha1',
+                    array_to_string(tmp_array, ', '));
+
+    execute query;
+
+    return;
+end
+$$;
+
+comment on function swh_content_update(text[]) IS 'Update existing content''s columns';
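For context, the new swh_content_update function follows the same
temporary-table protocol as the other bulk functions: stage rows in a temp
table, then call the function. A minimal sketch of a session, assuming the
generic swh_mktemp('content') helper creates tmp_content and that the
replacement rows (including their sha1 keys) have been COPYed into it:

```
select swh_mktemp('content');
-- COPY rows, with their sha1 keys and the new column values, into tmp_content
select swh_content_update(ARRAY['status', 'length']);
```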
diff --git a/swh.storage.egg-info/PKG-INFO b/swh.storage.egg-info/PKG-INFO
index d4fdd5c70..807cbc165 100644
--- a/swh.storage.egg-info/PKG-INFO
+++ b/swh.storage.egg-info/PKG-INFO
@@ -1,10 +1,10 @@
 Metadata-Version: 1.0
 Name: swh.storage
-Version: 0.0.79
+Version: 0.0.80
 Summary: Software Heritage storage manager
 Home-page: https://forge.softwareheritage.org/diffusion/DSTO/
 Author: Software Heritage developers
 Author-email: swh-devel@inria.fr
 License: UNKNOWN
 Description: UNKNOWN
 Platform: UNKNOWN
diff --git a/swh.storage.egg-info/SOURCES.txt b/swh.storage.egg-info/SOURCES.txt
index 6af1fa73e..c9bf03b67 100644
--- a/swh.storage.egg-info/SOURCES.txt
+++ b/swh.storage.egg-info/SOURCES.txt
@@ -1,191 +1,194 @@
.gitignore AUTHORS LICENSE MANIFEST.in Makefile Makefile.local README.db_testing README.dev requirements-swh.txt requirements.txt setup.py version.txt bin/swh-storage-add-dir debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format docs/archiver-blueprint.md docs/vault-blueprint.md sql/.gitignore sql/Makefile sql/TODO sql/clusters.dot sql/swh-data.sql sql/swh-enums.sql sql/swh-func.sql sql/swh-indexes.sql sql/swh-init.sql sql/swh-schema.sql sql/swh-triggers.sql sql/archiver/Makefile sql/archiver/swh-archiver-data.sql sql/archiver/swh-archiver-func.sql sql/archiver/swh-archiver-schema.sql sql/archiver/upgrades/002.sql sql/archiver/upgrades/003.sql sql/archiver/upgrades/004.sql sql/archiver/upgrades/005.sql sql/archiver/upgrades/006.sql sql/archiver/upgrades/007.sql +sql/archiver/upgrades/008.sql +sql/archiver/upgrades/009.sql sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/entity.lister_metadata.schema.json sql/doc/json/entity.metadata.schema.json sql/doc/json/entity_history.lister_metadata.schema.json sql/doc/json/entity_history.metadata.schema.json sql/doc/json/fetch_history.result.schema.json sql/doc/json/indexer_configuration.tool_configuration.schema.json sql/doc/json/list_history.result.schema.json sql/doc/json/listable_entity.list_params.schema.json sql/doc/json/origin_visit.metadata.json sql/doc/json/revision.metadata.schema.json sql/json/.gitignore sql/json/Makefile sql/json/entity.lister_metadata.schema.json sql/json/entity.metadata.schema.json sql/json/entity_history.lister_metadata.schema.json sql/json/entity_history.metadata.schema.json sql/json/fetch_history.result.schema.json sql/json/indexer_configuration.tool_configuration.schema.json sql/json/list_history.result.schema.json sql/json/listable_entity.list_params.schema.json sql/json/origin_visit.metadata.json sql/json/revision.metadata.schema.json sql/upgrades/015.sql sql/upgrades/016.sql sql/upgrades/017.sql sql/upgrades/018.sql sql/upgrades/019.sql sql/upgrades/020.sql sql/upgrades/021.sql sql/upgrades/022.sql sql/upgrades/023.sql sql/upgrades/024.sql sql/upgrades/025.sql sql/upgrades/026.sql sql/upgrades/027.sql sql/upgrades/028.sql sql/upgrades/029.sql sql/upgrades/030.sql sql/upgrades/032.sql sql/upgrades/033.sql sql/upgrades/034.sql sql/upgrades/035.sql sql/upgrades/036.sql sql/upgrades/037.sql sql/upgrades/038.sql sql/upgrades/039.sql sql/upgrades/040.sql sql/upgrades/041.sql sql/upgrades/042.sql sql/upgrades/043.sql sql/upgrades/044.sql sql/upgrades/045.sql sql/upgrades/046.sql sql/upgrades/047.sql sql/upgrades/048.sql sql/upgrades/049.sql sql/upgrades/050.sql sql/upgrades/051.sql sql/upgrades/052.sql sql/upgrades/053.sql sql/upgrades/054.sql sql/upgrades/055.sql sql/upgrades/056.sql sql/upgrades/057.sql sql/upgrades/058.sql sql/upgrades/059.sql sql/upgrades/060.sql sql/upgrades/061.sql sql/upgrades/062.sql sql/upgrades/063.sql sql/upgrades/064.sql sql/upgrades/065.sql sql/upgrades/066.sql sql/upgrades/067.sql sql/upgrades/068.sql sql/upgrades/069.sql sql/upgrades/070.sql sql/upgrades/071.sql sql/upgrades/072.sql sql/upgrades/073.sql sql/upgrades/074.sql sql/upgrades/075.sql sql/upgrades/076.sql sql/upgrades/077.sql sql/upgrades/078.sql sql/upgrades/079.sql sql/upgrades/080.sql sql/upgrades/081.sql sql/upgrades/082.sql sql/upgrades/083.sql sql/upgrades/084.sql sql/upgrades/085.sql sql/upgrades/086.sql sql/upgrades/087.sql sql/upgrades/088.sql sql/upgrades/089.sql sql/upgrades/090.sql sql/upgrades/091.sql sql/upgrades/092.sql sql/upgrades/093.sql sql/upgrades/094.sql sql/upgrades/095.sql sql/upgrades/096.sql sql/upgrades/097.sql sql/upgrades/098.sql sql/upgrades/099.sql sql/upgrades/100.sql +sql/upgrades/101.sql swh.storage.egg-info/PKG-INFO swh.storage.egg-info/SOURCES.txt swh.storage.egg-info/dependency_links.txt swh.storage.egg-info/requires.txt swh.storage.egg-info/top_level.txt swh/storage/__init__.py swh/storage/common.py swh/storage/converters.py swh/storage/db.py swh/storage/exc.py swh/storage/listener.py swh/storage/storage.py swh/storage/api/__init__.py swh/storage/api/client.py swh/storage/api/server.py swh/storage/archiver/__init__.py swh/storage/archiver/copier.py swh/storage/archiver/db.py swh/storage/archiver/director.py swh/storage/archiver/storage.py swh/storage/archiver/tasks.py swh/storage/archiver/worker.py swh/storage/provenance/tasks.py swh/storage/tests/server_testing.py swh/storage/tests/test_api_client.py swh/storage/tests/test_archiver.py swh/storage/tests/test_converters.py swh/storage/tests/test_db.py swh/storage/tests/test_storage.py swh/storage/vault/cache.py swh/storage/vault/conf.yaml swh/storage/vault/cooker.py swh/storage/vault/api/client.py swh/storage/vault/api/cooking_tasks.py swh/storage/vault/api/server.py utils/dump_revisions.py
utils/fix_revisions_from_dump.py \ No newline at end of file diff --git a/swh.storage.egg-info/requires.txt b/swh.storage.egg-info/requires.txt index f4fc076ab..579215f31 100644 --- a/swh.storage.egg-info/requires.txt +++ b/swh.storage.egg-info/requires.txt @@ -1,9 +1,9 @@ click flask psycopg2 python-dateutil swh.core>=0.0.28 swh.model>=0.0.13 swh.objstorage>=0.0.17 -swh.scheduler +swh.scheduler>=0.0.11 vcversioner diff --git a/swh/storage/api/client.py b/swh/storage/api/client.py index b6c0d9dd5..ff898dd9f 100644 --- a/swh/storage/api/client.py +++ b/swh/storage/api/client.py @@ -1,232 +1,236 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import SWHRemoteAPI from ..exc import StorageAPIError class RemoteStorage(SWHRemoteAPI): """Proxy to a remote storage API""" def __init__(self, url): super().__init__(api_exception=StorageAPIError, url=url) def check_config(self, *, check_write): return self.post('check_config', {'check_write': check_write}) def content_add(self, content): return self.post('content/add', {'content': content}) + def content_update(self, content, keys=[]): + return self.post('content/update', {'content': content, + 'keys': keys}) + def content_missing(self, content, key_hash='sha1'): return self.post('content/missing', {'content': content, 'key_hash': key_hash}) def content_missing_per_sha1(self, contents): return self.post('content/missing/sha1', {'contents': contents}) def content_get(self, content): return self.post('content/data', {'content': content}) def content_get_metadata(self, content): return self.post('content/metadata', {'content': content}) def content_find(self, content): return self.post('content/present', {'content': content}) def content_find_provenance(self, content): return self.post('content/provenance', {'content': content}) def directory_add(self, directories): return self.post('directory/add', {'directories': directories}) def directory_missing(self, directories): return self.post('directory/missing', {'directories': directories}) def directory_get(self, directories): return self.post('directory', dict(directories=directories)) def directory_ls(self, directory, recursive=False): return self.get('directory/ls', {'directory': directory, 'recursive': recursive}) def revision_get(self, revisions): return self.post('revision', {'revisions': revisions}) def revision_get_by(self, origin_id, branch_name, timestamp, limit=None): return self.post('revision/by', dict(origin_id=origin_id, branch_name=branch_name, timestamp=timestamp, limit=limit)) def revision_log(self, revisions, limit=None): return self.post('revision/log', {'revisions': revisions, 'limit': limit}) def revision_log_by(self, origin_id, branch_name, timestamp, limit=None): return self.post('revision/logby', {'origin_id': origin_id, 'branch_name': branch_name, 'timestamp': timestamp, 'limit': limit}) def revision_shortlog(self, revisions, limit=None): return self.post('revision/shortlog', {'revisions': revisions, 'limit': limit}) def cache_content_revision_add(self, revisions): return self.post('cache/content_revision', {'revisions': revisions}) def cache_content_get_all(self): return self.get('cache/contents') def cache_content_get(self, content): return self.post('cache/content', {'content': content}) def cache_revision_origin_add(self, origin, visit): return 
self.post('cache/revision_origin', {'origin': origin, 'visit': visit}) def revision_add(self, revisions): return self.post('revision/add', {'revisions': revisions}) def revision_missing(self, revisions): return self.post('revision/missing', {'revisions': revisions}) def release_add(self, releases): return self.post('release/add', {'releases': releases}) def release_get(self, releases): return self.post('release', {'releases': releases}) def release_get_by(self, origin_id, limit=None): return self.post('release/by', dict(origin_id=origin_id, limit=limit)) def release_missing(self, releases): return self.post('release/missing', {'releases': releases}) def object_find_by_sha1_git(self, ids): return self.post('object/find_by_sha1_git', {'ids': ids}) def occurrence_get(self, origin_id): return self.post('occurrence', {'origin_id': origin_id}) def occurrence_add(self, occurrences): return self.post('occurrence/add', {'occurrences': occurrences}) def origin_get(self, origin): return self.post('origin/get', {'origin': origin}) def origin_add(self, origins): return self.post('origin/add_multi', {'origins': origins}) def origin_add_one(self, origin): return self.post('origin/add', {'origin': origin}) def origin_visit_add(self, origin, ts): return self.post('origin/visit/add', {'origin': origin, 'ts': ts}) def origin_visit_update(self, origin, visit_id, status, metadata=None): return self.post('origin/visit/update', {'origin': origin, 'visit_id': visit_id, 'status': status, 'metadata': metadata}) def origin_visit_get(self, origin, last_visit=None, limit=None): return self.post('origin/visit/get', { 'origin': origin, 'last_visit': last_visit, 'limit': limit}) def origin_visit_get_by(self, origin, visit): return self.post('origin/visit/getby', {'origin': origin, 'visit': visit}) def person_get(self, person): return self.post('person', {'person': person}) def fetch_history_start(self, origin_id): return self.post('fetch_history/start', {'origin_id': origin_id}) def fetch_history_end(self, fetch_history_id, data): return self.post('fetch_history/end', {'fetch_history_id': fetch_history_id, 'data': data}) def fetch_history_get(self, fetch_history_id): return self.get('fetch_history', {'id': fetch_history_id}) def entity_add(self, entities): return self.post('entity/add', {'entities': entities}) def entity_get(self, uuid): return self.post('entity/get', {'uuid': uuid}) def entity_get_one(self, uuid): return self.get('entity', {'uuid': uuid}) def entity_get_from_lister_metadata(self, entities): return self.post('entity/from_lister_metadata', {'entities': entities}) def stat_counters(self): return self.get('stat/counters') def directory_entry_get_by_path(self, directory, paths): return self.post('directory/path', dict(directory=directory, paths=paths)) def content_mimetype_add(self, mimetypes, conflict_update=False): return self.post('content_mimetype/add', { 'mimetypes': mimetypes, 'conflict_update': conflict_update, }) def content_mimetype_missing(self, mimetypes): return self.post('content_mimetype/missing', {'mimetypes': mimetypes}) def content_mimetype_get(self, ids): return self.post('content_mimetype', {'ids': ids}) def content_language_add(self, languages, conflict_update=False): return self.post('content_language/add', { 'languages': languages, 'conflict_update': conflict_update, }) def content_language_missing(self, languages): return self.post('content_language/missing', {'languages': languages}) def content_language_get(self, ids): return self.post('content_language', {'ids': ids}) def 
content_ctags_add(self, ctags, conflict_update=False): return self.post('content/ctags/add', { 'ctags': ctags, 'conflict_update': conflict_update, }) def content_ctags_missing(self, ctags): return self.post('content/ctags/missing', {'ctags': ctags}) def content_ctags_get(self, ids): return self.post('content/ctags', {'ids': ids}) def content_ctags_search(self, expression, limit=10, last_sha1=None): return self.post('content/ctags/search', { 'expression': expression, 'limit': limit, 'last_sha1': last_sha1, }) def content_fossology_license_add(self, licenses, conflict_update=False): return self.post('content/fossology_license/add', { 'licenses': licenses, 'conflict_update': conflict_update, }) def content_fossology_license_missing(self, licenses): return self.post('content/fossology_license/missing', { 'licenses': licenses}) def content_fossology_license_get(self, ids): return self.post('content/fossology_license', {'ids': ids}) diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py index 1618ef21f..56f58fe93 100644 --- a/swh/storage/api/server.py +++ b/swh/storage/api/server.py @@ -1,409 +1,414 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import logging import click from flask import g, request from swh.core import config from swh.storage import get_storage from swh.core.api import (SWHServerAPIApp, decode_request, error_handler, encode_data_server as encode_data) DEFAULT_CONFIG = { 'storage': ('dict', { 'cls': 'local', 'args': { 'db': 'dbname=softwareheritage-dev', 'objstorage': { 'cls': 'pathslicing', 'args': { 'root': '/srv/softwareheritage/objects', 'slicing': '0:2/2:4/4:6', }, }, }, }) } app = SWHServerAPIApp(__name__) @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) @app.before_request def before_request(): g.storage = get_storage(**app.config['storage']) @app.route('/') def index(): return 'SWH Storage API server' @app.route('/check_config', methods=['POST']) def check_config(): return encode_data(g.storage.check_config(**decode_request(request))) @app.route('/content/missing', methods=['POST']) def content_missing(): return encode_data(g.storage.content_missing(**decode_request(request))) @app.route('/content/missing/sha1', methods=['POST']) def content_missing_per_sha1(): return encode_data(g.storage.content_missing_per_sha1( **decode_request(request))) @app.route('/content/present', methods=['POST']) def content_find(): return encode_data(g.storage.content_find(**decode_request(request))) @app.route('/content/provenance', methods=['POST']) def content_find_provenance(): res = g.storage.content_find_provenance(**decode_request(request)) return encode_data(res) @app.route('/content/add', methods=['POST']) def content_add(): return encode_data(g.storage.content_add(**decode_request(request))) +@app.route('/content/update', methods=['POST']) +def content_update(): + return encode_data(g.storage.content_update(**decode_request(request))) + + @app.route('/content/data', methods=['POST']) def content_get(): return encode_data(g.storage.content_get(**decode_request(request))) @app.route('/content/metadata', methods=['POST']) def content_get_metadata(): return encode_data(g.storage.content_get_metadata( **decode_request(request))) @app.route('/directory', methods=['POST']) def directory_get(): return 
encode_data(g.storage.directory_get(**decode_request(request))) @app.route('/directory/missing', methods=['POST']) def directory_missing(): return encode_data(g.storage.directory_missing(**decode_request(request))) @app.route('/directory/add', methods=['POST']) def directory_add(): return encode_data(g.storage.directory_add(**decode_request(request))) @app.route('/directory/path', methods=['POST']) def directory_entry_get_by_path(): return encode_data(g.storage.directory_entry_get_by_path( **decode_request(request))) @app.route('/directory/ls', methods=['GET']) def directory_ls(): dir = request.args['directory'].encode('utf-8', 'surrogateescape') rec = json.loads(request.args.get('recursive', 'False').lower()) return encode_data(g.storage.directory_ls(dir, recursive=rec)) @app.route('/revision/add', methods=['POST']) def revision_add(): return encode_data(g.storage.revision_add(**decode_request(request))) @app.route('/revision', methods=['POST']) def revision_get(): return encode_data(g.storage.revision_get(**decode_request(request))) @app.route('/revision/by', methods=['POST']) def revision_get_by(): return encode_data(g.storage.revision_get_by(**decode_request(request))) @app.route('/revision/log', methods=['POST']) def revision_log(): return encode_data(g.storage.revision_log(**decode_request(request))) @app.route('/revision/logby', methods=['POST']) def revision_log_by(): return encode_data(g.storage.revision_log_by(**decode_request(request))) @app.route('/revision/shortlog', methods=['POST']) def revision_shortlog(): return encode_data(g.storage.revision_shortlog(**decode_request(request))) @app.route('/revision/missing', methods=['POST']) def revision_missing(): return encode_data(g.storage.revision_missing(**decode_request(request))) @app.route('/cache/content_revision', methods=['POST']) def cache_content_revision_add(): return encode_data(g.storage.cache_content_revision_add( **decode_request(request))) @app.route('/cache/contents', methods=['GET']) def cache_content_get_all(): return encode_data(g.storage.cache_content_get_all()) @app.route('/cache/content', methods=['POST']) def cache_content_get(): return encode_data(g.storage.cache_content_get( **decode_request(request))) @app.route('/cache/revision_origin', methods=['POST']) def cache_revision_origin_add(): return encode_data(g.storage.cache_revision_origin_add( **decode_request(request))) @app.route('/release/add', methods=['POST']) def release_add(): return encode_data(g.storage.release_add(**decode_request(request))) @app.route('/release', methods=['POST']) def release_get(): return encode_data(g.storage.release_get(**decode_request(request))) @app.route('/release/by', methods=['POST']) def release_get_by(): return encode_data(g.storage.release_get_by(**decode_request(request))) @app.route('/release/missing', methods=['POST']) def release_missing(): return encode_data(g.storage.release_missing(**decode_request(request))) @app.route('/object/find_by_sha1_git', methods=['POST']) def object_find_by_sha1_git(): return encode_data(g.storage.object_find_by_sha1_git( **decode_request(request))) @app.route('/occurrence', methods=['POST']) def occurrence_get(): return encode_data(g.storage.occurrence_get(**decode_request(request))) @app.route('/occurrence/add', methods=['POST']) def occurrence_add(): return encode_data(g.storage.occurrence_add(**decode_request(request))) @app.route('/origin/get', methods=['POST']) def origin_get(): return encode_data(g.storage.origin_get(**decode_request(request))) @app.route('/origin/add_multi', 
methods=['POST']) def origin_add(): return encode_data(g.storage.origin_add(**decode_request(request))) @app.route('/origin/add', methods=['POST']) def origin_add_one(): return encode_data(g.storage.origin_add_one(**decode_request(request))) @app.route('/origin/visit/get', methods=['POST']) def origin_visit_get(): return encode_data(g.storage.origin_visit_get(**decode_request(request))) @app.route('/origin/visit/getby', methods=['POST']) def origin_visit_get_by(): return encode_data( g.storage.origin_visit_get_by(**decode_request(request))) @app.route('/origin/visit/add', methods=['POST']) def origin_visit_add(): return encode_data(g.storage.origin_visit_add(**decode_request(request))) @app.route('/origin/visit/update', methods=['POST']) def origin_visit_update(): return encode_data(g.storage.origin_visit_update( **decode_request(request))) @app.route('/person', methods=['POST']) def person_get(): return encode_data(g.storage.person_get(**decode_request(request))) @app.route('/fetch_history', methods=['GET']) def fetch_history_get(): return encode_data(g.storage.fetch_history_get(request.args['id'])) @app.route('/fetch_history/start', methods=['POST']) def fetch_history_start(): return encode_data( g.storage.fetch_history_start(**decode_request(request))) @app.route('/fetch_history/end', methods=['POST']) def fetch_history_end(): return encode_data( g.storage.fetch_history_end(**decode_request(request))) @app.route('/entity/add', methods=['POST']) def entity_add(): return encode_data( g.storage.entity_add(**decode_request(request))) @app.route('/entity/get', methods=['POST']) def entity_get(): return encode_data( g.storage.entity_get(**decode_request(request))) @app.route('/entity', methods=['GET']) def entity_get_one(): return encode_data(g.storage.entity_get_one(request.args['uuid'])) @app.route('/entity/from_lister_metadata', methods=['POST']) def entity_from_lister_metadata(): return encode_data( g.storage.entity_get_from_lister_metadata(**decode_request(request))) @app.route('/content_mimetype/add', methods=['POST']) def content_mimetype_add(): return encode_data( g.storage.content_mimetype_add(**decode_request(request))) @app.route('/content_mimetype/missing', methods=['POST']) def content_mimetype_missing(): return encode_data( g.storage.content_mimetype_missing(**decode_request(request))) @app.route('/content_mimetype', methods=['POST']) def content_mimetype_get(): return encode_data( g.storage.content_mimetype_get(**decode_request(request))) @app.route('/content_language/add', methods=['POST']) def content_language_add(): return encode_data( g.storage.content_language_add(**decode_request(request))) @app.route('/content_language/missing', methods=['POST']) def content_language_missing(): return encode_data( g.storage.content_language_missing(**decode_request(request))) @app.route('/content_language', methods=['POST']) def content_language_get(): return encode_data( g.storage.content_language_get(**decode_request(request))) @app.route('/content/ctags/add', methods=['POST']) def content_ctags_add(): return encode_data( g.storage.content_ctags_add(**decode_request(request))) @app.route('/content/ctags/search', methods=['POST']) def content_ctags_search(): return encode_data( g.storage.content_ctags_search(**decode_request(request))) @app.route('/content/ctags/missing', methods=['POST']) def content_ctags_missing(): return encode_data( g.storage.content_ctags_missing(**decode_request(request))) @app.route('/content/ctags', methods=['POST']) def content_ctags_get(): return encode_data( 
g.storage.content_ctags_get(**decode_request(request))) @app.route('/content/fossology_license/add', methods=['POST']) def content_fossology_license_add(): return encode_data( g.storage.content_fossology_license_add(**decode_request(request))) @app.route('/content/fossology_license/missing', methods=['POST']) def content_fossology_license_missing(): return encode_data( g.storage.content_fossology_license_missing(**decode_request(request))) @app.route('/content/fossology_license', methods=['POST']) def content_fossology_license_get(): return encode_data( g.storage.content_fossology_license_get(**decode_request(request))) @app.route('/stat/counters', methods=['GET']) def stat_counters(): return encode_data(g.storage.stat_counters()) def run_from_webserver(environ, start_response): """Run the WSGI app from the webserver, loading the configuration.""" config_path = '/etc/softwareheritage/storage/storage.yml' app.config.update(config.read(config_path, DEFAULT_CONFIG)) handler = logging.StreamHandler() app.logger.addHandler(handler) return app(environ, start_response) @click.command() @click.argument('config-path', required=1) @click.option('--host', default='0.0.0.0', help="Host to run the server") -@click.option('--port', default=5000, type=click.INT, +@click.option('--port', default=5002, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=True, help="Indicates if the server should run in debug mode") def launch(config_path, host, port, debug): app.config.update(config.read(config_path, DEFAULT_CONFIG)) app.run(host, port=int(port), debug=bool(debug)) if __name__ == '__main__': launch() diff --git a/swh/storage/archiver/db.py b/swh/storage/archiver/db.py index a18aabee4..b130d3bc7 100644 --- a/swh/storage/archiver/db.py +++ b/swh/storage/archiver/db.py @@ -1,260 +1,251 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import time from swh.core import hashutil from swh.storage.db import BaseDb, cursor_to_bytes, stored_procedure class ArchiverDb(BaseDb): """Proxy to the SWH's archiver DB """ def archive_ls(self, cur=None): """ Get all the archives registered on the server. Yields: a tuple (server_id, server_url) for each archive server. """ cur = self._cursor(cur) cur.execute("SELECT * FROM archive") yield from cursor_to_bytes(cur) def content_archive_get(self, content_id, cur=None): """ Get the archival status of a content in a specific server. Retrieve from the database the archival status of the given content in the given archive server. Args: content_id: the sha1 of the content. Yields: A tuple (content_id, present_copies, ongoing_copies), where ongoing_copies is a dict mapping copy to mtime. 
""" query = """SELECT content_id, array( SELECT key FROM jsonb_each(copies) WHERE value->>'status' = 'present' ORDER BY key ) AS present, array( SELECT key FROM jsonb_each(copies) WHERE value->>'status' = 'ongoing' ORDER BY key ) AS ongoing, array( SELECT value->'mtime' FROM jsonb_each(copies) WHERE value->>'status' = 'ongoing' ORDER BY key ) AS ongoing_mtime FROM content_archive WHERE content_id = %s ORDER BY content_id """ cur = self._cursor(cur) cur.execute(query, (content_id,)) row = cur.fetchone() if not row: return None content_id, present, ongoing, mtimes = row return (content_id, present, dict(zip(ongoing, mtimes))) def content_archive_get_copies(self, last_content=None, limit=1000, cur=None): """Get the list of copies for `limit` contents starting after `last_content`. Args: last_content: sha1 of the last content retrieved. May be None to start at the beginning. limit: number of contents to retrieve. Can be None to retrieve all objects (will be slow). Yields: A tuple (content_id, present_copies, ongoing_copies), where ongoing_copies is a dict mapping copy to mtime. """ query = """SELECT content_id, array( SELECT key FROM jsonb_each(copies) WHERE value->>'status' = 'present' ORDER BY key ) AS present, array( SELECT key FROM jsonb_each(copies) WHERE value->>'status' = 'ongoing' ORDER BY key ) AS ongoing, array( SELECT value->'mtime' FROM jsonb_each(copies) WHERE value->>'status' = 'ongoing' ORDER BY key ) AS ongoing_mtime FROM content_archive WHERE content_id > %s ORDER BY content_id LIMIT %s """ if last_content is None: last_content = b'' cur = self._cursor(cur) cur.execute(query, (last_content, limit)) for content_id, present, ongoing, mtimes in cursor_to_bytes(cur): yield (content_id, present, dict(zip(ongoing, mtimes))) def content_archive_get_unarchived_copies( self, retention_policy, last_content=None, limit=1000, cur=None): """ Get the list of copies for `limit` contents starting after `last_content`. Yields only copies with number of present smaller than `retention policy`. Args: last_content: sha1 of the last content retrieved. May be None to start at the beginning. retention_policy: number of required present copies limit: number of contents to retrieve. Can be None to retrieve all objects (will be slow). Yields: A tuple (content_id, present_copies, ongoing_copies), where ongoing_copies is a dict mapping copy to mtime. """ query = """SELECT content_id, array( SELECT key FROM jsonb_each(copies) WHERE value->>'status' = 'present' ORDER BY key ) AS present, array( SELECT key FROM jsonb_each(copies) WHERE value->>'status' = 'ongoing' ORDER BY key ) AS ongoing, array( SELECT value->'mtime' FROM jsonb_each(copies) WHERE value->>'status' = 'ongoing' ORDER BY key ) AS ongoing_mtime FROM content_archive WHERE content_id > %s AND num_present < %s ORDER BY content_id LIMIT %s """ if last_content is None: last_content = b'' cur = self._cursor(cur) cur.execute(query, (last_content, retention_policy, limit)) for content_id, present, ongoing, mtimes in cursor_to_bytes(cur): yield (content_id, present, dict(zip(ongoing, mtimes))) @stored_procedure('swh_mktemp_content_archive') def mktemp_content_archive(self, cur=None): """Trigger the creation of the temporary table tmp_content_archive during the lifetime of the transaction. + """ + pass + + @stored_procedure('swh_content_archive_add') + def content_archive_add_from_temp(self, cur=None): + """Add new content archive entries from temporary table. 
+ Use from archiver.storage module: self.db.mktemp_content_archive() # copy data over to the temp table self.db.copy_to([{'content_id': id0}, {'content_id': id1}], 'tmp_content_archive', ['content_id'], cur) + # insert into the main table + self.db.content_archive_add_from_temp(cur) """ pass def content_archive_get_missing(self, backend_name, cur=None): """Retrieve the content missing from backend_name. """ cur = self._cursor(cur) cur.execute("select * from swh_content_archive_missing(%s)", (backend_name,)) yield from cursor_to_bytes(cur) def content_archive_get_unknown(self, cur=None): """Retrieve unknown sha1 from archiver db. """ cur = self._cursor(cur) cur.execute('select * from swh_content_archive_unknown()') yield from cursor_to_bytes(cur) - def content_archive_insert(self, content_id, source, status, cur=None): - """Insert a new entry in the db for the content_id. - - Args: - content_id: content concerned - source: name of the source - status: the status of the content for that source - - """ - if isinstance(content_id, bytes): - content_id = '\\x%s' % hashutil.hash_to_hex(content_id) - - query = """INSERT INTO content_archive(content_id, copies, num_present) - VALUES('%s', '{"%s": {"status": "%s", "mtime": %d}}', 1) - """ % (content_id, source, status, int(time.time())) - cur = self._cursor(cur) - cur.execute(query) - def content_archive_update(self, content_id, archive_id, new_status=None, cur=None): """ Update the status of an archive content and set its mtime to now. Change the mtime of an archived content for the given archive and set its mtime to the current time. Args: content_id (str): content sha1 archive_id (str): name of the archive new_status (str): one of 'missing', 'present' or 'ongoing'. this status will replace the previous one. If not given, the function only changes the mtime of the content for the given archive. """ if isinstance(content_id, bytes): content_id = '\\x%s' % hashutil.hash_to_hex(content_id) if new_status is not None: query = """UPDATE content_archive SET copies=jsonb_set( copies, '{%s}', '{"status":"%s", "mtime":%d}' ) WHERE content_id='%s' """ % (archive_id, new_status, int(time.time()), content_id) else: query = """ UPDATE content_archive SET copies=jsonb_set(copies, '{%s,mtime}', '%d') WHERE content_id='%s' """ % (archive_id, int(time.time()), content_id) cur = self._cursor(cur) cur.execute(query) diff --git a/swh/storage/archiver/director.py b/swh/storage/archiver/director.py index 9e4615408..5b70aa3f0 100644 --- a/swh/storage/archiver/director.py +++ b/swh/storage/archiver/director.py @@ -1,301 +1,306 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import click import sys from swh.core import config, utils, hashutil from swh.objstorage import get_objstorage -from swh.scheduler.celery_backend.config import app +from swh.scheduler.utils import get_task from . import tasks # noqa -from .storage import ArchiverStorage +from .storage import get_archiver_storage class ArchiverDirectorBase(config.SWHConfig, metaclass=abc.ABCMeta): """Abstract Director class An archiver director is in charge of dispatching batches of contents to archiver workers (for them to archive).
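In outline, the dispatch loop such a director runs boils down to the following sketch; the real methods are defined just below, and `director` is assumed to be a fully configured instance:

```
# Sketch of the director dispatch flow implemented below: batches are read
# from the archiver db, then handed to a celery task either asynchronously
# (task.delay) or inline, depending on the 'asynchronous' configuration.
def dispatch(director):
    for batch in director.read_batch_contents():
        if director.config['asynchronous']:
            director.task.delay(batch=batch)   # enqueue for a remote worker
        else:
            director.task(batch=batch)         # run the task in-process
```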
Inherit from this class and provide: - ADDITIONAL_CONFIG: Some added configuration needed for the director to work - CONFIG_BASE_FILENAME: relative path to look up the configuration file - def get_contents_to_archive(self): Implementation method to read contents to archive """ DEFAULT_CONFIG = { 'batch_max_size': ('int', 1500), 'asynchronous': ('bool', True), - 'dbconn': ('str', 'dbname=softwareheritage-archiver-dev user=guest') + 'archiver_storage': ('dict', { + 'cls': 'db', + 'args': { + 'dbconn': 'dbname=softwareheritage-archiver-dev user=guest', + }, + }), } # Destined to be overridden by subclass ADDITIONAL_CONFIG = {} # We use the same configuration file as the worker CONFIG_BASE_FILENAME = 'archiver/worker' # The worker's task queue name to use TASK_NAME = None def __init__(self): """ Constructor of the archiver director. The configuration, including the 'archiver_storage' setup, is read from the configuration file; subclasses may extend it through ADDITIONAL_CONFIG. """ super().__init__() self.config = self.parse_config_file( additional_configs=[self.ADDITIONAL_CONFIG]) - self.archiver_storage = ArchiverStorage(self.config['dbconn']) + self.archiver_storage = get_archiver_storage( + **self.config['archiver_storage']) + self.task = get_task(self.TASK_NAME) def run(self): """ Run the archiver director. The archiver director will check all the contents of the archiver database and do the required backup jobs. """ if self.config['asynchronous']: run_fn = self.run_async_worker else: run_fn = self.run_sync_worker for batch in self.read_batch_contents(): run_fn(batch) def run_async_worker(self, batch): """Produce a worker that will be added to the task queue. """ - task = app.tasks[self.TASK_NAME] - task.delay(batch=batch) + self.task.delay(batch=batch) def run_sync_worker(self, batch): """Run synchronously a worker on the given batch. """ - task = app.tasks[self.TASK_NAME] - task(batch=batch) + self.task(batch=batch) def read_batch_contents(self): """ Create batches of contents that need to be archived Yields: batches of sha1s corresponding to contents that need more archive copies. """ contents = [] for content in self.get_contents_to_archive(): contents.append(content) if len(contents) > self.config['batch_max_size']: yield contents contents = [] if len(contents) > 0: yield contents @abc.abstractmethod def get_contents_to_archive(self): """Retrieve generator of sha1 to archive Yields: sha1 to archive """ pass class ArchiverWithRetentionPolicyDirector(ArchiverDirectorBase): """Process the files in order to know which ones need a backup. The archiver director processes the files in the local storage in order to know which ones need archival, and delegates this task to archiver workers. """ ADDITIONAL_CONFIG = { 'retention_policy': ('int', 2), } TASK_NAME = 'swh.storage.archiver.tasks.SWHArchiverWithRetentionPolicyTask' def get_contents_to_archive(self): """Create batches of contents that need to be archived Yields: Data about a content as a tuple (content_id, present_copies, ongoing_copies) where ongoing_copies is a dict mapping copy to mtime.
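Note that the strict `>` comparison in read_batch_contents above lets a batch grow to batch_max_size + 1 elements before it is yielded; a condensed, standalone sketch of that behaviour:

```
# Sketch restating read_batch_contents' grouping behaviour.
def batches(items, batch_max_size):
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) > batch_max_size:   # strict >: batches of size max + 1
            yield batch
            batch = []
    if batch:
        yield batch

assert list(batches(range(5), 2)) == [[0, 1, 2], [3, 4]]
```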
""" last_content = None while True: archiver_contents = list( self.archiver_storage.content_archive_get_unarchived_copies( last_content=last_content, retention_policy=self.config['retention_policy'])) if not archiver_contents: return for content_id, _, _ in archiver_contents: last_content = content_id yield content_id def read_sha1_from_stdin(): """Read sha1 from stdin. """ - for sha1 in sys.stdin: - yield {'content_id': hashutil.hex_to_hash(sha1.rstrip())} + for line in sys.stdin: + sha1 = line.strip() + try: + yield {'content_id': hashutil.hex_to_hash(sha1)} + except Exception: + print("%s is not a valid sha1 hash, continuing" % repr(sha1), + file=sys.stderr) + continue class ArchiverStdinToBackendDirector(ArchiverDirectorBase): """A cloud archiver director in charge of reading contents and send them in batch in the cloud. The archiver director, in order: - Reads sha1 to send to a specific backend. - Checks if those sha1 are known in the archiver. If they are not, add them - if the sha1 are missing, they are sent for the worker to archive If the flag force_copy is set, this will force the copy to be sent for archive even though it has already been done. """ ADDITIONAL_CONFIG = { 'destination': ('str', 'azure'), 'force_copy': ('bool', False), 'source': ('str', 'uffizi'), 'storages': ('list[dict]', [ {'host': 'uffizi', 'cls': 'pathslicing', 'args': {'root': '/tmp/softwareheritage/objects', 'slicing': '0:2/2:4/4:6'}}, {'host': 'banco', 'cls': 'remote', 'args': {'base_url': 'http://banco:5003/'}} ]) } CONFIG_BASE_FILENAME = 'archiver/worker-to-backend' TASK_NAME = 'swh.storage.archiver.tasks.SWHArchiverToBackendTask' def __init__(self): super().__init__() self.destination = self.config['destination'] self.force_copy = self.config['force_copy'] self.objstorages = { storage['host']: get_objstorage(storage['cls'], storage['args']) for storage in self.config.get('storages', []) } # Fallback objstorage self.source = self.config['source'] - def _add_unknown_content_ids(self, content_ids, source_objstorage): + def _add_unknown_content_ids(self, content_ids): """Check whether some content_id are unknown. If they are, add them to the archiver db. 
Args: content_ids: List of dict with one key content_id - source_objstorage (ObjStorage): objstorage to check if - content_id is there - """ - unknowns = self.archiver_storage.content_archive_get_unknown( - content_ids) - for unknown_id in unknowns: - if unknown_id not in source_objstorage: - continue - self.archiver_storage.content_archive_insert( - unknown_id, self.source, 'present') + source_objstorage = self.objstorages[self.source] + + self.archiver_storage.content_archive_add( + (h['content_id'] + for h in content_ids + if h['content_id'] in source_objstorage), + sources_present=[self.source]) def get_contents_to_archive(self): gen_content_ids = ( ids for ids in utils.grouper(read_sha1_from_stdin(), self.config['batch_max_size'])) - source_objstorage = self.objstorages[self.source] if self.force_copy: for content_ids in gen_content_ids: content_ids = list(content_ids) if not content_ids: continue # Add missing entries in archiver table - self._add_unknown_content_ids(content_ids, source_objstorage) + self._add_unknown_content_ids(content_ids) print('Send %s contents to archive' % len(content_ids)) for content in content_ids: content_id = content['content_id'] # force its status to missing self.archiver_storage.content_archive_update( content_id, self.destination, 'missing') yield content_id else: for content_ids in gen_content_ids: content_ids = list(content_ids) # Add missing entries in archiver table - self._add_unknown_content_ids(content_ids, source_objstorage) + self._add_unknown_content_ids(content_ids) # Filter already copied data content_ids = list( self.archiver_storage.content_archive_get_missing( content_ids=content_ids, backend_name=self.destination)) if not content_ids: continue print('Send %s contents to archive' % len(content_ids)) for content in content_ids: yield content def run_async_worker(self, batch): """Produce a worker that will be added to the task queue. """ - task = app.tasks[self.TASK_NAME] - task.delay(destination=self.destination, batch=batch) + self.task.delay(destination=self.destination, batch=batch) def run_sync_worker(self, batch): """Run synchronously a worker on the given batch. 
""" - task = app.tasks[self.TASK_NAME] - task(destination=self.destination, batch=batch) + self.task(destination=self.destination, batch=batch) @click.command() @click.option('--direct', is_flag=True, help="""The archiver sends content for backup to one storage.""") def launch(direct): if direct: archiver = ArchiverStdinToBackendDirector() else: archiver = ArchiverWithRetentionPolicyDirector() archiver.run() if __name__ == '__main__': launch() diff --git a/swh/storage/archiver/storage.py b/swh/storage/archiver/storage.py index b207a7047..c64bf5500 100644 --- a/swh/storage/archiver/storage.py +++ b/swh/storage/archiver/storage.py @@ -1,168 +1,362 @@ -# Copyright (C) 2016 The Software Heritage developers +# Copyright (C) 2016-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import json +import os import psycopg2 +import time from .db import ArchiverDb +from swh.core import hashutil + from swh.storage.common import db_transaction_generator, db_transaction from swh.storage.exc import StorageDBError class ArchiverStorage(): """SWH Archiver storage proxy, encompassing DB """ - def __init__(self, db_conn): + def __init__(self, dbconn): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection """ try: - if isinstance(db_conn, psycopg2.extensions.connection): - self.db = ArchiverDb(db_conn) + if isinstance(dbconn, psycopg2.extensions.connection): + self.db = ArchiverDb(dbconn) else: - self.db = ArchiverDb.connect(db_conn) + self.db = ArchiverDb.connect(dbconn) except psycopg2.OperationalError as e: raise StorageDBError(e) @db_transaction_generator def archive_ls(self, cur=None): """ Get all the archives registered on the server. Yields: a tuple (server_id, server_url) for each archive server. """ yield from self.db.archive_ls(cur) @db_transaction def content_archive_get(self, content_id, cur=None): """ Get the archival status of a content. Retrieve from the database the archival status of the given content Args: content_id: the sha1 of the content Yields: A tuple (content_id, present_copies, ongoing_copies), where ongoing_copies is a dict mapping copy to mtime. """ return self.db.content_archive_get(content_id, cur) @db_transaction_generator def content_archive_get_copies(self, last_content=None, limit=1000, cur=None): """ Get the list of copies for `limit` contents starting after `last_content`. Args: last_content: sha1 of the last content retrieved. May be None to start at the beginning. limit: number of contents to retrieve. Can be None to retrieve all objects (will be slow). Yields: A tuple (content_id, present_copies, ongoing_copies), where ongoing_copies is a dict mapping copy to mtime. """ yield from self.db.content_archive_get_copies(last_content, limit, cur) @db_transaction_generator def content_archive_get_unarchived_copies( self, retention_policy, last_content=None, limit=1000, cur=None): """ Get the list of copies for `limit` contents starting after `last_content`. Yields only copies with number of present smaller than `retention policy`. Args: last_content: sha1 of the last content retrieved. May be None to start at the beginning. retention_policy: number of required present copies limit: number of contents to retrieve. Can be None to retrieve all objects (will be slow). Yields: A tuple (content_id, present_copies, ongoing_copies), where ongoing_copies is a dict mapping copy to mtime. 
""" yield from self.db.content_archive_get_unarchived_copies( retention_policy, last_content, limit, cur) @db_transaction_generator def content_archive_get_missing(self, content_ids, backend_name, cur=None): """Retrieve missing sha1s from source_name. Args: content_ids ([sha1s]): list of sha1s to test source_name (str): Name of the backend to check for content Yields: missing sha1s from backend_name """ db = self.db db.mktemp_content_archive() db.copy_to(content_ids, 'tmp_content_archive', ['content_id'], cur) for content_id in db.content_archive_get_missing(backend_name, cur): yield content_id[0] @db_transaction_generator def content_archive_get_unknown(self, content_ids, cur=None): """Retrieve unknown sha1s from content_archive. Args: content_ids ([sha1s]): list of sha1s to test Yields: Unknown sha1s from content_archive """ db = self.db db.mktemp_content_archive() db.copy_to(content_ids, 'tmp_content_archive', ['content_id'], cur) for content_id in db.content_archive_get_unknown(cur): yield content_id[0] @db_transaction def content_archive_update(self, content_id, archive_id, new_status=None, cur=None): """ Update the status of an archive content and set its mtime to now Change the mtime of an archived content for the given archive and set it's mtime to the current time. Args: content_id (str): content sha1 archive_id (str): name of the archive new_status (str): one of 'missing', 'present' or 'ongoing'. this status will replace the previous one. If not given, the function only change the mtime of the content for the given archive. """ self.db.content_archive_update(content_id, archive_id, new_status, cur) @db_transaction - def content_archive_insert(self, content_id, source, status, cur=None): + def content_archive_add( + self, content_ids, sources_present, cur=None): """Insert a new entry in db about content_id. 
Args: - content_id: content concerned - source: name of the source - status: the status of the content for that source + content_ids ([bytes|str]): content identifiers + sources_present ([str]): List of source names where + contents are present + """ + db = self.db + + # Prepare copies dictionary + copies = {} + for source in sources_present: + copies[source] = { + "status": "present", + "mtime": int(time.time()), + } + + copies = json.dumps(copies) + num_present = len(sources_present) + + db.mktemp('content_archive') + db.copy_to( + ({'content_id': id, + 'copies': copies, + 'num_present': num_present} + for id in content_ids), + 'tmp_content_archive', + ['content_id', 'copies', 'num_present'], + cur) + db.content_archive_add_from_temp(cur) + + +class StubArchiverStorage(): + def __init__(self, archives, present, missing, logfile_base): + """ + A stub storage for the archiver that doesn't write to disk + + Args: + - archives: a dictionary mapping archive names to archive URLs + - present: archives where the objects are all considered present + - missing: archives where the objects are all considered missing + - logfile_base: basename for the logfile + """ + self.archives = archives + self.present = set(present) + self.missing = set(missing) + if set(archives) != self.present | self.missing: + raise ValueError("Present and missing archives don't match") + self.logfile_base = logfile_base + self.__logfile = None + + def open_logfile(self): + if self.__logfile: + return + + logfile_name = "%s.%d" % (self.logfile_base, os.getpid()) + self.__logfile = open(logfile_name, 'a') + + def close_logfile(self): + if not self.__logfile: + return + + self.__logfile.close() + self.__logfile = None + + def archive_ls(self, cur=None): + """ Get all the archives registered on the server. + + Yields: + a tuple (server_id, server_url) for each archive server. + """ + yield from self.archives.items() + + def content_archive_get(self, content_id, cur=None): + """ Get the archival status of a content. + + Retrieve from the database the archival status of the given content + + Args: + content_id: the sha1 of the content + + Yields: + A tuple (content_id, present_copies, ongoing_copies), where + ongoing_copies is a dict mapping copy to mtime. + """ + return (content_id, self.present, {}) + + def content_archive_get_copies(self, last_content=None, limit=1000, + cur=None): + """ Get the list of copies for `limit` contents starting after + `last_content`. + + Args: + last_content: sha1 of the last content retrieved. May be None + to start at the beginning. + limit: number of contents to retrieve. Can be None to retrieve all + objects (will be slow). + + Yields: + A tuple (content_id, present_copies, ongoing_copies), where + ongoing_copies is a dict mapping copy to mtime. """ - self.db.content_archive_insert(content_id, source, status, cur) + yield from [] + + def content_archive_get_unarchived_copies(self, retention_policy, + last_content=None, limit=1000, + cur=None): + """ Get the list of copies for `limit` contents starting after + `last_content`. Yields only copies with number of present + smaller than `retention policy`. + + Args: + last_content: sha1 of the last content retrieved. May be None + to start at the beginning. + retention_policy: number of required present copies + limit: number of contents to retrieve. Can be None to retrieve all + objects (will be slow). + + Yields: + A tuple (content_id, present_copies, ongoing_copies), where + ongoing_copies is a dict mapping copy to mtime. 
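For reference, the rows content_archive_add stages into the temporary table look roughly like this; a sketch where the source names come from the sample configuration and the sha1 is a placeholder:

```
import json
import time

# Sketch of one staged row: a JSON `copies` document plus its num_present.
sources_present = ['uffizi', 'banco']
copies = {source: {'status': 'present', 'mtime': int(time.time())}
          for source in sources_present}
row = {
    'content_id': b'\x00' * 20,          # placeholder sha1
    'copies': json.dumps(copies),        # e.g. {"uffizi": {"status": ...}}
    'num_present': len(sources_present),
}
```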
+ + """ + yield from [] + + def content_archive_get_missing(self, content_ids, backend_name, cur=None): + """Retrieve missing sha1s from source_name. + + Args: + content_ids ([sha1s]): list of sha1s to test + source_name (str): Name of the backend to check for content + + Yields: + missing sha1s from backend_name + + """ + if backend_name in self.missing: + yield from content_ids + elif backend_name in self.present: + yield from [] + else: + raise ValueError('Unknown backend `%s`' % backend_name) + + def content_archive_get_unknown(self, content_ids, cur=None): + """Retrieve unknown sha1s from content_archive. + + Args: + content_ids ([sha1s]): list of sha1s to test + + Yields: + Unknown sha1s from content_archive + + """ + yield from [] + + def content_archive_update(self, content_id, archive_id, + new_status=None, cur=None): + """ Update the status of an archive content and set its mtime to now + + Change the mtime of an archived content for the given archive and set + it's mtime to the current time. + + Args: + content_id (str): content sha1 + archive_id (str): name of the archive + new_status (str): one of 'missing', 'present' or 'ongoing'. + this status will replace the previous one. If not given, + the function only change the mtime of the content for the + given archive. + """ + if not self.__logfile: + self.open_logfile() + + print(time.time(), archive_id, new_status, + hashutil.hash_to_hex(content_id), file=self.__logfile) + + def content_archive_add( + self, content_ids, sources_present, cur=None): + """Insert a new entry in db about content_id. + + Args: + content_ids ([bytes|str]): content identifiers + sources_present ([str]): List of source names where + contents are present + """ + pass + + +def get_archiver_storage(cls, args): + """Instantiate an archiver database with the proper class and arguments""" + if cls == 'db': + return ArchiverStorage(**args) + elif cls == 'stub': + return StubArchiverStorage(**args) + else: + raise ValueError('Unknown Archiver Storage class `%s`' % cls) diff --git a/swh/storage/archiver/worker.py b/swh/storage/archiver/worker.py index 52f54713d..724942fed 100644 --- a/swh/storage/archiver/worker.py +++ b/swh/storage/archiver/worker.py @@ -1,416 +1,421 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import logging import random import time from collections import defaultdict from celery import group from swh.core import hashutil, config, utils from swh.objstorage import get_objstorage from swh.objstorage.exc import Error, ObjNotFoundError -from swh.scheduler.celery_backend.config import app +from swh.scheduler.utils import get_task -from .storage import ArchiverStorage +from .storage import get_archiver_storage from .copier import ArchiverCopier logger = logging.getLogger('archiver.worker') class BaseArchiveWorker(config.SWHConfig, metaclass=abc.ABCMeta): """Base archive worker. 
Inherit from this class and override: - ADDITIONAL_CONFIG: Some added configuration needed for the worker to work - CONFIG_BASE_FILENAME: relative path to look up the configuration file - def need_archival(self, content_data): Determine if a content needs archival or not - def choose_backup_servers(self, present, missing): Choose which backup server to send copies to """ DEFAULT_CONFIG = { - 'dbconn': ('str', 'dbname=softwareheritage-archiver-dev'), + 'archiver_storage': ('dict', { + 'cls': 'db', + 'args': { + 'dbconn': 'dbname=softwareheritage-archiver-dev user=guest', + }, + }), 'storages': ('list[dict]', [ {'host': 'uffizi', 'cls': 'pathslicing', 'args': {'root': '/tmp/softwareheritage/objects', 'slicing': '0:2/2:4/4:6'}}, {'host': 'banco', 'cls': 'remote', 'args': {'base_url': 'http://banco:5003/'}} ]) } ADDITIONAL_CONFIG = {} CONFIG_BASE_FILENAME = 'archiver/worker' objstorages = {} def __init__(self, batch): super().__init__() self.config = self.parse_config_file( additional_configs=[self.ADDITIONAL_CONFIG]) self.batch = batch - self.archiver_db = ArchiverStorage(self.config['dbconn']) + self.archiver_db = get_archiver_storage( + **self.config['archiver_storage']) self.objstorages = { storage['host']: get_objstorage(storage['cls'], storage['args']) for storage in self.config.get('storages', []) } self.set_objstorages = set(self.objstorages) def run(self): """Do the task expected from the archiver worker. Process the contents in self.batch, ensure that the elements still need an archival (using archiver db), and spawn copiers to copy files in each destination according to the archiver-worker's policy. """ transfers = defaultdict(list) for obj_id in self.batch: # Get dict {'missing': [servers], 'present': [servers]} # for contents, ignoring those which don't need archival. copies = self.compute_copies(self.set_objstorages, obj_id) if not copies: # cannot happen when using the .director module msg = 'Unknown content %s' % hashutil.hash_to_hex(obj_id) logger.warning(msg) continue if not self.need_archival(copies): continue - present = copies.get('present', []) - missing = copies.get('missing', []) + present = copies.get('present', set()) + missing = copies.get('missing', set()) if len(present) == 0: msg = 'Lost content %s' % hashutil.hash_to_hex(obj_id) logger.critical(msg) continue # Choose servers to be used as srcs and dests. for src_dest in self.choose_backup_servers(present, missing): transfers[src_dest].append(obj_id) # Then run copiers for each of the required transfers. contents_copied = [] for (src, dest), content_ids in transfers.items(): contents_copied.extend(self.run_copier(src, dest, content_ids)) # copy is done; optionally do something else with them self.copy_finished(contents_copied) def compute_copies(self, set_objstorages, content_id): """From a content_id, return present and missing copies. Args: set_objstorages (set): names of the objstorages to consider content_id: the content concerned Returns: - A dictionary with keys 'present' and 'missing' that are - mapped to lists of copies ids depending on whenever the - content is present or missing on the copy. - - There is also the key 'ongoing' which is associated with a - dict that map to a copy name the mtime of the ongoing - status update. - + A dictionary with the following keys: + - 'present': set of archives where the content is present + - 'missing': set of archives where the content is missing + - 'ongoing': ongoing copies: dict mapping the archive id + to the time the copy supposedly started.
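The set arithmetic behind that return value can be pictured with a small standalone sketch; the archive names reuse those from the sample configuration:

```
# Sketch of compute_copies' set arithmetic, restricted to known objstorages.
set_objstorages = {'uffizi', 'banco', 'azure'}
present = ['uffizi']                  # archives with a full copy
ongoing = {'banco': 1483228800}       # archive -> copy start time

set_present = set_objstorages & set(present)
set_ongoing = set_objstorages & set(ongoing)
set_missing = set_objstorages - set_present - set_ongoing
assert set_missing == {'azure'}
```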
""" result = self.archiver_db.content_archive_get(content_id) if not result: return None _, present, ongoing = result - set_present = set(present) - set_ongoing = set(ongoing) + set_present = set_objstorages & set(present) + set_ongoing = set_objstorages & set(ongoing) set_missing = set_objstorages - set_present - set_ongoing return { 'present': set_present, 'missing': set_missing, - 'ongoing': ongoing + 'ongoing': {archive: value + for archive, value in ongoing.items() + if archive in set_ongoing}, } def run_copier(self, source, destination, content_ids): """Run a copier in order to archive the given contents. Upload the given contents from the source to the destination. If the process fails, the whole content is considered uncopied and remains 'ongoing', waiting to be rescheduled as there is a delay. Args: source (str): source storage's identifier destination (str): destination storage's identifier content_ids ([sha1]): list of content ids to archive. """ # Check if there are any errors among the contents. content_status = self.get_contents_error(content_ids, source) # Iterates over the error detected. for content_id, real_status in content_status.items(): # Remove them from the to-archive list, # as they cannot be retrieved correctly. content_ids.remove(content_id) # Update their status to reflect their real state. self.archiver_db.content_archive_update( content_id, archive_id=source, new_status=real_status) # Now perform the copy on the remaining contents ac = ArchiverCopier( source=self.objstorages[source], destination=self.objstorages[destination], content_ids=content_ids) if ac.run(): # Once the archival complete, update the database. for content_id in content_ids: self.archiver_db.content_archive_update( content_id, archive_id=destination, new_status='present') return content_ids return [] def copy_finished(self, content_ids): """Hook to notify the content_ids archive copy is finished. (This is not an abstract method as this is optional """ pass def get_contents_error(self, content_ids, source_storage): """Indicates what is the error associated to a content when needed Check the given content on the given storage. If an error is detected, it will be reported through the returned dict. Args: content_ids ([sha1]): list of content ids to check source_storage (str): the source storage holding the contents to check. Returns: a dict that map {content_id -> error_status} for each content_id with an error. The `error_status` result may be 'missing' or 'corrupted'. """ content_status = {} storage = self.objstorages[source_storage] for content_id in content_ids: try: storage.check(content_id) except Error: content_status[content_id] = 'corrupted' logger.error('%s corrupted!' % hashutil.hash_to_hex( content_id)) except ObjNotFoundError: content_status[content_id] = 'missing' logger.error('%s missing!' % hashutil.hash_to_hex(content_id)) return content_status @abc.abstractmethod def need_archival(self, content_data): """Indicate if the content needs to be archived. Args: content_data (dict): dict that contains two lists 'present' and 'missing' with copies id corresponding to this status. Returns: True if there is not enough copies, False otherwise. """ pass @abc.abstractmethod def choose_backup_servers(self, present, missing): """Choose and yield the required amount of couple source/destination For each required copy, choose a unique destination server among the missing copies and a source server among the presents. 
Args: present: set of objstorage source names where the content is present missing: set of objstorage destination names where the content is missing Yields: tuple (source (str), destination (str)) for each required copy. """ pass class ArchiverWithRetentionPolicyWorker(BaseArchiveWorker): """ Do the required backups on a given batch of contents. Process the content of a content batch in order to do the needed backups on the slave servers. """ ADDITIONAL_CONFIG = { 'retention_policy': ('int', 2), 'archival_max_age': ('int', 3600), } def __init__(self, batch): """ Constructor of the ArchiverWorker class. Args: batch: list of objects' sha1s that potentially need archival. """ super().__init__(batch) config = self.config self.retention_policy = config['retention_policy'] self.archival_max_age = config['archival_max_age'] if len(self.objstorages) < self.retention_policy: raise ValueError('Retention policy is too high for the number of ' 'provided servers') def need_archival(self, content_data): """ Indicate whether the content needs to be archived. Args: content_data (dict): dict that contains two lists 'present' and 'missing' with copies id corresponding to this status. Returns: True if there are not enough copies, False otherwise. """ nb_presents = len(content_data.get('present', [])) for copy, mtime in content_data.get('ongoing', {}).items(): if not self._is_archival_delay_elapsed(mtime): nb_presents += 1 return nb_presents < self.retention_policy def _is_archival_delay_elapsed(self, start_time): """ Indicate whether the archival delay has elapsed, given the start_time Args: start_time (float): time at which the archival started. Returns: True if the archival delay has elapsed, False otherwise """ elapsed = time.time() - start_time return elapsed > self.archival_max_age def choose_backup_servers(self, present, missing): """Choose and yield the required number of (source, destination) couples For each required copy, choose a unique destination server among the missing copies and a source server among the present ones. Each destination server is unique, so after archival the retention policy requirement will be fulfilled. However, the source server may be used multiple times. Args: present: set of objstorage source names where the content is present missing: set of objstorage destination names where the content is missing Yields: tuple (source, destination) for each required copy. """ # Transform from set to list to allow random selections missing = list(missing) present = list(present) nb_required = self.retention_policy - len(present) destinations = random.sample(missing, nb_required) sources = [random.choice(present) for dest in destinations] yield from zip(sources, destinations) class ArchiverToBackendWorker(BaseArchiveWorker): """Worker that sends copies over from a source to another backend. Process the content of a content batch from source objstorage to destination objstorage. """ CONFIG_BASE_FILENAME = 'archiver/worker-to-backend' ADDITIONAL_CONFIG = { 'next_task': ( 'dict', { 'queue': 'swh.indexer.tasks.SWHOrchestratorAllContentsTask', 'batch_size': 10, } ) } def __init__(self, destination, batch): """Constructor of the ArchiverToBackendWorker class.
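The pairing logic of the retention worker's choose_backup_servers above condenses into this standalone sketch:

```
import random

# Sketch: one unique destination per missing copy; sources may repeat.
present = ['uffizi']                 # archives already holding the content
missing = ['banco', 'azure']         # candidate destination archives
retention_policy = 2

nb_required = retention_policy - len(present)        # copies still needed
destinations = random.sample(missing, nb_required)   # unique destinations
sources = [random.choice(present) for _ in destinations]
transfers = list(zip(sources, destinations))         # [(source, dest), ...]
```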
Args: destination: where to copy the objects from batch: sha1s to send to destination """ super().__init__(batch) self.destination = destination next_task = self.config['next_task'] destination_queue = next_task['queue'] - self.task_destination = app.tasks[destination_queue] + self.task_destination = get_task(destination_queue) self.batch_size = int(next_task['batch_size']) def need_archival(self, content_data): """Indicate if the content needs to be archived. Args: content_data (dict): dict that contains 3 lists 'present', 'ongoing' and 'missing' with copies id corresponding to this status. Returns: True if we need to archive, False otherwise """ return self.destination in content_data.get('missing', {}) def choose_backup_servers(self, present, missing): """The destination is fixed to the destination mentioned. The only variable here is the source of information that we choose randomly in 'present'. Args: present: set of objstorage source name where the content is present missing: set of objstorage destination name where the content is missing Yields: tuple (source, destination) for each required copy. """ yield (random.choice(list(present)), self.destination) def copy_finished(self, content_ids): """Once the copy is finished, we'll send those batch of contents as done in the destination queue. """ groups = [] for ids in utils.grouper(content_ids, self.batch_size): sig_ids = self.task_destination.s(list(ids)) groups.append(sig_ids) group(groups).delay() diff --git a/swh/storage/db.py b/swh/storage/db.py index 73c2214d1..82e128439 100644 --- a/swh/storage/db.py +++ b/swh/storage/db.py @@ -1,966 +1,971 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime import functools import json import psycopg2 import psycopg2.extras import select import tempfile from contextlib import contextmanager from swh.core import hashutil TMP_CONTENT_TABLE = 'tmp_content' psycopg2.extras.register_uuid() def stored_procedure(stored_proc): """decorator to execute remote stored procedure, specified as argument Generally, the body of the decorated function should be empty. If it is not, the stored procedure will be executed first; the function body then. """ def wrap(meth): @functools.wraps(meth) def _meth(self, *args, **kwargs): cur = kwargs.get('cur', None) self._cursor(cur).execute('SELECT %s()' % stored_proc) meth(self, *args, **kwargs) return _meth return wrap def jsonize(value): """Convert a value to a psycopg2 JSON object if necessary""" if isinstance(value, dict): return psycopg2.extras.Json(value) return value def entry_to_bytes(entry): """Convert an entry coming from the database to bytes""" if isinstance(entry, memoryview): return entry.tobytes() if isinstance(entry, list): return [entry_to_bytes(value) for value in entry] return entry def line_to_bytes(line): """Convert a line coming from the database to bytes""" if not line: return line if isinstance(line, dict): return {k: entry_to_bytes(v) for k, v in line.items()} return line.__class__(entry_to_bytes(entry) for entry in line) def cursor_to_bytes(cursor): """Yield all the data from a cursor as bytes""" yield from (line_to_bytes(line) for line in cursor) class BaseDb: """Base class for swh.storage.*Db. cf. 
swh.storage.db.Db, swh.storage.archiver.db.ArchiverDb """ @classmethod def connect(cls, *args, **kwargs): """factory method to create a DB proxy Accepts all arguments of psycopg2.connect; only some specific possibilities are reported below. Args: connstring: libpq2 connection string """ conn = psycopg2.connect(*args, **kwargs) return cls(conn) def _cursor(self, cur_arg): """get a cursor: from cur_arg if given, or a fresh one otherwise meant to avoid boilerplate if/then/else in methods that proxy stored procedures """ if cur_arg is not None: return cur_arg # elif self.cur is not None: # return self.cur else: return self.conn.cursor() def __init__(self, conn): """create a DB proxy Args: conn: psycopg2 connection to the SWH DB """ self.conn = conn @contextmanager def transaction(self): """context manager to execute within a DB transaction Yields: a psycopg2 cursor """ with self.conn.cursor() as cur: try: yield cur self.conn.commit() except: if not self.conn.closed: self.conn.rollback() raise def copy_to(self, items, tblname, columns, cur=None, item_cb=None): """Copy items' entries to table tblname with columns information. Args: items (dict): dictionary of data to copy over tblname tblname (str): Destination table's name columns ([str]): keys to access data in items and also the column names in the destination table. item_cb (fn): optional function to apply to items's entry """ def escape(data): if data is None: return '' if isinstance(data, bytes): return '\\x%s' % binascii.hexlify(data).decode('ascii') elif isinstance(data, str): return '"%s"' % data.replace('"', '""') elif isinstance(data, datetime.datetime): # We escape twice to make sure the string generated by # isoformat gets escaped return escape(data.isoformat()) elif isinstance(data, dict): return escape(json.dumps(data)) elif isinstance(data, list): return escape("{%s}" % ','.join(escape(d) for d in data)) elif isinstance(data, psycopg2.extras.Range): # We escape twice here too, so that we make sure # everything gets passed to copy properly return escape( '%s%s,%s%s' % ( '[' if data.lower_inc else '(', '-infinity' if data.lower_inf else escape(data.lower), 'infinity' if data.upper_inf else escape(data.upper), ']' if data.upper_inc else ')', ) ) else: # We don't escape here to make sure we pass literals properly return str(data) with tempfile.TemporaryFile('w+') as f: for d in items: if item_cb is not None: item_cb(d) line = [escape(d.get(k)) for k in columns] f.write(','.join(line)) f.write('\n') f.seek(0) self._cursor(cur).copy_expert('COPY %s (%s) FROM STDIN CSV' % ( tblname, ', '.join(columns)), f) + def mktemp(self, tblname, cur=None): + self._cursor(cur).execute('SELECT swh_mktemp(%s)', (tblname,)) + class Db(BaseDb): """Proxy to the SWH DB, with wrappers around stored procedures """ - def mktemp(self, tblname, cur=None): - self._cursor(cur).execute('SELECT swh_mktemp(%s)', (tblname,)) - def mktemp_dir_entry(self, entry_type, cur=None): self._cursor(cur).execute('SELECT swh_mktemp_dir_entry(%s)', (('directory_entry_%s' % entry_type),)) @stored_procedure('swh_mktemp_revision') def mktemp_revision(self, cur=None): pass @stored_procedure('swh_mktemp_release') def mktemp_release(self, cur=None): pass @stored_procedure('swh_mktemp_occurrence_history') def mktemp_occurrence_history(self, cur=None): pass @stored_procedure('swh_mktemp_entity_lister') def mktemp_entity_lister(self, cur=None): pass @stored_procedure('swh_mktemp_entity_history') def mktemp_entity_history(self, cur=None): pass @stored_procedure('swh_mktemp_bytea') def 
mktemp_bytea(self, cur=None): pass @stored_procedure('swh_mktemp_content_ctags') def mktemp_content_ctags(self, cur=None): pass @stored_procedure('swh_mktemp_content_ctags_missing') def mktemp_content_ctags_missing(self, cur=None): pass def register_listener(self, notify_queue, cur=None): """Register a listener for NOTIFY queue `notify_queue`""" self._cursor(cur).execute("LISTEN %s" % notify_queue) def listen_notifies(self, timeout): """Listen to notifications for `timeout` seconds""" if select.select([self.conn], [], [], timeout) == ([], [], []): return else: self.conn.poll() while self.conn.notifies: yield self.conn.notifies.pop(0) @stored_procedure('swh_content_add') def content_add_from_temp(self, cur=None): pass @stored_procedure('swh_directory_add') def directory_add_from_temp(self, cur=None): pass @stored_procedure('swh_skipped_content_add') def skipped_content_add_from_temp(self, cur=None): pass @stored_procedure('swh_revision_add') def revision_add_from_temp(self, cur=None): pass @stored_procedure('swh_release_add') def release_add_from_temp(self, cur=None): pass @stored_procedure('swh_occurrence_history_add') def occurrence_history_add_from_temp(self, cur=None): pass @stored_procedure('swh_entity_history_add') def entity_history_add_from_temp(self, cur=None): pass @stored_procedure('swh_cache_content_revision_add') def cache_content_revision_add(self, cur=None): pass def store_tmp_bytea(self, ids, cur=None): """Store the given identifiers in a new tmp_bytea table""" cur = self._cursor(cur) self.mktemp_bytea(cur) self.copy_to(({'id': elem} for elem in ids), 'tmp_bytea', ['id'], cur) + def content_update_from_temp(self, keys_to_update, cur=None): + cur = self._cursor(cur) + cur.execute("""select swh_content_update(ARRAY[%s] :: text[])""" % + keys_to_update) + content_get_metadata_keys = ['sha1', 'sha1_git', 'sha256', 'length', 'status'] def content_get_metadata_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("""select t.id as sha1, %s from tmp_bytea t left join content on t.id = content.sha1 """ % ', '.join(self.content_get_metadata_keys[1:])) yield from cursor_to_bytes(cur) def content_missing_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("""SELECT sha1, sha1_git, sha256 FROM swh_content_missing()""") yield from cursor_to_bytes(cur) def content_missing_per_sha1_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("""SELECT * FROM swh_content_missing_per_sha1()""") yield from cursor_to_bytes(cur) def skipped_content_missing_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("""SELECT sha1, sha1_git, sha256 FROM swh_skipped_content_missing()""") yield from cursor_to_bytes(cur) def occurrence_get(self, origin_id, cur=None): """Retrieve latest occurrence's information by origin_id. """ cur = self._cursor(cur) cur.execute("""SELECT origin, branch, target, target_type, (select max(date) from origin_visit where origin=%s) as date FROM occurrence WHERE origin=%s """, (origin_id, origin_id)) yield from cursor_to_bytes(cur) def content_find(self, sha1=None, sha1_git=None, sha256=None, cur=None): """Find a content from any combination of the checksums sha1, sha1_git or sha256. Args: sha1: sha1 of the content sha1_git: the sha1 of the content computed `a la git` sha256: sha256 of the content Returns: The tuple (sha1, sha1_git, sha256, length, ctime, status) if found, None otherwise.
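A usage sketch for content_find, assuming an already connected Db instance `db` and an invented hash:

```
# Sketch: looking up a content by one (or several) of its checksums.
sha1 = bytes.fromhex('34973274ccef6ab4dfaaf86599792fa9c3fe4689')  # invented
found = db.content_find(sha1=sha1)
if found is None:
    print('unknown content')
else:
    sha1, sha1_git, sha256, length, ctime, status = found
    print('content of length %d with status %r' % (length, status))
```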
""" cur = self._cursor(cur) cur.execute("""SELECT sha1, sha1_git, sha256, length, ctime, status FROM swh_content_find(%s, %s, %s) LIMIT 1""", (sha1, sha1_git, sha256)) content = line_to_bytes(cur.fetchone()) if set(content) == {None}: return None else: return content provenance_cols = ['content', 'revision', 'origin', 'visit', 'path'] def content_find_provenance(self, sha1_git, cur=None): """Find content's provenance information Args: sha1: sha1_git content cur: cursor to use Returns: Provenance information on such content """ cur = self._cursor(cur) cur.execute("""SELECT content, revision, origin, visit, path FROM swh_content_find_provenance(%s)""", (sha1_git, )) yield from cursor_to_bytes(cur) def directory_get_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute('''SELECT id, file_entries, dir_entries, rev_entries FROM swh_directory_get()''') yield from cursor_to_bytes(cur) def directory_missing_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute('SELECT * FROM swh_directory_missing()') yield from cursor_to_bytes(cur) directory_ls_cols = ['dir_id', 'type', 'target', 'name', 'perms', 'status', 'sha1', 'sha1_git', 'sha256'] def directory_walk_one(self, directory, cur=None): cur = self._cursor(cur) cur.execute('SELECT * FROM swh_directory_walk_one(%s)', (directory,)) yield from cursor_to_bytes(cur) def directory_walk(self, directory, cur=None): cur = self._cursor(cur) cur.execute('SELECT * FROM swh_directory_walk(%s)', (directory,)) yield from cursor_to_bytes(cur) def revision_missing_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute('SELECT id FROM swh_revision_missing() as r(id)') yield from cursor_to_bytes(cur) revision_add_cols = [ 'id', 'date', 'date_offset', 'date_neg_utc_offset', 'committer_date', 'committer_date_offset', 'committer_date_neg_utc_offset', 'type', 'directory', 'message', 'author_fullname', 'author_name', 'author_email', 'committer_fullname', 'committer_name', 'committer_email', 'metadata', 'synthetic', ] revision_get_cols = revision_add_cols + [ 'author_id', 'committer_id', 'parents'] def origin_visit_add(self, origin, ts, cur=None): """Add a new origin_visit for origin origin at timestamp ts with status 'ongoing'. Args: origin: origin concerned by the visit ts: the date of the visit Returns: The new visit index step for that origin """ cur = self._cursor(cur) self._cursor(cur).execute('SELECT swh_origin_visit_add(%s, %s)', (origin, ts)) return cur.fetchone()[0] def origin_visit_update(self, origin, visit_id, status, metadata, cur=None): """Update origin_visit's status.""" cur = self._cursor(cur) update = """UPDATE origin_visit SET status=%s, metadata=%s WHERE origin=%s AND visit=%s""" cur.execute(update, (status, jsonize(metadata), origin, visit_id)) origin_visit_get_cols = ['origin', 'visit', 'date', 'status', 'metadata'] def origin_visit_get_all(self, origin_id, last_visit=None, limit=None, cur=None): """Retrieve all visits for origin with id origin_id. Args: origin_id: The occurrence's origin Yields: The occurrence's history visits """ cur = self._cursor(cur) query_suffix = '' if last_visit: query_suffix += ' AND %s < visit' % last_visit if limit: query_suffix += ' LIMIT %s' % limit query = """\ SELECT %s FROM origin_visit WHERE origin=%%s %s""" % ( ', '.join(self.origin_visit_get_cols), query_suffix) cur.execute(query, (origin_id, )) yield from cursor_to_bytes(cur) def origin_visit_get(self, origin_id, visit_id, cur=None): """Retrieve information on visit visit_id of origin origin_id. 
Args: origin_id: the origin concerned visit_id: The visit step for that origin Returns: The origin_visit information """ cur = self._cursor(cur) query = """\ SELECT %s FROM origin_visit WHERE origin = %%s AND visit = %%s """ % (', '.join(self.origin_visit_get_cols)) cur.execute(query, (origin_id, visit_id)) r = cur.fetchall() if not r: return None return line_to_bytes(r[0]) occurrence_cols = ['origin', 'branch', 'target', 'target_type'] def occurrence_by_origin_visit(self, origin_id, visit_id, cur=None): """Retrieve all occurrences for a particular origin_visit. Args: origin_id: the origin concerned visit_id: The visit step for that origin Yields: The occurrences for that origin visit """ cur = self._cursor(cur) query = """\ SELECT %s FROM swh_occurrence_by_origin_visit(%%s, %%s) """ % (', '.join(self.occurrence_cols)) cur.execute(query, (origin_id, visit_id)) yield from cursor_to_bytes(cur) def revision_get_from_temp(self, cur=None): cur = self._cursor(cur) query = 'SELECT %s FROM swh_revision_get()' % ( ', '.join(self.revision_get_cols)) cur.execute(query) yield from cursor_to_bytes(cur) def revision_log(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_log(%%s, %%s) """ % ', '.join(self.revision_get_cols) cur.execute(query, (root_revisions, limit)) yield from cursor_to_bytes(cur) revision_shortlog_cols = ['id', 'parents'] def revision_shortlog(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_list(%%s, %%s) """ % ', '.join(self.revision_shortlog_cols) cur.execute(query, (root_revisions, limit)) yield from cursor_to_bytes(cur) cache_content_get_cols = [ 'sha1', 'sha1_git', 'sha256', 'revision_paths'] def cache_content_get_all(self, cur=None): """Retrieve cache contents' sha1, sha256, sha1_git """ cur = self._cursor(cur) cur.execute('SELECT * FROM swh_cache_content_get_all()') yield from cursor_to_bytes(cur) def cache_content_get(self, sha1_git, cur=None): """Retrieve cache content information for the given sha1_git.
""" cur = self._cursor(cur) cur.execute('SELECT * FROM swh_cache_content_get(%s)', (sha1_git, )) data = cur.fetchone() if data: return line_to_bytes(data) return None def cache_revision_origin_add(self, origin, visit, cur=None): """Populate the content provenance information cache for the given (origin, visit) couple.""" cur = self._cursor(cur) cur.execute('SELECT * FROM swh_cache_revision_origin_add(%s, %s)', (origin, visit)) yield from cursor_to_bytes(cur) def release_missing_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute('SELECT id FROM swh_release_missing() as r(id)') yield from cursor_to_bytes(cur) object_find_by_sha1_git_cols = ['sha1_git', 'type', 'id', 'object_id'] def object_find_by_sha1_git(self, ids, cur=None): cur = self._cursor(cur) self.store_tmp_bytea(ids, cur) query = 'select %s from swh_object_find_by_sha1_git()' % ( ', '.join(self.object_find_by_sha1_git_cols) ) cur.execute(query) yield from cursor_to_bytes(cur) def stat_counters(self, cur=None): cur = self._cursor(cur) cur.execute('SELECT * FROM swh_stat_counters()') yield from cur fetch_history_cols = ['origin', 'date', 'status', 'result', 'stdout', 'stderr', 'duration'] def create_fetch_history(self, fetch_history, cur=None): """Create a fetch_history entry with the data in fetch_history""" cur = self._cursor(cur) query = '''INSERT INTO fetch_history (%s) VALUES (%s) RETURNING id''' % ( ','.join(self.fetch_history_cols), ','.join(['%s'] * len(self.fetch_history_cols)) ) cur.execute(query, [fetch_history.get(col) for col in self.fetch_history_cols]) return cur.fetchone()[0] def get_fetch_history(self, fetch_history_id, cur=None): """Get a fetch_history entry with the given id""" cur = self._cursor(cur) query = '''SELECT %s FROM fetch_history WHERE id=%%s''' % ( ', '.join(self.fetch_history_cols), ) cur.execute(query, (fetch_history_id,)) data = cur.fetchone() if not data: return None ret = {'id': fetch_history_id} for i, col in enumerate(self.fetch_history_cols): ret[col] = data[i] return ret def update_fetch_history(self, fetch_history, cur=None): """Update the fetch_history entry from the data in fetch_history""" cur = self._cursor(cur) query = '''UPDATE fetch_history SET %s WHERE id=%%s''' % ( ','.join('%s=%%s' % col for col in self.fetch_history_cols) ) cur.execute(query, [jsonize(fetch_history.get(col)) for col in self.fetch_history_cols + ['id']]) base_entity_cols = ['uuid', 'parent', 'name', 'type', 'description', 'homepage', 'active', 'generated', 'lister_metadata', 'metadata'] entity_cols = base_entity_cols + ['last_seen', 'last_id'] entity_history_cols = base_entity_cols + ['id', 'validity'] def origin_add(self, type, url, cur=None): """Insert a new origin and return the new identifier.""" insert = """INSERT INTO origin (type, url) values (%s, %s) RETURNING id""" cur.execute(insert, (type, url)) return cur.fetchone()[0] def origin_get_with(self, type, url, cur=None): """Retrieve the origin id from its type and url if found.""" cur = self._cursor(cur) query = """SELECT id, type, url, lister, project FROM origin WHERE type=%s AND url=%s""" cur.execute(query, (type, url)) data = cur.fetchone() if data: return line_to_bytes(data) return None def origin_get(self, id, cur=None): """Retrieve the origin per its identifier. 
""" cur = self._cursor(cur) query = "SELECT id, type, url, lister, project FROM origin WHERE id=%s" cur.execute(query, (id,)) data = cur.fetchone() if data: return line_to_bytes(data) return None person_cols = ['fullname', 'name', 'email'] person_get_cols = person_cols + ['id'] def person_add(self, person, cur=None): """Add a person identified by its name and email. Returns: The new person's id """ cur = self._cursor(cur) query_new_person = '''\ INSERT INTO person(%s) VALUES (%s) RETURNING id''' % ( ', '.join(self.person_cols), ', '.join('%s' for i in range(len(self.person_cols))) ) cur.execute(query_new_person, [person[col] for col in self.person_cols]) return cur.fetchone()[0] def person_get(self, ids, cur=None): """Retrieve the persons identified by the list of ids. """ cur = self._cursor(cur) query = """SELECT %s FROM person WHERE id IN %%s""" % ', '.join(self.person_get_cols) cur.execute(query, (tuple(ids),)) yield from cursor_to_bytes(cur) release_add_cols = [ 'id', 'target', 'target_type', 'date', 'date_offset', 'date_neg_utc_offset', 'name', 'comment', 'synthetic', 'author_fullname', 'author_name', 'author_email', ] release_get_cols = release_add_cols + ['author_id'] def release_get_from_temp(self, cur=None): cur = self._cursor(cur) query = ''' SELECT %s FROM swh_release_get() ''' % ', '.join(self.release_get_cols) cur.execute(query) yield from cursor_to_bytes(cur) def release_get_by(self, origin_id, limit=None, cur=None): """Retrieve a release by occurrence criterion (only origin right now) Args: - origin_id: The origin to look for. """ cur = self._cursor(cur) query = """ SELECT %s FROM swh_release_get_by(%%s) LIMIT %%s """ % ', '.join(self.release_get_cols) cur.execute(query, (origin_id, limit)) yield from cursor_to_bytes(cur) def revision_get_by(self, origin_id, branch_name, datetime, limit=None, cur=None): """Retrieve a revision by occurrence criterion. Args: - origin_id: The origin to look for - branch_name: the branch name to look for - datetime: the lower bound of timerange to look for. - limit: limit number of results to return The upper bound being now. """ cur = self._cursor(cur) if branch_name and isinstance(branch_name, str): branch_name = branch_name.encode('utf-8') query = ''' SELECT %s FROM swh_revision_get_by(%%s, %%s, %%s) LIMIT %%s ''' % ', '.join(self.revision_get_cols) cur.execute(query, (origin_id, branch_name, datetime, limit)) yield from cursor_to_bytes(cur) def directory_entry_get_by_path(self, directory, paths, cur=None): """Retrieve a directory entry by path. """ cur = self._cursor(cur) cur.execute("""SELECT dir_id, type, target, name, perms, status, sha1, sha1_git, sha256 FROM swh_find_directory_entry_by_path(%s, %s)""", (directory, paths)) data = cur.fetchone() if set(data) == {None}: return None return line_to_bytes(data) def entity_get(self, uuid, cur=None): """Retrieve the entity and its parent hierarchy chain per uuid. """ cur = self._cursor(cur) cur.execute("""SELECT %s FROM swh_entity_get(%%s)""" % ( ', '.join(self.entity_cols)), (uuid, )) yield from cursor_to_bytes(cur) def entity_get_one(self, uuid, cur=None): """Retrieve a single entity given its uuid. 
""" cur = self._cursor(cur) cur.execute("""SELECT %s FROM entity WHERE uuid = %%s""" % ( ', '.join(self.entity_cols)), (uuid, )) data = cur.fetchone() if not data: return None return line_to_bytes(data) content_mimetype_cols = ['id', 'mimetype', 'encoding', 'tool_name', 'tool_version'] @stored_procedure('swh_mktemp_content_mimetype_missing') def mktemp_content_mimetype_missing(self, cur=None): pass def content_mimetype_missing_from_temp(self, cur=None): """List missing mimetypes. """ cur = self._cursor(cur) cur.execute("SELECT * FROM swh_content_mimetype_missing()") yield from cursor_to_bytes(cur) @stored_procedure('swh_mktemp_content_mimetype') def mktemp_content_mimetype(self, cur=None): pass def content_mimetype_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_mimetype_add(%s)", (conflict_update, )) content_language_cols = ['id', 'lang', 'tool_name', 'tool_version'] @stored_procedure('swh_mktemp_content_language') def mktemp_content_language(self, cur=None): pass def content_mimetype_get_from_temp(self, cur=None): cur = self._cursor(cur) query = "SELECT %s FROM swh_content_mimetype_get()" % ( ','.join(self.content_mimetype_cols)) cur.execute(query) yield from cursor_to_bytes(cur) @stored_procedure('swh_mktemp_content_language_missing') def mktemp_content_language_missing(self, cur=None): pass def content_language_missing_from_temp(self, cur=None): """List missing languages. """ cur = self._cursor(cur) cur.execute("SELECT * FROM swh_content_language_missing()") yield from cursor_to_bytes(cur) def content_language_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_language_add(%s)", (conflict_update, )) def content_language_get_from_temp(self, cur=None): cur = self._cursor(cur) query = "SELECT %s FROM swh_content_language_get()" % ( ','.join(self.content_language_cols)) cur.execute(query) yield from cursor_to_bytes(cur) def content_ctags_missing_from_temp(self, cur=None): """List missing ctags. """ cur = self._cursor(cur) cur.execute("SELECT * FROM swh_content_ctags_missing()") yield from cursor_to_bytes(cur) def content_ctags_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_ctags_add(%s)", (conflict_update, )) content_ctags_cols = ['id', 'name', 'kind', 'line', 'lang', 'tool_name', 'tool_version'] def content_ctags_get_from_temp(self, cur=None): cur = self._cursor(cur) query = "SELECT %s FROM swh_content_ctags_get()" % ( ','.join(self.content_ctags_cols)) cur.execute(query) yield from cursor_to_bytes(cur) def content_ctags_search(self, expression, last_sha1, limit, cur=None): cur = self._cursor(cur) if not last_sha1: query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s)""" % ( ','.join(self.content_ctags_cols)) cur.execute(query, (expression, limit)) else: if last_sha1 and isinstance(last_sha1, bytes): last_sha1 = '\\x%s' % hashutil.hash_to_hex(last_sha1) elif last_sha1: last_sha1 = '\\x%s' % last_sha1 query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s, %%s)""" % ( ','.join(self.content_ctags_cols)) cur.execute(query, (expression, limit, last_sha1)) yield from cursor_to_bytes(cur) content_fossology_license_cols = ['id', 'tool_name', 'tool_version', 'licenses'] @stored_procedure('swh_mktemp_content_fossology_license_missing') def mktemp_content_fossology_license_missing(self, cur=None): pass def content_fossology_license_missing_from_temp(self, cur=None): """List missing licenses. 
""" cur = self._cursor(cur) cur.execute("SELECT * FROM swh_content_fossology_license_missing()") yield from cursor_to_bytes(cur) @stored_procedure('swh_mktemp_content_fossology_license') def mktemp_content_fossology_license(self, cur=None): pass @stored_procedure('swh_mktemp_content_fossology_license_unknown') def mktemp_content_fossology_license_unknown(self, cur=None): pass def content_fossology_license_add_from_temp(self, conflict_update, cur=None): """Add new licenses per content. """ self._cursor(cur).execute( "SELECT swh_content_fossology_license_add(%s)", (conflict_update, )) def content_fossology_license_get_from_temp(self, cur=None): """Retrieve licenses per content. """ cur = self._cursor(cur) query = "SELECT %s FROM swh_content_fossology_license_get()" % ( ','.join(self.content_fossology_license_cols)) cur.execute(query) yield from cursor_to_bytes(cur) def content_fossology_license_unknown(self, cur=None): """Returns the unknown licenses from tmp_content_fossology_license_unknown. """ cur = self._cursor(cur) cur.execute("SELECT * FROM swh_content_fossology_license_unknown()") yield from cursor_to_bytes(cur) diff --git a/swh/storage/provenance/tasks.py b/swh/storage/provenance/tasks.py index b01c7806e..b1dd26510 100644 --- a/swh/storage/provenance/tasks.py +++ b/swh/storage/provenance/tasks.py @@ -1,112 +1,112 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from celery import group from swh.core import hashutil from swh.core.config import load_named_config from swh.scheduler.task import Task from swh.storage import get_storage BASE_CONFIG_PATH = 'storage/provenance_cache' DEFAULT_CONFIG = { 'storage': ('dict', { 'cls': 'remote', 'args': { - 'url': 'http://localhost:5000/' + 'url': 'http://localhost:5002/' }, }), 'revision_packet_size': ('int', 100), } class PopulateCacheContentRevision(Task): """Populate the content -> revision provenance cache for some revisions""" task_queue = 'swh_populate_cache_content_revision' @property def config(self): if not hasattr(self, '__config'): self.__config = load_named_config(BASE_CONFIG_PATH, DEFAULT_CONFIG) return self.__config def run(self, revisions): """Cache the cache_content_revision table for the revisions provided. Args: revisions: List of revisions to cache populate. 
""" config = self.config storage = get_storage(**config['storage']) storage.cache_content_revision_add( hashutil.hex_to_hash(revision) for revision in revisions ) class PopulateCacheRevisionOrigin(Task): """Populate the revision -> origin provenance cache for one origin's visit""" task_queue = 'swh_populate_cache_revision_origin' @property def config(self): if not hasattr(self, '__config'): self.__config = load_named_config(BASE_CONFIG_PATH, DEFAULT_CONFIG) return self.__config def run(self, origin_id, visit_id): """Cache the cache_revision_origin for the given origin visit Args: origin_id: the origin id to cache visit_id: the visit id to cache This task also creates the revision cache tasks, as well as the task to cache the next origin visit available """ config = self.config storage = get_storage(**config['storage']) packet_size = config['revision_packet_size'] pipelined_tasks = [] visits = sorted( visit['visit'] for visit in storage.origin_visit_get(origin_id) ) if visit_id in visits: revision_task = PopulateCacheContentRevision() new_revisions = [ hashutil.hash_to_hex(revision) for revision in storage.cache_revision_origin_add( origin_id, visit_id) ] if new_revisions: split_new_revisions = [ new_revisions[i:i + packet_size] for i in range(0, packet_size, len(new_revisions)) ] for packet in split_new_revisions: pipelined_tasks.append(revision_task.s(packet)) try: next_visit = min(visit for visit in visits if visit > visit_id) except ValueError: # no next visit, stop pipelining further visits pass else: pipelined_tasks.append(self.s(origin_id, next_visit)) if pipelined_tasks: group(pipelined_tasks).delay() diff --git a/swh/storage/storage.py b/swh/storage/storage.py index 88918689c..f4b057f4f 100644 --- a/swh/storage/storage.py +++ b/swh/storage/storage.py @@ -1,1574 +1,1604 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import datetime import itertools import dateutil.parser import psycopg2 from . 
import converters from .common import db_transaction_generator, db_transaction from .db import Db from .exc import StorageDBError from swh.core.hashutil import ALGORITHMS from swh.objstorage import get_objstorage from swh.objstorage.exc import ObjNotFoundError # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 class Storage(): """SWH storage proxy, encompassing DB and object storage """ def __init__(self, db, objstorage): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection obj_root: path to the root of the object storage """ try: if isinstance(db, psycopg2.extensions.connection): self.db = Db(db) else: self.db = Db.connect(db) except psycopg2.OperationalError as e: raise StorageDBError(e) self.objstorage = get_objstorage(**objstorage) def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" if not self.objstorage.check_config(check_write=check_write): return False # Check permissions on one of the tables with self.db.transaction() as cur: if check_write: check = 'INSERT' else: check = 'SELECT' cur.execute( "select has_table_privilege(current_user, 'content', %s)", (check,) ) return cur.fetchone()[0] return True def content_add(self, content): """Add content blobs to the storage Note: in case of DB errors, objects might have already been added to the object storage and will not be removed. Since addition to the object storage is idempotent, that should not be a problem. Args: content: iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length (default: -1) - one key for each checksum algorithm in swh.core.hashutil.ALGORITHMS, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in """ db = self.db content_by_status = defaultdict(list) for d in content: if 'status' not in d: d['status'] = 'visible' if 'length' not in d: d['length'] = -1 content_by_status[d['status']].append(d) content_with_data = content_by_status['visible'] content_without_data = content_by_status['absent'] missing_content = set(self.content_missing(content_with_data)) missing_skipped = set( sha1_git for sha1, sha1_git, sha256 in self.skipped_content_missing(content_without_data)) with db.transaction() as cur: if missing_content: # create temporary table for metadata injection db.mktemp('content', cur) def add_to_objstorage(cont): self.objstorage.add(cont['data'], obj_id=cont['sha1']) content_filtered = (cont for cont in content_with_data if cont['sha1'] in missing_content) db.copy_to(content_filtered, 'tmp_content', ['sha1', 'sha1_git', 'sha256', 'length', 'status'], cur, item_cb=add_to_objstorage) # move metadata in place db.content_add_from_temp(cur) if missing_skipped: missing_filtered = (cont for cont in content_without_data if cont['sha1_git'] in missing_skipped) db.mktemp('skipped_content', cur) db.copy_to(missing_filtered, 'tmp_skipped_content', ['sha1', 'sha1_git', 'sha256', 'length', 'reason', 'status', 'origin'], cur) # move metadata in place db.skipped_content_add_from_temp(cur) + @db_transaction + def content_update(self, content, keys=[], cur=None): + """Update content blobs to the storage. Does nothing for unknown + contents or skipped ones. + + Args: + content: iterable of dictionaries representing individual pieces of + content to update. 
Each dictionary has the following keys:
+              - data (bytes): the actual content
+              - length (int): content length (default: -1)
+              - one key for each checksum algorithm in
+                swh.core.hashutil.ALGORITHMS, mapped to the corresponding
+                checksum
+              - status (str): one of visible, hidden, absent
+
+            keys ([str]): List of keys whose values need an update
+                (e.g. a new hash column)
+
+        """
+        db = self.db
+
+        # TODO: Add a check on input keys. How to properly implement
+        # this? We don't know yet the new columns.
+
+        db.mktemp('content', cur)
+        select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
+        db.copy_to(content, 'tmp_content', select_keys, cur)
+        db.content_update_from_temp(keys_to_update=keys,
+                                    cur=cur)
+
    def content_get(self, content):
        """Retrieve in bulk contents and their data.

        Args:
            content: iterable of sha1s

        Returns:
            Generates streams of contents as dicts with their raw data:

            - sha1: the content's sha1
            - data: bytes data of the content

        Raises:
            ValueError when too many contents are required,
            cf. BULK_BLOCK_CONTENT_LEN_MAX

        """
        # FIXME: Improve on server module to slice the result
        if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
            raise ValueError(
                "Send at maximum %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)

        for obj_id in content:
            try:
                data = self.objstorage.get(obj_id)
            except ObjNotFoundError:
                yield None
                continue

            yield {'sha1': obj_id, 'data': data}

    @db_transaction_generator
    def content_get_metadata(self, content, cur=None):
        """Retrieve content metadata in bulk

        Args:
            content: iterable of content identifiers (sha1)

        Returns:
            an iterable with content metadata corresponding to the given ids
        """
        db = self.db

        db.store_tmp_bytea(content, cur)

        for content_metadata in db.content_get_metadata_from_temp(cur):
            yield dict(zip(db.content_get_metadata_keys, content_metadata))

    @db_transaction_generator
    def content_missing(self, content, key_hash='sha1', cur=None):
        """List content missing from storage

        Args:
            content: iterable of dictionaries containing one key for each
                checksum algorithm in swh.core.hashutil.ALGORITHMS, mapped to
                the corresponding checksum, and a length key mapped to the
                content length.

            key_hash: the name of the hash used as key (default: 'sha1')

        Returns:
            an iterable of `key_hash`es missing from the storage

        Raises:
            TODO: an exception when we get a hash collision.
        """
        db = self.db

        keys = ['sha1', 'sha1_git', 'sha256']

        if key_hash not in keys:
            raise ValueError("key_hash should be one of %s" % keys)

        key_hash_idx = keys.index(key_hash)

        # Create temporary table for metadata injection
        db.mktemp('content', cur)

        db.copy_to(content, 'tmp_content', keys + ['length'], cur)

        for obj in db.content_missing_from_temp(cur):
            yield obj[key_hash_idx]

    @db_transaction_generator
    def content_missing_per_sha1(self, contents, cur=None):
        """List content missing from storage based only on sha1.

        Args:
            contents: Iterable of sha1 to check for absence.

        Returns:
            an iterable of `sha1`s missing from the storage.

        Raises:
            TODO: an exception when we get a hash collision.
        """
        db = self.db

        db.store_tmp_bytea(contents, cur)
        for obj in db.content_missing_per_sha1_from_temp(cur):
            yield obj[0]

    @db_transaction_generator
    def skipped_content_missing(self, content, cur=None):
        """List skipped_content missing from storage

        Args:
            content: iterable of dictionaries containing the data for each
                checksum algorithm.
Returns: an iterable of signatures missing from the storage """ keys = ['sha1', 'sha1_git', 'sha256'] db = self.db db.mktemp('skipped_content', cur) db.copy_to(content, 'tmp_skipped_content', keys + ['length', 'reason'], cur) yield from db.skipped_content_missing_from_temp(cur) @db_transaction def content_find(self, content, cur=None): """Find a content hash in db. Args: content: a dictionary representing one content hash, mapping checksum algorithm names (see swh.core.hashutil.ALGORITHMS) to checksum values Returns: a triplet (sha1, sha1_git, sha256) if the content exist or None otherwise. Raises: ValueError in case the key of the dictionary is not sha1, sha1_git nor sha256. """ db = self.db if not set(content).intersection(ALGORITHMS): raise ValueError('content keys must contain at least one of: ' 'sha1, sha1_git, sha256') c = db.content_find(sha1=content.get('sha1'), sha1_git=content.get('sha1_git'), sha256=content.get('sha256'), cur=cur) if c: keys = ['sha1', 'sha1_git', 'sha256', 'length', 'ctime', 'status'] return dict(zip(keys, c)) return None @db_transaction_generator def content_find_provenance(self, content, cur=None): """Find content's provenance information. Args: content: a dictionary entry representing one content hash. The dictionary key is one of swh.core.hashutil.ALGORITHMS. The value mapped to the corresponding checksum. Yields: The provenance information on content. """ db = self.db c = self.content_find(content) if not c: return [] sha1_git = c['sha1_git'] for provenance in db.content_find_provenance(sha1_git, cur=cur): yield dict(zip(db.provenance_cols, provenance)) def directory_add(self, directories): """Add directories to the storage Args: directories: iterable of dictionaries representing the individual directories to add. Each dict has the following keys: - id (sha1_git): the id of the directory to add - entries (list): list of dicts for each entry in the directory. 
Each dict has the following keys: - name (bytes) - type (one of 'file', 'dir', 'rev'): type of the directory entry (file, directory, revision) - target (sha1_git): id of the object pointed at by the directory entry - perms (int): entry permissions """ dirs = set() dir_entries = { 'file': defaultdict(list), 'dir': defaultdict(list), 'rev': defaultdict(list), } for cur_dir in directories: dir_id = cur_dir['id'] dirs.add(dir_id) for src_entry in cur_dir['entries']: entry = src_entry.copy() entry['dir_id'] = dir_id dir_entries[entry['type']][dir_id].append(entry) dirs_missing = set(self.directory_missing(dirs)) if not dirs_missing: return db = self.db with db.transaction() as cur: # Copy directory ids dirs_missing_dict = ({'id': dir} for dir in dirs_missing) db.mktemp('directory', cur) db.copy_to(dirs_missing_dict, 'tmp_directory', ['id'], cur) # Copy entries for entry_type, entry_list in dir_entries.items(): entries = itertools.chain.from_iterable( entries_for_dir for dir_id, entries_for_dir in entry_list.items() if dir_id in dirs_missing) db.mktemp_dir_entry(entry_type) db.copy_to( entries, 'tmp_directory_entry_%s' % entry_type, ['target', 'name', 'perms', 'dir_id'], cur, ) # Do the final copy db.directory_add_from_temp(cur) @db_transaction_generator def directory_missing(self, directories, cur): """List directories missing from storage Args: an iterable of directory ids Returns: a list of missing directory ids """ db = self.db # Create temporary table for metadata injection db.mktemp('directory', cur) directories_dicts = ({'id': dir} for dir in directories) db.copy_to(directories_dicts, 'tmp_directory', ['id'], cur) for obj in db.directory_missing_from_temp(cur): yield obj[0] @db_transaction_generator def directory_get(self, directories, cur=None): """Get information on directories. Args: - directories: an iterable of directory ids Returns: List of directories as dict with keys and associated values. """ db = self.db keys = ('id', 'dir_entries', 'file_entries', 'rev_entries') db.mktemp('directory', cur) db.copy_to(({'id': dir_id} for dir_id in directories), 'tmp_directory', ['id'], cur) dirs = db.directory_get_from_temp(cur) for line in dirs: yield dict(zip(keys, line)) @db_transaction_generator def directory_ls(self, directory, recursive=False, cur=None): """Get entries for one directory. Args: - directory: the directory to list entries from. - recursive: if flag on, this list recursively from this directory. Returns: List of entries for such directory. """ db = self.db if recursive: res_gen = db.directory_walk(directory) else: res_gen = db.directory_walk_one(directory) for line in res_gen: yield dict(zip(db.directory_ls_cols, line)) @db_transaction def cache_content_revision_add(self, revisions, cur=None): """Cache the current revision's current targeted arborescence directory. If the revision has already been cached, it just does nothing. Args: - revisions: the revisions to cache Returns: None """ db = self.db db.store_tmp_bytea(revisions, cur) db.cache_content_revision_add() @db_transaction_generator def cache_content_get_all(self, cur=None): """Read the distinct contents in the cache table. Yields: contents from cache """ for content in self.db.cache_content_get_all(cur): yield dict(zip(self.db.cache_content_get_cols, content)) @db_transaction def cache_content_get(self, content, cur=None): """Retrieve information on content. 
Args: content (dict): content with checkums Returns: Its properties (sha1, sha1_git, sha256, revision_paths) """ if 'sha1_git' in content: sha1_git = content['sha1_git'] else: c = self.content_find(content) if not c: return None sha1_git = c['sha1_git'] c = self.db.cache_content_get(sha1_git, cur=cur) if not c: return None return dict(zip(self.db.cache_content_get_cols, c)) @db_transaction_generator def cache_revision_origin_add(self, origin, visit, cur=None): """Cache the list of revisions the given visit added to the origin. Args: - origin: the id of the origin - visit: the id of the visit Returns: The list of new revisions """ for (revision,) in self.db.cache_revision_origin_add(origin, visit): yield revision @db_transaction def directory_entry_get_by_path(self, directory, paths, cur=None): """Get the directory entry (either file or dir) from directory with path. Args: - directory: sha1 of the top level directory - paths: path to lookup from the top level directory. From left (top) to right (bottom). Returns: The corresponding directory entry if found, None otherwise. """ db = self.db keys = ('dir_id', 'type', 'target', 'name', 'perms', 'status', 'sha1', 'sha1_git', 'sha256') res = db.directory_entry_get_by_path(directory, paths, cur) if res: return dict(zip(keys, res)) def revision_add(self, revisions): """Add revisions to the storage Args: revisions: iterable of dictionaries representing the individual revisions to add. Each dict has the following keys: - id (sha1_git): id of the revision to add - date (datetime.DateTime): date the revision was written - date_offset (int): offset from UTC in minutes the revision was written - date_neg_utc_offset (boolean): whether a null date_offset represents a negative UTC offset - committer_date (datetime.DateTime): date the revision got added to the origin - committer_date_offset (int): offset from UTC in minutes the revision was added to the origin - committer_date_neg_utc_offset (boolean): whether a null committer_date_offset represents a negative UTC offset - type (one of 'git', 'tar'): type of the revision added - directory (sha1_git): the directory the revision points at - message (bytes): the message associated with the revision - author_name (bytes): the name of the revision author - author_email (bytes): the email of the revision author - committer_name (bytes): the name of the revision committer - committer_email (bytes): the email of the revision committer - metadata (jsonb): extra information as dictionary - synthetic (bool): revision's nature (tarball, directory creates synthetic revision) - parents (list of sha1_git): the parents of this revision """ db = self.db revisions_missing = set(self.revision_missing( set(revision['id'] for revision in revisions))) if not revisions_missing: return with db.transaction() as cur: db.mktemp_revision(cur) revisions_filtered = ( converters.revision_to_db(revision) for revision in revisions if revision['id'] in revisions_missing) parents_filtered = [] db.copy_to( revisions_filtered, 'tmp_revision', db.revision_add_cols, cur, lambda rev: parents_filtered.extend(rev['parents'])) db.revision_add_from_temp(cur) db.copy_to(parents_filtered, 'revision_history', ['id', 'parent_id', 'parent_rank'], cur) @db_transaction_generator def revision_missing(self, revisions, cur=None): """List revisions missing from storage Args: an iterable of revision ids Returns: a list of missing revision ids """ db = self.db db.store_tmp_bytea(revisions, cur) for obj in db.revision_missing_from_temp(cur): yield obj[0] 
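
    # Illustrative use of the temporary-bytea pattern shared by the
    # *_missing methods above (a sketch; the ids are hypothetical and
    # ``storage`` is an instance of this class):
    #
    #     maybe_missing = [bytes.fromhex('da39a3ee5e6b4b0d3255bfef'
    #                                    '95601890afd80709')]
    #     missing = set(storage.revision_missing(maybe_missing))
    #     # ``missing`` holds the subset absent from the revision table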
@db_transaction_generator def revision_get(self, revisions, cur): """Get all revisions from storage Args: an iterable of revision ids Returns: an iterable of revisions as dictionaries (or None if the revision doesn't exist) """ db = self.db db.store_tmp_bytea(revisions, cur) for line in self.db.revision_get_from_temp(cur): data = converters.db_to_revision( dict(zip(db.revision_get_cols, line)) ) if not data['type']: yield None continue yield data @db_transaction_generator def revision_log(self, revisions, limit=None, cur=None): """Fetch revision entry from the given root revisions. Args: - revisions: array of root revision to lookup - limit: limitation on the output result. Default to null. Yields: List of revision log from such revisions root. """ db = self.db for line in db.revision_log(revisions, limit, cur): data = converters.db_to_revision( dict(zip(db.revision_get_cols, line)) ) if not data['type']: yield None continue yield data @db_transaction_generator def revision_shortlog(self, revisions, limit=None, cur=None): """Fetch the shortlog for the given revisions Args: revisions: list of root revisions to lookup limit: depth limitation for the output Yields: a list of (id, parents) tuples. """ db = self.db yield from db.revision_shortlog(revisions, limit, cur) @db_transaction_generator def revision_log_by(self, origin_id, branch_name=None, timestamp=None, limit=None, cur=None): """Fetch revision entry from the actual origin_id's latest revision. Args: - origin_id: the origin id from which deriving the revision - branch_name: (optional) occurrence's branch name - timestamp: (optional) occurrence's time - limit: (optional) depth limitation for the output. Default to None. Yields: The revision log starting from the revision derived from the (origin, branch_name, timestamp) combination if any. Returns the [] if no revision matching this combination is found. """ db = self.db # Retrieve the revision by criterion revisions = list(db.revision_get_by( origin_id, branch_name, timestamp, limit=1)) if not revisions: return None revision_id = revisions[0][0] # otherwise, retrieve the revision log from that revision yield from self.revision_log([revision_id], limit) def release_add(self, releases): """Add releases to the storage Args: releases: iterable of dictionaries representing the individual releases to add. 
                Each dict has the following keys:
                - id (sha1_git): id of the release to add
                - revision (sha1_git): id of the revision the release points
                  to
                - date (datetime.DateTime): the date the release was made
                - date_offset (int): offset from UTC in minutes the release
                  was made
                - date_neg_utc_offset (boolean): whether a null date_offset
                  represents a negative UTC offset
                - name (bytes): the name of the release
                - comment (bytes): the comment associated with the release
                - author_name (bytes): the name of the release author
                - author_email (bytes): the email of the release author
        """
        db = self.db

        release_ids = set(release['id'] for release in releases)
        releases_missing = set(self.release_missing(release_ids))

        if not releases_missing:
            return

        with db.transaction() as cur:
            db.mktemp_release(cur)

            releases_filtered = (
                converters.release_to_db(release) for release in releases
                if release['id'] in releases_missing
            )

            db.copy_to(releases_filtered, 'tmp_release', db.release_add_cols,
                       cur)

            db.release_add_from_temp(cur)

    @db_transaction_generator
    def release_missing(self, releases, cur=None):
        """List releases missing from storage

        Args:
            an iterable of release ids

        Returns:
            a list of missing release ids
        """
        db = self.db

        # Create temporary table for metadata injection
        db.store_tmp_bytea(releases, cur)

        for obj in db.release_missing_from_temp(cur):
            yield obj[0]

    @db_transaction_generator
    def release_get(self, releases, cur=None):
        """Given a list of sha1, return the releases' information

        Args:
            releases: list of sha1s

        Returns:
            Generates release dicts built from the release_get_cols
            columns: id, target, target_type, date, name, comment,
            synthetic, and the associated author information, as
            converted by converters.db_to_release.
        """
        db = self.db

        # Create temporary table for metadata injection
        db.store_tmp_bytea(releases, cur)

        for release in db.release_get_from_temp(cur):
            yield converters.db_to_release(
                dict(zip(db.release_get_cols, release))
            )

    @db_transaction
    def occurrence_add(self, occurrences, cur=None):
        """Add occurrences to the storage

        Args:
            occurrences: iterable of dictionaries representing the individual
                occurrences to add. Each dict has the following keys:
                - origin (int): id of the origin corresponding to the
                  occurrence
                - branch (str): the reference name of the occurrence
                - target (sha1_git): the id of the object pointed to by
                  the occurrence
                - target_type (str): the type of object pointed to by the
                  occurrence
        """
        db = self.db

        db.mktemp_occurrence_history(cur)
        db.copy_to(occurrences, 'tmp_occurrence_history',
                   ['origin', 'branch', 'target', 'target_type', 'visit'],
                   cur)

        db.occurrence_history_add_from_temp(cur)

    @db_transaction_generator
    def occurrence_get(self, origin_id, cur=None):
        """Retrieve occurrence information per origin_id.

        Args:
            origin_id: The occurrence's origin.

        Yields:
            List of occurrences matching criterion.
        """
        db = self.db
        for line in db.occurrence_get(origin_id, cur):
            yield {
                'origin': line[0],
                'branch': line[1],
                'target': line[2],
                'target_type': line[3],
            }

    @db_transaction
    def origin_visit_add(self, origin, ts, cur=None):
        """Add an origin_visit for the origin at ts with status 'ongoing'.
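
        For instance (a sketch; the identifiers are hypothetical):

            storage.origin_visit_add(origin=42,
                                     ts='2017-01-01T00:00:00+00:00')
            # {'origin': 42, 'visit': 1}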
Args: origin: Visited Origin id ts: timestamp of such visit Returns: Dict with keys origin and visit where: - origin: origin identifier - visit: the visit identifier for the new visit occurrence - ts (datetime.DateTime): the visit date """ if isinstance(ts, str): ts = dateutil.parser.parse(ts) return { 'origin': origin, 'visit': self.db.origin_visit_add(origin, ts, cur) } @db_transaction def origin_visit_update(self, origin, visit_id, status, metadata=None, cur=None): """Update an origin_visit's status. Args: origin: Visited Origin id visit_id: Visit's id status: Visit's new status metadata: Data associated to the visit Returns: None """ return self.db.origin_visit_update(origin, visit_id, status, metadata, cur) @db_transaction_generator def origin_visit_get(self, origin, last_visit=None, limit=None, cur=None): """Retrieve all the origin's visit's information. Args: origin (int): The occurrence's origin (identifier). last_visit (int): Starting point from which listing the next visits Default to None limit (int): Number of results to return from the last visit. Default to None Yields: List of visits. """ db = self.db for line in db.origin_visit_get_all( origin, last_visit=last_visit, limit=limit, cur=cur): data = dict(zip(self.db.origin_visit_get_cols, line)) yield data @db_transaction def origin_visit_get_by(self, origin, visit, cur=None): """Retrieve origin visit's information. Args: origin: The occurrence's origin (identifier). Returns: The information on that particular (origin, visit) """ db = self.db ori_visit = db.origin_visit_get(origin, visit, cur) if not ori_visit: return None ori_visit = dict(zip(self.db.origin_visit_get_cols, ori_visit)) occs = {} for occ in db.occurrence_by_origin_visit(origin, visit): _, branch_name, target, target_type = occ occs[branch_name] = { 'target': target, 'target_type': target_type } ori_visit.update({ 'occurrences': occs }) return ori_visit @db_transaction_generator def revision_get_by(self, origin_id, branch_name=None, timestamp=None, limit=None, cur=None): """Given an origin_id, retrieve occurrences' list per given criterions. Args: origin_id: The origin to filter on. branch_name: (optional) branch name. timestamp: (optional) time. limit: (optional) limit Yields: List of occurrences matching the criterions or None if nothing is found. """ for line in self.db.revision_get_by(origin_id, branch_name, timestamp, limit=limit, cur=cur): data = converters.db_to_revision( dict(zip(self.db.revision_get_cols, line)) ) if not data['type']: yield None continue yield data def release_get_by(self, origin_id, limit=None): """Given an origin id, return all the tag objects pointing to heads of origin_id. Args: origin_id: the origin to filter on. limit: None by default Yields: List of releases matching the criterions or None if nothing is found. """ for line in self.db.release_get_by(origin_id, limit=limit): data = converters.db_to_release( dict(zip(self.db.release_get_cols, line)) ) yield data @db_transaction def object_find_by_sha1_git(self, ids, cur=None): """Return the objects found with the given ids. Args: ids: a generator of sha1_gits Returns: a dict mapping the id to the list of objects found. Each object found is itself a dict with keys: sha1_git: the input id type: the type of object found id: the id of the object found object_id: the numeric id of the object found. 
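
        For example (a sketch; the id and the numeric object_id are
        hypothetical):

            storage.object_find_by_sha1_git([sha1_git])
            # {sha1_git: [{'sha1_git': sha1_git, 'type': 'revision',
            #              'id': sha1_git, 'object_id': 1234}]}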
""" db = self.db ret = {id: [] for id in ids} for retval in db.object_find_by_sha1_git(ids): if retval[1]: ret[retval[0]].append(dict(zip(db.object_find_by_sha1_git_cols, retval))) return ret @db_transaction def origin_get(self, origin, cur=None): """Return the origin either identified by its id or its tuple (type, url). Args: origin: dictionary representing the individual origin to find. This dict has either the keys type and url: - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to either the id: - id: the origin id Returns: the origin dict with the keys: - id: origin's id - type: origin's type - url: origin's url - lister: lister's uuid - project: project's uuid (FIXME, retrieve this information) Raises: ValueError if the keys does not match (url and type) nor id. """ db = self.db keys = ['id', 'type', 'url', 'lister', 'project'] origin_id = origin.get('id') if origin_id: # check lookup per id first ori = db.origin_get(origin_id, cur) elif 'type' in origin and 'url' in origin: # or lookup per type, url ori = db.origin_get_with(origin['type'], origin['url'], cur) else: # unsupported lookup raise ValueError('Origin must have either id or (type and url).') if ori: return dict(zip(keys, ori)) return None @db_transaction def _person_add(self, person, cur=None): """Add a person in storage. BEWARE: Internal function for now. Do not do anything fancy in case a person already exists. Please adapt code if more checks are needed. Args: person dictionary with keys name and email. Returns: Id of the new person. """ db = self.db return db.person_add(person) @db_transaction_generator def person_get(self, person, cur=None): """Return the persons identified by their ids. Args: person: array of ids. Returns: The array of persons corresponding of the ids. """ db = self.db for person in db.person_get(person): yield dict(zip(db.person_get_cols, person)) @db_transaction def origin_add(self, origins, cur=None): """Add origins to the storage Args: origins: list of dictionaries representing the individual origins, with the following keys: type: the origin type ('git', 'svn', 'deb', ...) url (bytes): the url the origin points to Returns: The array of ids corresponding to the given origins """ ret = [] for origin in origins: ret.append(self.origin_add_one(origin, cur=cur)) return ret @db_transaction def origin_add_one(self, origin, cur=None): """Add origin to the storage Args: origin: dictionary representing the individual origin to add. This dict has the following keys: - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to Returns: the id of the added origin, or of the identical one that already exists. """ db = self.db data = db.origin_get_with(origin['type'], origin['url'], cur) if data: return data[0] return db.origin_add(origin['type'], origin['url'], cur) @db_transaction def fetch_history_start(self, origin_id, cur=None): """Add an entry for origin origin_id in fetch_history. Returns the id of the added fetch_history entry """ fetch_history = { 'origin': origin_id, 'date': datetime.datetime.now(tz=datetime.timezone.utc), } return self.db.create_fetch_history(fetch_history, cur) @db_transaction def fetch_history_end(self, fetch_history_id, data, cur=None): """Close the fetch_history entry with id `fetch_history_id`, replacing its data with `data`. 
""" now = datetime.datetime.now(tz=datetime.timezone.utc) fetch_history = self.db.get_fetch_history(fetch_history_id, cur) if not fetch_history: raise ValueError('No fetch_history with id %d' % fetch_history_id) fetch_history['duration'] = now - fetch_history['date'] fetch_history.update(data) self.db.update_fetch_history(fetch_history, cur) @db_transaction def fetch_history_get(self, fetch_history_id, cur=None): """Get the fetch_history entry with id `fetch_history_id`. """ return self.db.get_fetch_history(fetch_history_id, cur) @db_transaction def entity_add(self, entities, cur=None): """Add the given entitites to the database (in entity_history). Args: - entities: iterable of dictionaries containing the following keys: - uuid (uuid): id of the entity - parent (uuid): id of the parent entity - name (str): name of the entity - type (str): type of entity (one of 'organization', 'group_of_entities', 'hosting', 'group_of_persons', 'person', 'project') - description (str, optional): description of the entity - homepage (str): url of the entity's homepage - active (bool): whether the entity is active - generated (bool): whether the entity was generated - lister_metadata (dict): lister-specific entity metadata - metadata (dict): other metadata for the entity - validity (datetime.DateTime array): timestamps at which we listed the entity. """ db = self.db cols = list(db.entity_history_cols) cols.remove('id') db.mktemp_entity_history() db.copy_to(entities, 'tmp_entity_history', cols, cur) db.entity_history_add_from_temp() @db_transaction_generator def entity_get_from_lister_metadata(self, entities, cur=None): """Fetch entities from the database, matching with the lister and associated metadata. Args: entities: iterable of dictionaries containing the lister metadata to look for. Useful keys are 'lister', 'type', 'id', ... Returns: A generator of fetched entities with all their attributes. If no match was found, the returned entity is None. """ db = self.db db.mktemp_entity_lister(cur) mapped_entities = [] for i, entity in enumerate(entities): mapped_entity = { 'id': i, 'lister_metadata': entity, } mapped_entities.append(mapped_entity) db.copy_to(mapped_entities, 'tmp_entity_lister', ['id', 'lister_metadata'], cur) cur.execute('''select id, %s from swh_entity_from_tmp_entity_lister() order by id''' % ','.join(db.entity_cols)) for id, *entity_vals in cur: fetched_entity = dict(zip(db.entity_cols, entity_vals)) if fetched_entity['uuid']: yield fetched_entity else: yield { 'uuid': None, 'lister_metadata': entities[i], } @db_transaction_generator def entity_get(self, uuid, cur=None): """Returns the list of entity per its uuid identifier and also its parent hierarchy. Args: uuid: entity's identifier Returns: List of entities starting with entity with uuid and the parent hierarchy from such entity. """ db = self.db for entity in db.entity_get(uuid, cur): yield dict(zip(db.entity_cols, entity)) @db_transaction def entity_get_one(self, uuid, cur=None): """Returns one entity using its uuid identifier. 
Args: uuid: entity's identifier Returns: the object corresponding to the given entity """ db = self.db entity = db.entity_get_one(uuid, cur) if entity: return dict(zip(db.entity_cols, entity)) else: return None @db_transaction def stat_counters(self, cur=None): """compute statistics about the number of tuples in various tables Returns: a dictionary mapping textual labels (e.g., content) to integer values (e.g., the number of tuples in table content) """ return {k: v for (k, v) in self.db.stat_counters()} @db_transaction_generator def content_mimetype_missing(self, mimetypes, cur=None): """List mimetypes missing from storage. Args: mimetypes: iterable of dict with keys: - id (bytes): sha1 identifier - tool_name (str): tool used to compute the results - tool_version (str): associated tool's version Returns: an iterable of missing id for the triplets id, tool_name, tool_version """ db = self.db db.mktemp_content_mimetype_missing(cur) db.copy_to(mimetypes, 'tmp_content_mimetype_missing', ['id', 'tool_name', 'tool_version'], cur) for obj in db.content_mimetype_missing_from_temp(cur): yield obj[0] @db_transaction def content_mimetype_add(self, mimetypes, conflict_update=False, cur=None): """Add mimetypes not present in storage. Args: mimetypes: iterable of dictionary with keys: - id (bytes): sha1 identifier - mimetype (bytes): raw content's mimetype - encoding (bytes): raw content's encoding - tool_name (str): tool used to compute the results - tool_version (str): associated tool's version conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ db = self.db db.mktemp_content_mimetype(cur) db.copy_to(mimetypes, 'tmp_content_mimetype', db.content_mimetype_cols, cur) db.content_mimetype_add_from_temp(conflict_update, cur) @db_transaction_generator def content_mimetype_get(self, ids, cur=None): db = self.db db.store_tmp_bytea(ids, cur) for c in db.content_mimetype_get_from_temp(): yield converters.db_to_mimetype( dict(zip(db.content_mimetype_cols, c))) @db_transaction_generator def content_language_missing(self, languages, cur=None): """List languages missing from storage. Args: languages: iterable of dict with keys: - id (bytes): sha1 identifier - tool_name (str): tool used to compute the results - tool_version (str): associated tool's version Returns: an iterable of missing id """ db = self.db db.mktemp_content_language_missing(cur) db.copy_to(languages, 'tmp_content_language_missing', db.content_language_cols, cur) for obj in db.content_language_missing_from_temp(cur): yield obj[0] @db_transaction_generator def content_language_get(self, ids, cur=None): db = self.db db.store_tmp_bytea(ids, cur) for c in db.content_language_get_from_temp(): yield converters.db_to_language( dict(zip(db.content_language_cols, c))) @db_transaction def content_language_add(self, languages, conflict_update=False, cur=None): """Add languages not present in storage. 
Args: languages: iterable of dictionary with keys: - id: sha1 - lang: bytes conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ db = self.db db.mktemp_content_language(cur) # empty language is mapped to 'unknown' db.copy_to( ({ 'id': l['id'], 'lang': 'unknown' if not l['lang'] else l['lang'], 'tool_name': l['tool_name'], 'tool_version': l['tool_version'], } for l in languages), 'tmp_content_language', db.content_language_cols, cur) db.content_language_add_from_temp(conflict_update, cur) @db_transaction_generator def content_ctags_missing(self, ctags, cur=None): """List ctags missing from storage. Args: ctags: iterable of dict with keys: - id (bytes): sha1 identifier - tool_name (str): tool name used - tool_version (str): associated version Returns: an iterable of missing id """ db = self.db db.mktemp_content_ctags_missing(cur) db.copy_to(ctags, tblname='tmp_content_ctags_missing', columns=['id', 'tool_name', 'tool_version'], cur=cur) for obj in db.content_ctags_missing_from_temp(cur): yield obj[0] @db_transaction_generator def content_ctags_get(self, ids, cur=None): """Retrieve ctags per id. Args: ids ([sha1]): Iterable of sha1 """ db = self.db db.store_tmp_bytea(ids, cur) for c in db.content_ctags_get_from_temp(): yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, c))) @db_transaction def content_ctags_add(self, ctags, conflict_update=False, cur=None): """Add ctags not present in storage Args: ctags: iterable of dictionaries with keys: - id (bytes): sha1 - ctags ([dict]): List of dictionary with keys (name, kind, line, language) """ db = self.db def _convert_ctags(ctags): """Convert ctags to list of ctags. """ res = [] for ctag in ctags: res.extend(converters.ctags_to_db(ctag)) return res db.mktemp_content_ctags(cur) db.copy_to(_convert_ctags(ctags), tblname='tmp_content_ctags', columns=db.content_ctags_cols, cur=cur) db.content_ctags_add_from_temp(conflict_update, cur) @db_transaction_generator def content_ctags_search(self, expression, limit=10, last_sha1=None, cur=None): """Search through content's raw ctags symbols. Args: expression (str): Expression to search for limit (int): Number of rows to return (default to 10). last_sha1 (str): Offset from which retrieving data (default to ''). Yields: rows of ctags including id, name, lang, kind, line, etc... """ db = self.db for obj in db.content_ctags_search(expression, last_sha1, limit, cur=cur): yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj))) @db_transaction_generator def content_fossology_license_missing(self, licenses, cur=None): """List license missing from storage. Args: licenses ([bytes]): iterable of sha1 Returns: an iterable of missing id """ db = self.db db.mktemp_content_fossology_license_missing(cur) db.copy_to(licenses, 'tmp_content_fossology_license_missing', ['id', 'tool_name', 'tool_version'], cur) for obj in db.content_fossology_license_missing_from_temp(cur): yield obj[0] @db_transaction_generator def content_fossology_license_get(self, ids, cur=None): """Retrieve licenses per id. Args: ids ([sha1]): Iterable of sha1 Yields: List of dict with the following keys: - id (bytes) - licenses ([str]): associated licenses for that content """ db = self.db db.store_tmp_bytea(ids, cur) for c in db.content_fossology_license_get_from_temp(): yield dict(zip(db.content_fossology_license_cols, c)) @db_transaction def content_fossology_license_add(self, licenses, conflict_update=False, cur=None): """Add licenses not present in storage. 
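
        For instance (a sketch; the sha1 and tool values are hypothetical):

            storage.content_fossology_license_add([{
                'id': sha1,
                'licenses': ['GPL-2.0+'],
                'tool_name': 'nomos',
                'tool_version': '3.1.0',
            }])
            # returns the entries whose license names are unknown,
            # [] otherwise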
Args: licenses ([dict]): iterable of dict with keys: - id: sha1 - license ([bytes]): List of licenses associated to sha1 - tool (str): nomossa conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) Returns: List of content_license entries which failed due to unknown licenses """ db = self.db # First, we check the licenses are ok licenses_to_check = set() # set of licenses to check content_licenses_to_add = {} # content_licenses to add names_to_content_license = {} # map from names to content licenses for c in licenses: id = c['id'] for name in c['licenses']: licenses_to_check.add(name) l = names_to_content_license.get(name, []) l.append(id) names_to_content_license[name] = l content_licenses_to_add[id] = c db.mktemp_content_fossology_license_unknown() db.copy_to(({'name': name} for name in licenses_to_check), tblname='tmp_content_fossology_license_unknown', columns=['name'], cur=cur) unknown_licenses = db.content_fossology_license_unknown(cur) # We filter out wrong content_license (this will be the result) wrong_content_licenses = [] for name, in unknown_licenses: for id in names_to_content_license[name]: # we can remove it multiple times since one content # can have multiple licenses content_license = content_licenses_to_add.pop(id, None) if content_license: wrong_content_licenses.append(content_license) if content_licenses_to_add: # Then, we add the correct ones db.mktemp_content_fossology_license(cur) db.copy_to( ({ 'id': c['id'], 'tool_name': c['tool_name'], 'tool_version': c['tool_version'], 'license': license, } for c in content_licenses_to_add.values() for license in c['licenses']), tblname='tmp_content_fossology_license', columns=['id', 'tool_name', 'tool_version', 'license'], cur=cur) db.content_fossology_license_add_from_temp(conflict_update, cur) return wrong_content_licenses diff --git a/swh/storage/tests/test_archiver.py b/swh/storage/tests/test_archiver.py index 8beca01a0..1625dee13 100644 --- a/swh/storage/tests/test_archiver.py +++ b/swh/storage/tests/test_archiver.py @@ -1,323 +1,471 @@ -# Copyright (C) 2015 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import glob import tempfile +import shutil import unittest import os import time import json from nose.tools import istest from nose.plugins.attrib import attr from swh.core import hashutil from swh.core.tests.db_testing import DbsTestFixture from server_testing import ServerTestFixture +from swh.storage.archiver.storage import get_archiver_storage + from swh.storage.archiver import ArchiverWithRetentionPolicyDirector from swh.storage.archiver import ArchiverWithRetentionPolicyWorker from swh.objstorage import get_objstorage from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.api.server import app TEST_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(TEST_DIR, '../../../../swh-storage-testdata') @attr('db') class TestArchiver(DbsTestFixture, ServerTestFixture, unittest.TestCase): """ Test the objstorage archiver. 
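
    Rows of the content_archive table manipulated by these tests look
    roughly as follows (a sketch; values are hypothetical):

        content_id: \xda39a3ee5e6b4b0d3255bfef95601890afd80709
        copies: {"banco": {"status": "present", "mtime": 1485000000}}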
""" TEST_DB_NAMES = [ 'softwareheritage-archiver-test', ] TEST_DB_DUMPS = [ os.path.join(TEST_DATA_DIR, 'dumps/swh-archiver.dump'), ] TEST_DB_DUMP_TYPES = [ 'pg_dump', ] def setUp(self): # Launch the backup server - dest_root = tempfile.mkdtemp(prefix='remote') + self.dest_root = tempfile.mkdtemp(prefix='remote') self.config = { 'cls': 'pathslicing', 'args': { - 'root': dest_root, + 'root': self.dest_root, 'slicing': '0:2/2:4/4:6', } } self.app = app super().setUp() # Retrieve connection (depends on the order in TEST_DB_NAMES) self.conn = self.conns[0] # archiver db's connection self.cursor = self.cursors[0] # Create source storage - src_root = tempfile.mkdtemp() + self.src_root = tempfile.mkdtemp() src_config = { 'cls': 'pathslicing', 'args': { - 'root': src_root, + 'root': self.src_root, 'slicing': '0:2/2:4/4:6' } } self.src_storage = get_objstorage(**src_config) # Create destination storage dest_config = { 'cls': 'remote', 'args': { 'url': self.url() } } self.dest_storage = get_objstorage(**dest_config) # Keep mapped the id to the storages self.storages = { 'uffizi': self.src_storage, 'banco': self.dest_storage } # Override configurations src_archiver_conf = {'host': 'uffizi'} dest_archiver_conf = {'host': 'banco'} src_archiver_conf.update(src_config) dest_archiver_conf.update(dest_config) self.archiver_storages = [src_archiver_conf, dest_archiver_conf] self._override_director_config() self._override_worker_config() # Create the base archiver self.archiver = self._create_director() def tearDown(self): self.empty_tables() + shutil.rmtree(self.src_root) + shutil.rmtree(self.dest_root) super().tearDown() def empty_tables(self): # Remove all content self.cursor.execute('DELETE FROM content_archive') self.conn.commit() def _override_director_config(self, retention_policy=2): """ Override the default config of the Archiver director to allow the tests to use the *-test db instead of the default one as there is no configuration file for now. """ ArchiverWithRetentionPolicyDirector.parse_config_file = lambda obj, additional_configs: { # noqa - 'dbconn': self.conn, + 'archiver_storage': { + 'cls': 'db', + 'args': { + 'dbconn': self.conn, + }, + }, 'batch_max_size': 5000, 'archival_max_age': 3600, 'retention_policy': retention_policy, 'asynchronous': False, } def _override_worker_config(self): """ Override the default config of the Archiver worker to allow the tests to use the *-test db instead of the default one as there is no configuration file for now. """ ArchiverWithRetentionPolicyWorker.parse_config_file = lambda obj, additional_configs: { # noqa 'retention_policy': 2, 'archival_max_age': 3600, - 'dbconn': self.conn, + 'archiver_storage': { + 'cls': 'db', + 'args': { + 'dbconn': self.conn, + }, + }, 'storages': self.archiver_storages, 'source': 'uffizi', } def _create_director(self): return ArchiverWithRetentionPolicyDirector() def _create_worker(self, batch={}): return ArchiverWithRetentionPolicyWorker(batch) def _add_content(self, storage_name, content_data): """ Add really a content to the given objstorage This put an empty status for the added content. 
Args: storage_name: the concerned storage content_data: the data to insert with_row_insert: to insert a row entry in the db or not """ # Add the content to the storage obj_id = self.storages[storage_name].add(content_data) db_obj_id = r'\x' + hashutil.hash_to_hex(obj_id) self.cursor.execute(""" INSERT INTO content_archive VALUES('%s', '{}') """ % (db_obj_id)) return obj_id def _update_status(self, obj_id, storage_name, status, date=None): """ Update the db status for the given id/storage_name. This does not create the content in the storage. """ db_obj_id = r'\x' + hashutil.hash_to_hex(obj_id) self.archiver.archiver_storage.content_archive_update( db_obj_id, storage_name, status ) def _add_dated_content(self, obj_id, copies={}): """ Fully erase the previous copies field for the given content id This does not alter the contents into the objstorages. """ db_obj_id = r'\x' + hashutil.hash_to_hex(obj_id) self.cursor.execute(""" UPDATE TABLE content_archive SET copies='%s' WHERE content_id='%s' """ % (json.dumps(copies), db_obj_id)) # Integration test @istest def archive_missing_content(self): """ Run archiver on a missing content should archive it. """ obj_data = b'archive_missing_content' obj_id = self._add_content('uffizi', obj_data) self._update_status(obj_id, 'uffizi', 'present') # Content is missing on banco (entry not present in the db) try: self.dest_storage.get(obj_id) except ObjNotFoundError: pass else: self.fail('Content should not be present before archival') self.archiver.run() # now the content should be present on remote objstorage remote_data = self.dest_storage.get(obj_id) self.assertEquals(obj_data, remote_data) @istest def archive_present_content(self): """ A content that is not 'missing' shouldn't be archived. """ obj_id = self._add_content('uffizi', b'archive_present_content') self._update_status(obj_id, 'uffizi', 'present') self._update_status(obj_id, 'banco', 'present') # After the run, the content should NOT be in the archive. # As the archiver believe it was already in. self.archiver.run() with self.assertRaises(ObjNotFoundError): self.dest_storage.get(obj_id) @istest def archive_already_enough(self): """ A content missing with enough copies shouldn't be archived. """ obj_id = self._add_content('uffizi', b'archive_alread_enough') self._update_status(obj_id, 'uffizi', 'present') self._override_director_config(retention_policy=1) director = self._create_director() # Obj is present in only one archive but only one copy is required. director.run() with self.assertRaises(ObjNotFoundError): self.dest_storage.get(obj_id) # Unit tests for archive worker def archival_elapsed(self, mtime): return self._create_worker()._is_archival_delay_elapsed(mtime) @istest def vstatus_ongoing_remaining(self): self.assertFalse(self.archival_elapsed(time.time())) @istest def vstatus_ongoing_elapsed(self): past_time = ( time.time() - self._create_worker().archival_max_age ) self.assertTrue(self.archival_elapsed(past_time)) def _status(self, status, mtime=None): """ Get a dict that match the copies structure """ return {'status': status, 'mtime': mtime or time.time()} @istest def need_archival_missing(self): """ A content should need archival when it is missing. 
""" status_copies = {'present': ['uffizi'], 'missing': ['banco']} worker = self._create_worker() self.assertEqual(worker.need_archival(status_copies), True) @istest def need_archival_present(self): """ A content present everywhere shouldn't need archival """ status_copies = {'present': ['uffizi', 'banco']} worker = self._create_worker() self.assertEqual(worker.need_archival(status_copies), False) def _compute_copies_status(self, status): """ A content with a given status should be detected correctly """ obj_id = self._add_content( 'banco', b'compute_copies_' + bytes(status, 'utf8')) self._update_status(obj_id, 'banco', status) worker = self._create_worker() self.assertIn('banco', worker.compute_copies( set(worker.objstorages), obj_id)[status]) @istest def compute_copies_present(self): """ A present content should be detected with correct status """ self._compute_copies_status('present') @istest def compute_copies_missing(self): """ A missing content should be detected with correct status """ self._compute_copies_status('missing') + @istest + def compute_copies_extra_archive(self): + obj_id = self._add_content('banco', b'foobar') + self._update_status(obj_id, 'banco', 'present') + self._update_status(obj_id, 'random_archive', 'present') + worker = self._create_worker() + copies = worker.compute_copies(set(worker.objstorages), obj_id) + self.assertEqual(copies['present'], {'banco'}) + self.assertEqual(copies['missing'], {'uffizi'}) + def _get_backups(self, present, missing): """ Return a list of the pair src/dest from the present and missing """ worker = self._create_worker() return list(worker.choose_backup_servers(present, missing)) @istest def choose_backup_servers(self): self.assertEqual(len(self._get_backups(['uffizi', 'banco'], [])), 0) self.assertEqual(len(self._get_backups(['uffizi'], ['banco'])), 1) # Even with more possible destinations, do not take more than the # retention_policy require self.assertEqual( len(self._get_backups(['uffizi'], ['banco', 's3'])), 1 ) - # This cannot be tested with ArchiverWithRetentionPolicyDirector - # (it reads from archiver db) - # @istest - # def archive_missing_content__without_row_entry_in_archive_db(self): - # """ Run archiver on a missing content should archive it. 
- # """ - # obj_data = b'archive_missing_content_without_row_entry_in_archive_db' - # obj_id = self._add_content('uffizi', obj_data) - # # One entry in archiver db but no status about its whereabouts - # # Content is actually missing on banco but present on uffizi - # try: - # self.dest_storage.get(obj_id) - # except ObjNotFoundError: - # pass - # else: - # self.fail('Content should not be present before archival') - # self.archiver.run() - # # now the content should be present on remote objstorage - # remote_data = self.dest_storage.get(obj_id) - # self.assertEquals(obj_data, remote_data) + +class TestArchiverStorageStub(unittest.TestCase): + def setUp(self): + self.src_root = tempfile.mkdtemp(prefix='swh.storage.archiver.local') + self.dest_root = tempfile.mkdtemp(prefix='swh.storage.archiver.remote') + self.log_root = tempfile.mkdtemp(prefix='swh.storage.archiver.log') + + src_config = { + 'cls': 'pathslicing', + 'args': { + 'root': self.src_root, + 'slicing': '0:2/2:4/4:6' + } + } + self.src_storage = get_objstorage(**src_config) + + # Create destination storage + dest_config = { + 'cls': 'pathslicing', + 'args': { + 'root': self.dest_root, + 'slicing': '0:2/2:4/4:6' + } + } + self.dest_storage = get_objstorage(**dest_config) + + self.config = { + 'cls': 'stub', + 'args': { + 'archives': { + 'present_archive': 'http://uffizi:5003', + 'missing_archive': 'http://banco:5003', + }, + 'present': ['present_archive'], + 'missing': ['missing_archive'], + 'logfile_base': os.path.join(self.log_root, 'log_'), + } + } + + # Generated with: + # + # id_length = 20 + # random.getrandbits(8 * id_length).to_bytes(id_length, 'big') + # + self.content_ids = [ + b"\xc7\xc9\x8dlk!'k\x81+\xa9\xc1lg\xc2\xcbG\r`f", + b'S\x03:\xc9\xd0\xa7\xf2\xcc\x8f\x86v$0\x8ccq\\\xe3\xec\x9d', + b'\xca\x1a\x84\xcbi\xd6co\x14\x08\\8\x9e\xc8\xc2|\xd0XS\x83', + b'O\xa9\xce(\xb4\x95_&\xd2\xa2e\x0c\x87\x8fw\xd0\xdfHL\xb2', + b'\xaaa \xd1vB\x15\xbd\xf2\xf0 \xd7\xc4_\xf4\xb9\x8a;\xb4\xcc', + ] + + self.archiver_storage = get_archiver_storage(**self.config) + super().setUp() + + def tearDown(self): + shutil.rmtree(self.src_root) + shutil.rmtree(self.dest_root) + shutil.rmtree(self.log_root) + super().tearDown() + + @istest + def archive_ls(self): + self.assertCountEqual( + self.archiver_storage.archive_ls(), + self.config['args']['archives'].items() + ) + + @istest + def content_archive_get(self): + for content_id in self.content_ids: + self.assertEqual( + self.archiver_storage.content_archive_get(content_id), + (content_id, set(self.config['args']['present']), {}), + ) + + @istest + def content_archive_get_copies(self): + self.assertCountEqual( + self.archiver_storage.content_archive_get_copies(), + [], + ) + + @istest + def content_archive_get_unarchived_copies(self): + retention_policy = 2 + self.assertCountEqual( + self.archiver_storage.content_archive_get_unarchived_copies( + retention_policy), + [], + ) + + @istest + def content_archive_get_missing(self): + self.assertCountEqual( + self.archiver_storage.content_archive_get_missing( + self.content_ids, + 'missing_archive' + ), + self.content_ids, + ) + + self.assertCountEqual( + self.archiver_storage.content_archive_get_missing( + self.content_ids, + 'present_archive' + ), + [], + ) + + with self.assertRaises(ValueError): + list(self.archiver_storage.content_archive_get_missing( + self.content_ids, + 'unknown_archive' + )) + + @istest + def content_archive_get_unknown(self): + self.assertCountEqual( + self.archiver_storage.content_archive_get_unknown( + self.content_ids, + ), + 
[], + ) + + @istest + def content_archive_update(self): + for content_id in self.content_ids: + self.archiver_storage.content_archive_update( + content_id, 'present_archive', 'present') + self.archiver_storage.content_archive_update( + content_id, 'missing_archive', 'present') + + self.archiver_storage.close_logfile() + + # Make sure we created a logfile + files = glob.glob('%s*' % self.config['args']['logfile_base']) + self.assertEqual(len(files), 1) + + # make sure the logfile contains all our lines + lines = open(files[0]).readlines() + self.assertEqual(len(lines), 2 * len(self.content_ids)) diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/test_storage.py index 7ec0aca20..b39db625b 100644 --- a/swh/storage/tests/test_storage.py +++ b/swh/storage/tests/test_storage.py @@ -1,3042 +1,3099 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import os import psycopg2 import shutil import tempfile import unittest from uuid import UUID from unittest.mock import patch from nose.tools import istest from nose.plugins.attrib import attr from swh.core.tests.db_testing import DbTestFixture from swh.core.hashutil import hex_to_hash from swh.model import identifiers from swh.storage import get_storage from swh.storage.db import cursor_to_bytes TEST_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(TEST_DIR, '../../../../swh-storage-testdata') @attr('db') class AbstractTestStorage(DbTestFixture): """Base class for Storage testing. This class is used as-is to test local storage (see TestStorage below) and remote storage (see TestRemoteStorage in test_remote_storage.py. We need to have the two classes inherit from this base class separately to avoid nosetests running the tests from the base class twice. 
""" TEST_DB_DUMP = os.path.join(TEST_DATA_DIR, 'dumps/swh.dump') def setUp(self): super().setUp() self.maxDiff = None self.objroot = tempfile.mkdtemp() storage_conf = { 'cls': 'local', 'args': { 'db': self.conn, 'objstorage': { 'cls': 'pathslicing', 'args': { 'root': self.objroot, 'slicing': '0:2/2:4/4:6', }, }, }, } self.storage = get_storage(**storage_conf) self.cont = { 'data': b'42\n', 'length': 3, 'sha1': hex_to_hash( '34973274ccef6ab4dfaaf86599792fa9c3fe4689'), 'sha1_git': hex_to_hash( 'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'), 'sha256': hex_to_hash( '673650f936cb3b0a2f93ce09d81be107' '48b1b203c19e8176b4eefc1964a0cf3a'), 'status': 'visible', } self.cont2 = { 'data': b'4242\n', 'length': 5, 'sha1': hex_to_hash( '61c2b3a30496d329e21af70dd2d7e097046d07b7'), 'sha1_git': hex_to_hash( '36fade77193cb6d2bd826161a0979d64c28ab4fa'), 'sha256': hex_to_hash( '859f0b154fdb2d630f45e1ecae4a8629' '15435e663248bb8461d914696fc047cd'), 'status': 'visible', } self.cont3 = { 'data': b'424242\n', 'length': 7, 'sha1': hex_to_hash( '3e21cc4942a4234c9e5edd8a9cacd1670fe59f13'), 'sha1_git': hex_to_hash( 'c932c7649c6dfa4b82327d121215116909eb3bea'), 'sha256': hex_to_hash( '92fb72daf8c6818288a35137b72155f5' '07e5de8d892712ab96277aaed8cf8a36'), 'status': 'visible', } self.missing_cont = { 'data': b'missing\n', 'length': 8, 'sha1': hex_to_hash( 'f9c24e2abb82063a3ba2c44efd2d3c797f28ac90'), 'sha1_git': hex_to_hash( '33e45d56f88993aae6a0198013efa80716fd8919'), 'sha256': hex_to_hash( '6bbd052ab054ef222c1c87be60cd191a' 'ddedd24cc882d1f5f7f7be61dc61bb3a'), 'status': 'absent', } self.skipped_cont = { 'length': 1024 * 1024 * 200, 'sha1_git': hex_to_hash( '33e45d56f88993aae6a0198013efa80716fd8920'), 'reason': 'Content too long', 'status': 'absent', } self.skipped_cont2 = { 'length': 1024 * 1024 * 300, 'sha1_git': hex_to_hash( '33e45d56f88993aae6a0198013efa80716fd8921'), 'reason': 'Content too long', 'status': 'absent', } self.dir = { 'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa90', 'entries': [ { 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'perms': 0o644, }, { 'name': b'bar\xc3', 'type': 'dir', 'target': b'12345678901234567890', 'perms': 0o2000, }, ], } self.dir2 = { 'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa95', 'entries': [ { 'name': b'oof', 'type': 'file', 'target': self.cont2['sha1_git'], 'perms': 0o644, } ], } self.dir3 = { 'id': hex_to_hash('33e45d56f88993aae6a0198013efa80716fd8921'), 'entries': [ { 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'perms': 0o644, }, { 'name': b'bar', 'type': 'dir', 'target': b'12345678901234560000', 'perms': 0o2000, }, { 'name': b'hello', 'type': 'file', 'target': b'12345678901234567890', 'perms': 0o644, }, ], } self.minus_offset = datetime.timezone(datetime.timedelta(minutes=-120)) self.plus_offset = datetime.timezone(datetime.timedelta(minutes=120)) self.revision = { 'id': b'56789012345678901234', 'message': b'hello', 'author': { 'name': b'Nicolas Dandrimont', 'email': b'nicolas@example.com', 'fullname': b'Nicolas Dandrimont ', }, 'date': { 'timestamp': 1234567890, 'offset': 120, 'negative_utc': None, }, 'committer': { 'name': b'St\xc3fano Zacchiroli', 'email': b'stefano@example.com', 'fullname': b'St\xc3fano Zacchiroli ' }, 'committer_date': { 'timestamp': 1123456789, 'offset': 0, 'negative_utc': True, }, 'parents': [b'01234567890123456789', b'23434512345123456789'], 'type': 'git', 'directory': self.dir['id'], 'metadata': { 'checksums': { 'sha1': 'tarball-sha1', 'sha256': 'tarball-sha256', }, 'signed-off-by': 
'some-dude', 'extra_headers': [ ['gpgsig', b'test123'], ['mergetags', [b'foo\\bar', b'\x22\xaf\x89\x80\x01\x00']], ], }, 'synthetic': True } self.revision2 = { 'id': b'87659012345678904321', 'message': b'hello again', 'author': { 'name': b'Roberto Dicosmo', 'email': b'roberto@example.com', 'fullname': b'Roberto Dicosmo ', }, 'date': { 'timestamp': { 'seconds': 1234567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'committer': { 'name': b'tony', 'email': b'ar@dumont.fr', 'fullname': b'tony ', }, 'committer_date': { 'timestamp': 1123456789, 'offset': 0, 'negative_utc': False, }, 'parents': [b'01234567890123456789'], 'type': 'git', 'directory': self.dir2['id'], 'metadata': None, 'synthetic': False } self.revision3 = { 'id': hex_to_hash('7026b7c1a2af56521e951c01ed20f255fa054238'), 'message': b'a simple revision with no parents this time', 'author': { 'name': b'Roberto Dicosmo', 'email': b'roberto@example.com', 'fullname': b'Roberto Dicosmo ', }, 'date': { 'timestamp': { 'seconds': 1234567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'committer': { 'name': b'tony', 'email': b'ar@dumont.fr', 'fullname': b'tony ', }, 'committer_date': { 'timestamp': 1127351742, 'offset': 0, 'negative_utc': False, }, 'parents': [], 'type': 'git', 'directory': self.dir2['id'], 'metadata': None, 'synthetic': True } self.revision4 = { 'id': hex_to_hash('368a48fe15b7db2383775f97c6b247011b3f14f4'), 'message': b'parent of self.revision2', 'author': { 'name': b'me', 'email': b'me@soft.heri', 'fullname': b'me ', }, 'date': { 'timestamp': { 'seconds': 1244567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'committer': { 'name': b'committer-dude', 'email': b'committer@dude.com', 'fullname': b'committer-dude ', }, 'committer_date': { 'timestamp': { 'seconds': 1244567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'parents': [self.revision3['id']], 'type': 'git', 'directory': self.dir['id'], 'metadata': None, 'synthetic': False } self.origin = { 'url': 'file:///dev/null', 'type': 'git', } self.origin2 = { 'url': 'file:///dev/zero', 'type': 'git', } self.date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) self.occurrence = { 'branch': b'master', 'target': b'67890123456789012345', 'target_type': 'revision', } self.date_visit2 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) self.occurrence2 = { 'branch': b'master', 'target': self.revision2['id'], 'target_type': 'revision', } self.date_visit3 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) # template occurrence to be filled in test (cf. 
revision_log_by) self.occurrence3 = { 'branch': b'master', 'target_type': 'revision', } self.release = { 'id': b'87659012345678901234', 'name': b'v0.0.1', 'author': { 'name': b'olasd', 'email': b'nic@olasd.fr', 'fullname': b'olasd ', }, 'date': { 'timestamp': 1234567890, 'offset': 42, 'negative_utc': None, }, 'target': b'43210987654321098765', 'target_type': 'revision', 'message': b'synthetic release', 'synthetic': True, } self.release2 = { 'id': b'56789012348765901234', 'name': b'v0.0.2', 'author': { 'name': b'tony', 'email': b'ar@dumont.fr', 'fullname': b'tony ', }, 'date': { 'timestamp': 1634366813, 'offset': -120, 'negative_utc': None, }, 'target': b'432109\xa9765432\xc309\x00765', 'target_type': 'revision', 'message': b'v0.0.2\nMisc performance improvments + bug fixes', 'synthetic': False } self.release3 = { 'id': b'87659012345678904321', 'name': b'v0.0.2', 'author': { 'name': b'tony', 'email': b'tony@ardumont.fr', 'fullname': b'tony ', }, 'date': { 'timestamp': 1634336813, 'offset': 0, 'negative_utc': False, }, 'target': self.revision2['id'], 'target_type': 'revision', 'message': b'yet another synthetic release', 'synthetic': True, } self.fetch_history_date = datetime.datetime( 2015, 1, 2, 21, 0, 0, tzinfo=datetime.timezone.utc) self.fetch_history_end = datetime.datetime( 2015, 1, 2, 23, 0, 0, tzinfo=datetime.timezone.utc) self.fetch_history_duration = (self.fetch_history_end - self.fetch_history_date) self.fetch_history_data = { 'status': True, 'result': {'foo': 'bar'}, 'stdout': 'blabla', 'stderr': 'blablabla', } self.entity1 = { 'uuid': UUID('f96a7ec1-0058-4920-90cc-7327e4b5a4bf'), # GitHub users 'parent': UUID('ad6df473-c1d2-4f40-bc58-2b091d4a750e'), 'name': 'github:user:olasd', 'type': 'person', 'description': 'Nicolas Dandrimont', 'homepage': 'http://example.com', 'active': True, 'generated': True, 'lister_metadata': { # swh.lister.github 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4', 'id': 12877, 'type': 'user', 'last_activity': '2015-11-03', }, 'metadata': None, 'validity': [ datetime.datetime(2015, 11, 3, 11, 0, 0, tzinfo=datetime.timezone.utc), ] } self.entity1_query = { 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4', 'id': 12877, 'type': 'user', } self.entity2 = { 'uuid': UUID('3903d075-32d6-46d4-9e29-0aef3612c4eb'), # GitHub users 'parent': UUID('ad6df473-c1d2-4f40-bc58-2b091d4a750e'), 'name': 'github:user:zacchiro', 'type': 'person', 'description': 'Stefano Zacchiroli', 'homepage': 'http://example.com', 'active': True, 'generated': True, 'lister_metadata': { # swh.lister.github 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4', 'id': 216766, 'type': 'user', 'last_activity': '2015-11-03', }, 'metadata': None, 'validity': [ datetime.datetime(2015, 11, 3, 11, 0, 0, tzinfo=datetime.timezone.utc), ] } self.entity3 = { 'uuid': UUID('111df473-c1d2-4f40-bc58-2b091d4a7111'), # GitHub users 'parent': UUID('222df473-c1d2-4f40-bc58-2b091d4a7222'), 'name': 'github:user:ardumont', 'type': 'person', 'description': 'Antoine R. 
Dumont a.k.a tony', 'homepage': 'https://ardumont.github.io', 'active': True, 'generated': True, 'lister_metadata': { 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4', 'id': 666, 'type': 'user', 'last_activity': '2016-01-15', }, 'metadata': None, 'validity': [ datetime.datetime(2015, 11, 3, 11, 0, 0, tzinfo=datetime.timezone.utc), ] } self.entity4 = { 'uuid': UUID('222df473-c1d2-4f40-bc58-2b091d4a7222'), # GitHub users 'parent': None, 'name': 'github:user:ToNyX', 'type': 'person', 'description': 'ToNyX', 'homepage': 'https://ToNyX.github.io', 'active': True, 'generated': True, 'lister_metadata': { 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4', 'id': 999, 'type': 'user', 'last_activity': '2015-12-24', }, 'metadata': None, 'validity': [ datetime.datetime(2015, 11, 3, 11, 0, 0, tzinfo=datetime.timezone.utc), ] } self.entity2_query = { 'lister_metadata': { 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4', 'id': 216766, 'type': 'user', }, } def tearDown(self): shutil.rmtree(self.objroot) self.cursor.execute("""SELECT table_name FROM information_schema.tables WHERE table_schema = %s""", ('public',)) tables = set(table for (table,) in self.cursor.fetchall()) tables -= {'dbversion', 'entity', 'entity_history', 'listable_entity', 'fossology_license', 'indexer_configuration'} for table in tables: self.cursor.execute('truncate table %s cascade' % table) self.cursor.execute('delete from entity where generated=true') self.cursor.execute('delete from entity_history where generated=true') self.conn.commit() super().tearDown() @staticmethod def normalize_entity(entity): entity = copy.deepcopy(entity) for key in ('date', 'committer_date'): if key in entity: entity[key] = identifiers.normalize_timestamp(entity[key]) return entity @istest def check_config(self): self.assertTrue(self.storage.check_config(check_write=True)) self.assertTrue(self.storage.check_config(check_write=False)) @istest def content_add(self): cont = self.cont self.storage.content_add([cont]) if hasattr(self.storage, 'objstorage'): self.assertIn(cont['sha1'], self.storage.objstorage) self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status' ' FROM content WHERE sha1 = %s', (cont['sha1'],)) datum = self.cursor.fetchone() self.assertEqual( (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(), datum[3], datum[4]), (cont['sha1'], cont['sha1_git'], cont['sha256'], cont['length'], 'visible')) @istest def content_add_collision(self): cont1 = self.cont # create (corrupted) content with same sha1{,_git} but != sha256 cont1b = cont1.copy() sha256_array = bytearray(cont1b['sha256']) sha256_array[0] += 1 cont1b['sha256'] = bytes(sha256_array) with self.assertRaises(psycopg2.IntegrityError): self.storage.content_add([cont1, cont1b]) @istest def skipped_content_add(self): cont = self.skipped_cont cont2 = self.skipped_cont2 self.storage.content_add([cont]) self.storage.content_add([cont2]) self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status,' 'reason FROM skipped_content ORDER BY sha1_git') datum = self.cursor.fetchone() self.assertEqual( (datum[0], datum[1].tobytes(), datum[2], datum[3], datum[4], datum[5]), (None, cont['sha1_git'], None, cont['length'], 'absent', 'Content too long')) datum2 = self.cursor.fetchone() self.assertEqual( (datum2[0], datum2[1].tobytes(), datum2[2], datum2[3], datum2[4], datum2[5]), (None, cont2['sha1_git'], None, cont2['length'], 'absent', 'Content too long')) @istest def content_missing(self): cont2 = self.cont2 missing_cont = self.missing_cont self.storage.content_add([cont2]) gen = 
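# Hedged sketch of the normalization that normalize_entity above
# delegates to swh.model.identifiers.normalize_timestamp: fixture
# dates written as a bare integer are expanded to the canonical
# {'seconds', 'microseconds'} form before comparison.
def _normalize_timestamp_sketch(date):
    ts = date['timestamp']
    if isinstance(ts, int):
        ts = {'seconds': ts, 'microseconds': 0}
    return dict(date, timestamp=ts)

assert _normalize_timestamp_sketch(
    {'timestamp': 1234567890, 'offset': 120, 'negative_utc': None}
)['timestamp'] == {'seconds': 1234567890, 'microseconds': 0}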
self.storage.content_missing([cont2, missing_cont]) self.assertEqual(list(gen), [missing_cont['sha1']]) @istest def content_missing_per_sha1(self): # given cont2 = self.cont2 missing_cont = self.missing_cont self.storage.content_add([cont2]) # when gen = self.storage.content_missing_per_sha1([cont2['sha1'], missing_cont['sha1']]) # then self.assertEqual(list(gen), [missing_cont['sha1']]) @istest def content_get_metadata(self): cont1 = self.cont.copy() cont2 = self.cont2.copy() self.storage.content_add([cont1, cont2]) gen = self.storage.content_get_metadata([cont1['sha1'], cont2['sha1']]) # we only retrieve the metadata cont1.pop('data') cont2.pop('data') self.assertEqual(list(gen), [cont1, cont2]) @istest def content_get_metadata_missing_sha1(self): cont1 = self.cont.copy() cont2 = self.cont2.copy() missing_cont = self.missing_cont.copy() self.storage.content_add([cont1, cont2]) gen = self.storage.content_get_metadata([missing_cont['sha1']]) # All the metadata keys are None missing_cont.pop('data') for key in list(missing_cont): if key != 'sha1': missing_cont[key] = None self.assertEqual(list(gen), [missing_cont]) @istest def directory_get(self): # given init_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([self.dir['id']], init_missing) self.storage.directory_add([self.dir]) # when actual_dirs = list(self.storage.directory_get([self.dir['id']])) self.assertEqual(len(actual_dirs), 1) dir0 = actual_dirs[0] self.assertEqual(dir0['id'], self.dir['id']) # ids are generated so non deterministic value self.assertEqual(len(dir0['file_entries']), 1) self.assertEqual(len(dir0['dir_entries']), 1) self.assertIsNone(dir0['rev_entries']) after_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([], after_missing) @istest def directory_add(self): init_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([self.dir['id']], init_missing) self.storage.directory_add([self.dir]) stored_data = list(self.storage.directory_ls(self.dir['id'])) data_to_store = [{ 'dir_id': self.dir['id'], 'type': ent['type'], 'target': ent['target'], 'name': ent['name'], 'perms': ent['perms'], 'status': None, 'sha1': None, 'sha1_git': None, 'sha256': None, } for ent in sorted(self.dir['entries'], key=lambda ent: ent['name']) ] self.assertEqual(data_to_store, stored_data) after_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([], after_missing) @istest def directory_entry_get_by_path(self): # given init_missing = list(self.storage.directory_missing([self.dir3['id']])) self.assertEqual([self.dir3['id']], init_missing) self.storage.directory_add([self.dir3]) expected_entries = [ { 'dir_id': self.dir3['id'], 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'sha1': None, 'sha1_git': None, 'sha256': None, 'status': None, 'perms': 0o644, }, { 'dir_id': self.dir3['id'], 'name': b'bar', 'type': 'dir', 'target': b'12345678901234560000', 'sha1': None, 'sha1_git': None, 'sha256': None, 'status': None, 'perms': 0o2000, }, { 'dir_id': self.dir3['id'], 'name': b'hello', 'type': 'file', 'target': b'12345678901234567890', 'sha1': None, 'sha1_git': None, 'sha256': None, 'status': None, 'perms': 0o644, }, ] # when (all must be found here) for entry, expected_entry in zip(self.dir3['entries'], expected_entries): actual_entry = self.storage.directory_entry_get_by_path( self.dir3['id'], [entry['name']]) self.assertEqual(actual_entry, expected_entry) # when (nothing should be found here since self.dir is not 
persisted.) for entry in self.dir['entries']: actual_entry = self.storage.directory_entry_get_by_path( self.dir['id'], [entry['name']]) self.assertIsNone(actual_entry) @istest def revision_add(self): init_missing = self.storage.revision_missing([self.revision['id']]) self.assertEqual([self.revision['id']], list(init_missing)) self.storage.revision_add([self.revision]) end_missing = self.storage.revision_missing([self.revision['id']]) self.assertEqual([], list(end_missing)) def cache_content_revision_objects(self): self.storage.content_add([self.cont, self.cont2, self.cont3]) directory = { 'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa90', 'entries': [ { 'name': b'bar', 'type': 'file', 'target': self.cont2['sha1_git'], 'perms': 0o644, }, { 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'perms': 0o644, }, { 'name': b'bar\xc3', 'type': 'dir', 'target': b'12345678901234567890', 'perms': 0o2000, }, ], } directory2 = copy.deepcopy(directory) directory2['id'] = (directory2['id'][:-1] + bytes([(directory2['id'][-1] + 1) % 256])) directory2['entries'][1] = { 'name': b'foo', 'type': 'file', 'target': self.cont3['sha1_git'], 'perms': 0o644, } self.storage.directory_add([directory, directory2]) revision = self.revision.copy() revision['directory'] = directory['id'] revision2 = copy.deepcopy(revision) revision2['parents'] = [revision['id']] revision2['directory'] = directory2['id'] revision2['id'] = (revision2['id'][:-1] + bytes([(revision2['id'][-1] + 1) % 256])) self.storage.revision_add([revision, revision2]) return (directory, directory2, revision, revision2) @istest def cache_content_revision_add(self): # Create a real arborescence tree (contents + directory) and a # revision targeting that directory. # Assert the cache is empty for that revision # Then create that revision # Trigger the cache population for that revision # Assert the cache now contains information for that revision # Trigger again the cache population for that revision # Assert the cache is not modified # given () (directory, directory2, revision, revision2) = self.cache_content_revision_objects() # assert nothing in cache yet count_query = '''select count(*) from cache_content_revision''' self.cursor.execute(count_query) ret = self.cursor.fetchone() self.assertEqual(ret, (0, )) # when, triggered the first time, we cache the revision self.storage.cache_content_revision_add([revision['id']]) # the second time, we do nothing as this is already done self.storage.cache_content_revision_add([revision['id']]) # then self.cursor.execute(count_query) ret = self.cursor.fetchone() # only 2 contents exists for that revision (the second call to # revision_cache discards as the revision is already cached) self.assertEqual(ret, (2, )) self.cursor.execute('select * from cache_content_revision') ret = self.cursor.fetchall() expected_cache_entries = [ (directory['entries'][0]['target'], False, [[revision['id'], directory['entries'][0]['name']]]), (directory['entries'][1]['target'], False, [[revision['id'], directory['entries'][1]['name']]]) ] for i, expected_entry in enumerate(expected_cache_entries): ret_entry = (ret[i][0].tobytes(), ret[i][1], [[ret[i][2][0][0].tobytes(), ret[i][2][0][1].tobytes()]]) self.assertEquals(ret_entry, expected_entry) @istest def cache_content_revision_add_twice(self): # given () (directory, directory2, revision, revision2) = self.cache_content_revision_objects() # assert nothing in cache yet count_query = '''select count(*) from cache_content_revision''' self.cursor.execute(count_query) 
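# Miniature model of the idempotence property the duplicated
# cache_content_revision_add calls around here check: re-caching an
# already-cached revision must not add rows.
def _cache_revision_sketch(cache, revision_id, contents):
    if any(rev == revision_id for _, rev in cache):
        return  # already cached: do nothing
    cache.update((content, revision_id) for content in contents)

_cache = set()
_cache_revision_sketch(_cache, b'rev', [b'c1', b'c2'])
_cache_revision_sketch(_cache, b'rev', [b'c1', b'c2'])
assert len(_cache) == 2  # two contents, cached exactly once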
ret = self.cursor.fetchone() self.assertEqual(ret, (0, )) # when, triggered the first time, we cache the revision self.storage.cache_content_revision_add([revision['id']]) # the second time, we do nothing as this is already done self.storage.cache_content_revision_add([revision2['id']]) # then self.cursor.execute('select * from cache_content_revision') cache_entries = { content.tobytes(): [[rev.tobytes(), path.tobytes()] for rev, path in rev_paths] for content, blacklisted, rev_paths in self.cursor.fetchall() } self.assertEquals(len(cache_entries), 3) self.assertEquals(len(cache_entries[self.cont['sha1_git']]), 1) self.assertEquals(len(cache_entries[self.cont2['sha1_git']]), 2) self.assertEquals(len(cache_entries[self.cont3['sha1_git']]), 1) @istest def cache_content_get_all(self): # given (directory, directory2, revision, revision2) = self.cache_content_revision_objects() # assert nothing in cache yet test_query = '''select sha1, sha1_git, sha256, ccr.revision_paths from cache_content_revision ccr inner join content c on c.sha1_git=ccr.content''' self.storage.cache_content_revision_add([revision['id']]) self.cursor.execute(test_query, (revision['id'],)) ret = list(cursor_to_bytes(self.cursor)) self.assertEqual(len(ret), 2) expected_contents = [] for entry in ret: expected_contents.append(dict( zip(['sha1', 'sha1_git', 'sha256', 'revision_paths'], entry))) # 1. default filters gives everything actual_cache_contents = list(self.storage.cache_content_get_all()) self.assertEquals(actual_cache_contents, expected_contents) @istest def cache_content_get(self): # given (directory, directory2, revision, revision2) = self.cache_content_revision_objects() # assert nothing in cache yet test_query = '''select c.sha1, c.sha1_git, c.sha256, ccr.revision_paths from cache_content_revision ccr inner join content c on c.sha1_git=ccr.content where ccr.content=%s''' self.storage.cache_content_revision_add([revision['id']]) self.cursor.execute(test_query, (self.cont2['sha1_git'],)) ret = list(cursor_to_bytes(self.cursor))[0] self.assertIsNotNone(ret) expected_content = dict( zip(['sha1', 'sha1_git', 'sha256', 'revision_paths'], ret)) # when actual_cache_content = self.storage.cache_content_get(self.cont2) # then self.assertEquals(actual_cache_content, expected_content) @istest def revision_log(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) # when actual_results = list(self.storage.revision_log( [self.revision4['id']])) # hack: ids generated for actual_result in actual_results: del actual_result['author']['id'] del actual_result['committer']['id'] self.assertEqual(len(actual_results), 2) # rev4 -child-> rev3 self.assertEquals(actual_results[0], self.normalize_entity(self.revision4)) self.assertEquals(actual_results[1], self.normalize_entity(self.revision3)) @istest def revision_log_with_limit(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) actual_results = list(self.storage.revision_log( [self.revision4['id']], 1)) # hack: ids generated for actual_result in actual_results: del actual_result['author']['id'] del actual_result['committer']['id'] self.assertEqual(len(actual_results), 1) self.assertEquals(actual_results[0], self.revision4) @istest def revision_log_by(self): # given origin_id = self.storage.origin_add_one(self.origin2) self.storage.revision_add([self.revision3, self.revision4]) # occurrence3 targets 'revision4' # with branch 'master' and origin origin_id 
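# Why the .tobytes() calls above are needed: in Python 3, psycopg2
# returns bytea columns as memoryview objects; converting them keeps
# the comparisons against the bytes fixtures type-consistent.
_mv = memoryview(b'\x00\x06/')
assert isinstance(_mv.tobytes(), bytes)
assert _mv.tobytes() == b'\x00\x06/'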
occurrence3 = self.occurrence3.copy() date_visit1 = self.date_visit3 origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1) occurrence3.update({ 'origin': origin_id, 'target': self.revision4['id'], 'visit': origin_visit1['visit'], }) self.storage.occurrence_add([occurrence3]) # self.revision4 -is-child-of-> self.revision3 # when actual_results = list(self.storage.revision_log_by( origin_id, branch_name=occurrence3['branch'], timestamp=date_visit1)) # hack: ids generated for actual_result in actual_results: del actual_result['author']['id'] del actual_result['committer']['id'] self.assertEqual(len(actual_results), 2) self.assertEquals(actual_results[0], self.normalize_entity(self.revision4)) self.assertEquals(actual_results[1], self.normalize_entity(self.revision3)) # when - 2 actual_results = list(self.storage.revision_log_by( origin_id, branch_name=None, timestamp=None, limit=1)) # then for actual_result in actual_results: del actual_result['author']['id'] del actual_result['committer']['id'] self.assertEqual(len(actual_results), 1) self.assertEquals(actual_results[0], self.revision4) # when - 3 (revision not found) actual_res = list(self.storage.revision_log_by( origin_id, branch_name='inexistant-branch', timestamp=None)) self.assertEquals(actual_res, []) @staticmethod def _short_revision(revision): return [revision['id'], revision['parents']] @istest def revision_shortlog(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) # when actual_results = list(self.storage.revision_shortlog( [self.revision4['id']])) self.assertEqual(len(actual_results), 2) # rev4 -child-> rev3 self.assertEquals(list(actual_results[0]), self._short_revision(self.revision4)) self.assertEquals(list(actual_results[1]), self._short_revision(self.revision3)) @istest def revision_shortlog_with_limit(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) actual_results = list(self.storage.revision_shortlog( [self.revision4['id']], 1)) self.assertEqual(len(actual_results), 1) self.assertEquals(list(actual_results[0]), self._short_revision(self.revision4)) @istest def revision_get(self): self.storage.revision_add([self.revision]) actual_revisions = list(self.storage.revision_get( [self.revision['id'], self.revision2['id']])) # when del actual_revisions[0]['author']['id'] # hack: ids are generated del actual_revisions[0]['committer']['id'] self.assertEqual(len(actual_revisions), 2) self.assertEqual(actual_revisions[0], self.normalize_entity(self.revision)) self.assertIsNone(actual_revisions[1]) @istest def revision_get_no_parents(self): self.storage.revision_add([self.revision3]) get = list(self.storage.revision_get([self.revision3['id']])) self.assertEqual(len(get), 1) self.assertEqual(get[0]['parents'], []) # no parents on this one @istest def revision_get_by(self): # given self.storage.content_add([self.cont2]) self.storage.directory_add([self.dir2]) # point to self.cont self.storage.revision_add([self.revision2]) # points to self.dir origin_id = self.storage.origin_add_one(self.origin2) # occurrence2 points to 'revision2' with branch 'master', we # need to point to the right origin occurrence2 = self.occurrence2.copy() date_visit1 = self.date_visit2 origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1) occurrence2.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], }) self.storage.occurrence_add([occurrence2]) # we want only revision 2 
expected_revisions = list(self.storage.revision_get( [self.revision2['id']])) # when actual_results = list(self.storage.revision_get_by( origin_id, occurrence2['branch'], None)) self.assertEqual(actual_results[0], expected_revisions[0]) # when (with no branch filtering, it's still ok) actual_results = list(self.storage.revision_get_by( origin_id, None, None)) self.assertEqual(actual_results[0], expected_revisions[0]) @istest def revision_get_by_multiple_occurrence(self): # 2 occurrences pointing to 2 different revisions # each occurence have 1 day delta # the api must return the revision whose occurrence is the nearest. # given self.storage.content_add([self.cont2]) self.storage.directory_add([self.dir2]) self.storage.revision_add([self.revision2, self.revision3]) origin_id = self.storage.origin_add_one(self.origin2) # occurrence2 points to 'revision2' with branch 'master', we # need to point to the right origin date_visit1 = self.date_visit2 origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1) occurrence2 = self.occurrence2.copy() occurrence2.update({ 'origin': origin_id, 'visit': origin_visit1['visit'] }) dt = datetime.timedelta(days=1) date_visit2 = date_visit1 + dt origin_visit2 = self.storage.origin_visit_add(origin_id, date_visit2) occurrence3 = self.occurrence2.copy() occurrence3.update({ 'origin': origin_id, 'visit': origin_visit2['visit'], 'target': self.revision3['id'], }) # 2 occurrences on same revision with lower validity date with 1 day # delta self.storage.occurrence_add([occurrence2]) self.storage.occurrence_add([occurrence3]) # when actual_results0 = list(self.storage.revision_get_by( origin_id, occurrence2['branch'], date_visit1)) # hack: ids are generated del actual_results0[0]['author']['id'] del actual_results0[0]['committer']['id'] self.assertEquals(len(actual_results0), 1) self.assertEqual(actual_results0, [self.normalize_entity(self.revision2)]) # when actual_results1 = list(self.storage.revision_get_by( origin_id, occurrence2['branch'], date_visit1 + dt/3)) # closer to first visit # hack: ids are generated del actual_results1[0]['author']['id'] del actual_results1[0]['committer']['id'] self.assertEquals(len(actual_results1), 1) self.assertEqual(actual_results1, [self.normalize_entity(self.revision2)]) # when actual_results2 = list(self.storage.revision_get_by( origin_id, occurrence2['branch'], date_visit1 + 2*dt/3)) # closer to second visit del actual_results2[0]['author']['id'] del actual_results2[0]['committer']['id'] self.assertEquals(len(actual_results2), 1) self.assertEqual(actual_results2, [self.normalize_entity(self.revision3)]) # when actual_results3 = list(self.storage.revision_get_by( origin_id, occurrence3['branch'], date_visit2)) # hack: ids are generated del actual_results3[0]['author']['id'] del actual_results3[0]['committer']['id'] self.assertEquals(len(actual_results3), 1) self.assertEqual(actual_results3, [self.normalize_entity(self.revision3)]) # when actual_results4 = list(self.storage.revision_get_by( origin_id, None, None)) for actual_result in actual_results4: del actual_result['author']['id'] del actual_result['committer']['id'] self.assertEquals(len(actual_results4), 1) self.assertCountEqual(actual_results4, [self.normalize_entity(self.revision3)]) @istest def release_add(self): init_missing = self.storage.release_missing([self.release['id'], self.release2['id']]) self.assertEqual([self.release['id'], self.release2['id']], list(init_missing)) self.storage.release_add([self.release, self.release2]) end_missing = 
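# Worked form of the 'nearest occurrence' rule stated in the comments
# above: with two visits one day apart, a lookup at +dt/3 resolves to
# the first visit and one at +2*dt/3 to the second.
import datetime

_v1 = datetime.datetime(2015, 1, 1, 23, tzinfo=datetime.timezone.utc)
_dt = datetime.timedelta(days=1)
_v2 = _v1 + _dt

def _nearest_visit(ts):
    return min((_v1, _v2), key=lambda v: abs(v - ts))

assert _nearest_visit(_v1 + _dt / 3) == _v1
assert _nearest_visit(_v1 + 2 * _dt / 3) == _v2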
self.storage.release_missing([self.release['id'], self.release2['id']]) self.assertEqual([], list(end_missing)) @istest def release_get(self): # given self.storage.release_add([self.release, self.release2]) # when actual_releases = list(self.storage.release_get([self.release['id'], self.release2['id']])) # then for actual_release in actual_releases: del actual_release['author']['id'] # hack: ids are generated self.assertEquals([self.normalize_entity(self.release), self.normalize_entity(self.release2)], [actual_releases[0], actual_releases[1]]) @istest def release_get_by(self): # given self.storage.revision_add([self.revision2]) # points to self.dir self.storage.release_add([self.release3]) origin_id = self.storage.origin_add_one(self.origin2) # occurrence2 points to 'revision2' with branch 'master', we # need to point to the right origin origin_visit = self.storage.origin_visit_add(origin_id, self.date_visit2) occurrence2 = self.occurrence2.copy() occurrence2.update({ 'origin': origin_id, 'visit': origin_visit['visit'], }) self.storage.occurrence_add([occurrence2]) # we want only revision 2 expected_releases = list(self.storage.release_get( [self.release3['id']])) # when actual_results = list(self.storage.release_get_by( occurrence2['origin'])) # then self.assertEqual(actual_results[0], expected_releases[0]) @istest def origin_add_one(self): origin0 = self.storage.origin_get(self.origin) self.assertIsNone(origin0) id = self.storage.origin_add_one(self.origin) actual_origin = self.storage.origin_get({'url': self.origin['url'], 'type': self.origin['type']}) self.assertEqual(actual_origin['id'], id) id2 = self.storage.origin_add_one(self.origin) self.assertEqual(id, id2) @istest def origin_add(self): origin0 = self.storage.origin_get(self.origin) self.assertIsNone(origin0) id1, id2 = self.storage.origin_add([self.origin, self.origin2]) actual_origin = self.storage.origin_get({ 'url': self.origin['url'], 'type': self.origin['type'], }) self.assertEqual(actual_origin['id'], id1) actual_origin2 = self.storage.origin_get({ 'url': self.origin2['url'], 'type': self.origin2['type'], }) self.assertEqual(actual_origin2['id'], id2) @istest def origin_add_twice(self): add1 = self.storage.origin_add([self.origin, self.origin2]) add2 = self.storage.origin_add([self.origin, self.origin2]) self.assertEqual(add1, add2) @istest def origin_get(self): self.assertIsNone(self.storage.origin_get(self.origin)) id = self.storage.origin_add_one(self.origin) # lookup per type and url (returns id) actual_origin0 = self.storage.origin_get({'url': self.origin['url'], 'type': self.origin['type']}) self.assertEqual(actual_origin0['id'], id) # lookup per id (returns dict) actual_origin1 = self.storage.origin_get({'id': id}) self.assertEqual(actual_origin1, {'id': id, 'type': self.origin['type'], 'url': self.origin['url'], 'lister': None, 'project': None}) @istest def origin_visit_add(self): # given self.assertIsNone(self.storage.origin_get(self.origin2)) origin_id = self.storage.origin_add_one(self.origin2) self.assertIsNotNone(origin_id) # when origin_visit1 = self.storage.origin_visit_add( origin_id, ts=self.date_visit2) # then self.assertEquals(origin_visit1['origin'], origin_id) self.assertIsNotNone(origin_visit1['visit']) self.assertTrue(origin_visit1['visit'] > 0) actual_origin_visits = list(self.storage.origin_visit_get(origin_id)) self.assertEquals(actual_origin_visits, [{ 'origin': origin_id, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'status': 'ongoing', 'metadata': None, }]) @istest def 
origin_visit_update(self): # given origin_id = self.storage.origin_add_one(self.origin2) origin_id2 = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add( origin_id, ts=self.date_visit2) origin_visit2 = self.storage.origin_visit_add( origin_id, ts=self.date_visit3) origin_visit3 = self.storage.origin_visit_add( origin_id2, ts=self.date_visit3) # when visit1_metadata = { 'contents': 42, 'directories': 22, } self.storage.origin_visit_update( origin_id, origin_visit1['visit'], status='full', metadata=visit1_metadata) self.storage.origin_visit_update(origin_id2, origin_visit3['visit'], status='partial') # then actual_origin_visits = list(self.storage.origin_visit_get(origin_id)) self.assertEquals(actual_origin_visits, [{ 'origin': origin_visit2['origin'], 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'status': 'full', 'metadata': visit1_metadata, }, { 'origin': origin_visit2['origin'], 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'status': 'ongoing', 'metadata': None, }]) actual_origin_visits_bis = list(self.storage.origin_visit_get( origin_id, limit=1)) self.assertEquals(actual_origin_visits_bis, [{ 'origin': origin_visit2['origin'], 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'status': 'full', 'metadata': visit1_metadata, }]) actual_origin_visits_ter = list(self.storage.origin_visit_get( origin_id, last_visit=origin_visit1['visit'])) self.assertEquals(actual_origin_visits_ter, [{ 'origin': origin_visit2['origin'], 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'status': 'ongoing', 'metadata': None, }]) actual_origin_visits2 = list(self.storage.origin_visit_get(origin_id2)) self.assertEquals(actual_origin_visits2, [{ 'origin': origin_visit3['origin'], 'date': self.date_visit3, 'visit': origin_visit3['visit'], 'status': 'partial', 'metadata': None, }]) @istest def origin_visit_get_by(self): origin_id = self.storage.origin_add_one(self.origin2) origin_id2 = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add( origin_id, ts=self.date_visit2) occurrence2 = self.occurrence2.copy() occurrence2.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], }) self.storage.occurrence_add([occurrence2]) # Add some other {origin, visit} entries self.storage.origin_visit_add(origin_id, ts=self.date_visit3) self.storage.origin_visit_add(origin_id2, ts=self.date_visit3) # when visit1_metadata = { 'contents': 42, 'directories': 22, } self.storage.origin_visit_update( origin_id, origin_visit1['visit'], status='full', metadata=visit1_metadata) expected_origin_visit = origin_visit1.copy() expected_origin_visit.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], 'date': self.date_visit2, 'metadata': visit1_metadata, 'status': 'full', 'occurrences': { occurrence2['branch']: { 'target': occurrence2['target'], 'target_type': occurrence2['target_type'], } } }) # when actual_origin_visit1 = self.storage.origin_visit_get_by( origin_visit1['origin'], origin_visit1['visit']) # then self.assertEquals(actual_origin_visit1, expected_origin_visit) @istest def origin_visit_get_by_no_result(self): # No result actual_origin_visit = self.storage.origin_visit_get_by( 10, 999) self.assertIsNone(actual_origin_visit) @istest def occurrence_add(self): occur = self.occurrence.copy() origin_id = self.storage.origin_add_one(self.origin2) date_visit1 = self.date_visit1 origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1) revision = self.revision.copy() revision['id'] = occur['target'] 
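# Hedged pure-python model of the visit pagination contract exercised
# above: 'limit' caps the number of returned visits and 'last_visit'
# resumes strictly after the given visit number.
def _paginate_visits_sketch(visits, last_visit=None, limit=None):
    if last_visit is not None:
        visits = [v for v in visits if v['visit'] > last_visit]
    return visits[:limit] if limit is not None else visits

_visits = [{'visit': 1}, {'visit': 2}]
assert _paginate_visits_sketch(_visits, limit=1) == [{'visit': 1}]
assert _paginate_visits_sketch(_visits, last_visit=1) == [{'visit': 2}]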
self.storage.revision_add([revision]) occur.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], }) self.storage.occurrence_add([occur]) test_query = ''' with indiv_occurrences as ( select origin, branch, target, target_type, unnest(visits) as visit from occurrence_history ) select origin, branch, target, target_type, date from indiv_occurrences left join origin_visit using(origin, visit) order by origin, date''' self.cursor.execute(test_query) ret = self.cursor.fetchall() self.assertEqual(len(ret), 1) self.assertEqual( (ret[0][0], ret[0][1].tobytes(), ret[0][2].tobytes(), ret[0][3], ret[0][4]), (occur['origin'], occur['branch'], occur['target'], occur['target_type'], self.date_visit1)) date_visit2 = date_visit1 + datetime.timedelta(hours=10) origin_visit2 = self.storage.origin_visit_add(origin_id, date_visit2) occur2 = occur.copy() occur2.update({ 'visit': origin_visit2['visit'], }) self.storage.occurrence_add([occur2]) self.cursor.execute(test_query) ret = self.cursor.fetchall() self.assertEqual(len(ret), 2) self.assertEqual( (ret[0][0], ret[0][1].tobytes(), ret[0][2].tobytes(), ret[0][3], ret[0][4]), (occur['origin'], occur['branch'], occur['target'], occur['target_type'], date_visit1)) self.assertEqual( (ret[1][0], ret[1][1].tobytes(), ret[1][2].tobytes(), ret[1][3], ret[1][4]), (occur2['origin'], occur2['branch'], occur2['target'], occur2['target_type'], date_visit2)) @istest def occurrence_get(self): # given occur = self.occurrence.copy() origin_id = self.storage.origin_add_one(self.origin2) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) revision = self.revision.copy() revision['id'] = occur['target'] self.storage.revision_add([revision]) occur.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], }) self.storage.occurrence_add([occur]) self.storage.occurrence_add([occur]) # when actual_occurrence = list(self.storage.occurrence_get(origin_id)) # then expected_occurrence = self.occurrence.copy() expected_occurrence.update({ 'origin': origin_id }) self.assertEquals(len(actual_occurrence), 1) self.assertEquals(actual_occurrence[0], expected_occurrence) def _trigger_cache_provenance(self, origin_visit): """Trigger cache population for cache_content_revision. """ ret = list(self.storage.cache_revision_origin_add( origin_visit['origin'], origin_visit['visit'], )) for revision_id in ret: self.storage.cache_content_revision_add([revision_id]) return ret @istest def content_find_provenance_with_present_content(self): # 1. 
with something to find # given origin_id = self.storage.origin_add_one(self.origin2) self.storage.content_add([self.cont2]) self.storage.directory_add([self.dir2]) # point to self.cont self.storage.revision_add([self.revision3]) # points to self.dir occurrence = self.occurrence3.copy() occurrence['target'] = self.revision3['id'] origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit2) occurrence.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], }) self.storage.occurrence_add([occurrence]) # Trigger cache population for cache_content_revision cached_revisions = self._trigger_cache_provenance(origin_visit1) self.assertIn(self.revision3['id'], cached_revisions) # when occs = list(self.storage.content_find_provenance( {'sha1': self.cont2['sha1']})) # then self.assertEquals(len(occs), 1) self.assertEquals(occs[0]['origin'], origin_visit1['origin']) self.assertEquals(occs[0]['visit'], origin_visit1['visit']) self.assertEquals(occs[0]['revision'], self.revision3['id']) self.assertEquals(occs[0]['path'], self.dir2['entries'][0]['name']) occs2 = list(self.storage.content_find_provenance( {'sha1_git': self.cont2['sha1_git']})) self.assertEquals(len(occs2), 1) self.assertEquals(occs2[0]['origin'], origin_visit1['origin']) self.assertEquals(occs2[0]['visit'], origin_visit1['visit']) self.assertEquals(occs2[0]['revision'], self.revision3['id']) self.assertEquals(occs2[0]['path'], self.dir2['entries'][0]['name']) occs3 = list(self.storage.content_find_provenance( {'sha256': self.cont2['sha256']})) self.assertEquals(len(occs3), 1) self.assertEquals(occs3[0]['origin'], origin_visit1['origin']) self.assertEquals(occs3[0]['visit'], origin_visit1['visit']) self.assertEquals(occs3[0]['revision'], self.revision3['id']) self.assertEquals(occs3[0]['path'], self.dir2['entries'][0]['name']) @istest def content_find_provenance_with_non_present_content(self): # 1. with something that does not exist missing_cont = self.missing_cont occ = list(self.storage.content_find_provenance( {'sha1': missing_cont['sha1']})) self.assertEquals(occ, [], "Content does not exist so no occurrence") # 2. with something that does not exist occ = list(self.storage.content_find_provenance( {'sha1_git': missing_cont['sha1_git']})) self.assertEquals(occ, [], "Content does not exist so no occurrence") # 3. with something that does not exist occ = list(self.storage.content_find_provenance( {'sha256': missing_cont['sha256']})) self.assertEquals(occ, [], "Content does not exist so no occurrence") @istest def content_find_occurrence_bad_input(self): # 1. with bad input with self.assertRaises(ValueError) as cm: list(self.storage.content_find_provenance({})) # empty is bad self.assertIn('content keys', cm.exception.args[0]) # 2. 
with bad input with self.assertRaises(ValueError) as cm: list(self.storage.content_find_provenance( {'unknown-sha1': 'something'})) # not the right key self.assertIn('content keys', cm.exception.args[0]) @istest def entity_get_from_lister_metadata(self): self.storage.entity_add([self.entity1]) fetched_entities = list( self.storage.entity_get_from_lister_metadata( [self.entity1_query, self.entity2_query])) # Entity 1 should have full metadata, with last_seen/last_id instead # of validity entity1 = self.entity1.copy() entity1['last_seen'] = entity1['validity'][0] del fetched_entities[0]['last_id'] del entity1['validity'] # Entity 2 should have no metadata entity2 = { 'uuid': None, 'lister_metadata': self.entity2_query.copy(), } self.assertEquals(fetched_entities, [entity1, entity2]) @istest def entity_get_from_lister_metadata_twice(self): self.storage.entity_add([self.entity1]) fetched_entities1 = list( self.storage.entity_get_from_lister_metadata( [self.entity1_query])) fetched_entities2 = list( self.storage.entity_get_from_lister_metadata( [self.entity1_query])) self.assertEquals(fetched_entities1, fetched_entities2) @istest def entity_get(self): # given self.storage.entity_add([self.entity4]) self.storage.entity_add([self.entity3]) # when: entity3 -child-of-> entity4 actual_entity3 = list(self.storage.entity_get(self.entity3['uuid'])) self.assertEquals(len(actual_entity3), 2) # remove dynamic data (modified by db) entity3 = self.entity3.copy() entity4 = self.entity4.copy() del entity3['validity'] del entity4['validity'] del actual_entity3[0]['last_seen'] del actual_entity3[0]['last_id'] del actual_entity3[1]['last_seen'] del actual_entity3[1]['last_id'] self.assertEquals(actual_entity3, [entity3, entity4]) # when: entity4 only child actual_entity4 = list(self.storage.entity_get(self.entity4['uuid'])) self.assertEquals(len(actual_entity4), 1) # remove dynamic data (modified by db) entity4 = self.entity4.copy() del entity4['validity'] del actual_entity4[0]['last_id'] del actual_entity4[0]['last_seen'] self.assertEquals(actual_entity4, [entity4]) @istest def entity_get_one(self): # given self.storage.entity_add([self.entity3, self.entity4]) # when: entity3 -child-of-> entity4 actual_entity3 = self.storage.entity_get_one(self.entity3['uuid']) # remove dynamic data (modified by db) entity3 = self.entity3.copy() del entity3['validity'] del actual_entity3['last_seen'] del actual_entity3['last_id'] self.assertEquals(actual_entity3, entity3) @istest def stat_counters(self): expected_keys = ['content', 'directory', 'directory_entry_dir', 'occurrence', 'origin', 'person', 'revision'] counters = self.storage.stat_counters() self.assertTrue(set(expected_keys) <= set(counters)) self.assertIsInstance(counters[expected_keys[0]], int) @istest def content_find_with_present_content(self): # 1. with something to find cont = self.cont self.storage.content_add([cont]) actually_present = self.storage.content_find({'sha1': cont['sha1']}) actually_present.pop('ctime') self.assertEqual(actually_present, { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'length': cont['length'], 'status': 'visible' }) # 2. with something to find actually_present = self.storage.content_find( {'sha1_git': cont['sha1_git']}) actually_present.pop('ctime') self.assertEqual(actually_present, { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'length': cont['length'], 'status': 'visible' }) # 3. 
with something to find actually_present = self.storage.content_find( {'sha256': cont['sha256']}) actually_present.pop('ctime') self.assertEqual(actually_present, { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'length': cont['length'], 'status': 'visible' }) # 4. with something to find actually_present = self.storage.content_find( {'sha1': cont['sha1'], 'sha1_git': cont['sha1_git'], 'sha256': cont['sha256']}) actually_present.pop('ctime') self.assertEqual(actually_present, { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'length': cont['length'], 'status': 'visible' }) @istest def content_find_with_non_present_content(self): # 1. with something that does not exist missing_cont = self.missing_cont actually_present = self.storage.content_find( {'sha1': missing_cont['sha1']}) self.assertIsNone(actually_present) # 2. with something that does not exist actually_present = self.storage.content_find( {'sha1_git': missing_cont['sha1_git']}) self.assertIsNone(actually_present) # 3. with something that does not exist actually_present = self.storage.content_find( {'sha256': missing_cont['sha256']}) self.assertIsNone(actually_present) @istest def content_find_bad_input(self): # 1. with bad input with self.assertRaises(ValueError): self.storage.content_find({}) # empty is bad # 2. with bad input with self.assertRaises(ValueError): self.storage.content_find( {'unknown-sha1': 'something'}) # not the right key @istest def object_find_by_sha1_git(self): sha1_gits = [b'00000000000000000000'] expected = { b'00000000000000000000': [], } self.storage.content_add([self.cont]) sha1_gits.append(self.cont['sha1_git']) expected[self.cont['sha1_git']] = [{ 'sha1_git': self.cont['sha1_git'], 'type': 'content', 'id': self.cont['sha1'], }] self.storage.directory_add([self.dir]) sha1_gits.append(self.dir['id']) expected[self.dir['id']] = [{ 'sha1_git': self.dir['id'], 'type': 'directory', 'id': self.dir['id'], }] self.storage.revision_add([self.revision]) sha1_gits.append(self.revision['id']) expected[self.revision['id']] = [{ 'sha1_git': self.revision['id'], 'type': 'revision', 'id': self.revision['id'], }] self.storage.release_add([self.release]) sha1_gits.append(self.release['id']) expected[self.release['id']] = [{ 'sha1_git': self.release['id'], 'type': 'release', 'id': self.release['id'], }] ret = self.storage.object_find_by_sha1_git(sha1_gits) for val in ret.values(): for obj in val: del obj['object_id'] self.assertEqual(expected, ret) @istest def content_mimetype_missing(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) mimetypes = [ { 'id': self.cont2['sha1'], 'tool_name': 'file', 'tool_version': '5.22', }, { 'id': self.missing_cont['sha1'], 'tool_name': 'file', 'tool_version': '5.22', }] # when actual_missing = self.storage.content_mimetype_missing(mimetypes) # then self.assertEqual(list(actual_missing), [ self.cont2['sha1'], self.missing_cont['sha1'] ]) # given self.storage.content_mimetype_add([{ 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool_name': 'file', 'tool_version': '5.22', }]) # when actual_missing = self.storage.content_mimetype_missing(mimetypes) # then self.assertEqual(list(actual_missing), [self.missing_cont['sha1']]) @istest def content_mimetype_add__drop_duplicate(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) mimetype_v1 = { 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool_name': 'file', 'tool_version': '5.22', } # given 
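# Pure-python illustration of the shape change the indexer tests
# around here rely on: rows are written with flat tool_name and
# tool_version keys but read back with a nested 'tool' dict.
def _nest_tool_sketch(row):
    row = dict(row)
    row['tool'] = {'name': row.pop('tool_name'),
                   'version': row.pop('tool_version')}
    return row

assert _nest_tool_sketch(
    {'id': b'x', 'tool_name': 'file', 'tool_version': '5.22'}
)['tool'] == {'name': 'file', 'version': '5.22'}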
self.storage.content_mimetype_add([mimetype_v1]) # when actual_mimetypes = list(self.storage.content_mimetype_get( [self.cont2['sha1']])) # then expected_mimetypes_v1 = [{ 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool': { 'name': 'file', 'version': '5.22', } }] self.assertEqual(actual_mimetypes, expected_mimetypes_v1) # given mimetype_v2 = mimetype_v1.copy() mimetype_v2.update({ 'mimetype': b'text/html', 'encoding': b'us-ascii', }) self.storage.content_mimetype_add([mimetype_v2]) actual_mimetypes = list(self.storage.content_mimetype_get( [self.cont2['sha1']])) # mimetype did not change as the v2 was dropped. self.assertEqual(actual_mimetypes, expected_mimetypes_v1) @istest def content_mimetype_add__update_in_place_duplicate(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) mimetype_v1 = { 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool_name': 'file', 'tool_version': '5.22', } # given self.storage.content_mimetype_add([mimetype_v1]) # when actual_mimetypes = list(self.storage.content_mimetype_get( [self.cont2['sha1']])) expected_mimetypes_v1 = [{ 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool': { 'name': 'file', 'version': '5.22', } }] # then self.assertEqual(actual_mimetypes, expected_mimetypes_v1) # given mimetype_v2 = mimetype_v1.copy() mimetype_v2.update({ 'mimetype': b'text/html', 'encoding': b'us-ascii', }) self.storage.content_mimetype_add([mimetype_v2], conflict_update=True) actual_mimetypes = list(self.storage.content_mimetype_get( [self.cont2['sha1']])) expected_mimetypes_v2 = [{ 'id': self.cont2['sha1'], 'mimetype': b'text/html', 'encoding': b'us-ascii', 'tool': { 'name': 'file', 'version': '5.22', } }] # mimetype did change as the v2 was used to overwrite v1 self.assertEqual(actual_mimetypes, expected_mimetypes_v2) @istest def content_mimetype_get(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) mimetypes = [self.cont2['sha1'], self.missing_cont['sha1']] mimetype1 = { 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool_name': 'file', 'tool_version': '5.22', } # when self.storage.content_mimetype_add([mimetype1]) # then actual_mimetypes = list(self.storage.content_mimetype_get(mimetypes)) # then expected_mimetypes = [{ 'id': self.cont2['sha1'], 'mimetype': b'text/plain', 'encoding': b'utf-8', 'tool': { 'name': 'file', 'version': '5.22', } }] self.assertEqual(actual_mimetypes, expected_mimetypes) @istest def content_language_missing(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) languages = [ { 'id': self.cont2['sha1'], 'tool_name': 'pygments', 'tool_version': '2.0.1+dfsg-1.1+deb8u1', }, { 'id': self.missing_cont['sha1'], 'tool_name': 'pygments', 'tool_version': '2.0.1+dfsg-1.1+deb8u1', } ] # when actual_missing = list(self.storage.content_language_missing(languages)) # then self.assertEqual(list(actual_missing), [ self.cont2['sha1'], self.missing_cont['sha1'], ]) # given self.storage.content_language_add([{ 'id': self.cont2['sha1'], 'lang': 'haskell', 'tool_name': 'pygments', 'tool_version': '2.0.1+dfsg-1.1+deb8u1', }]) # when actual_missing = list(self.storage.content_language_missing(languages)) # then self.assertEqual(actual_missing, [self.missing_cont['sha1']]) @istest def content_language_get(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) language1 = { 'id': self.cont2['sha1'], 'lang': 'common-lisp', 'tool_name': 'pygments', 'tool_version': '2.0.1+dfsg-1.1+deb8u1', } # 
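# Pure-python model of the two conflict behaviours the
# *_drop_duplicate and *_update_in_place_duplicate tests check: by
# default a second add for the same id is dropped; with
# conflict_update=True it overwrites the stored row.
def _indexer_add_sketch(store, row, conflict_update=False):
    if row['id'] in store and not conflict_update:
        return
    store[row['id']] = row

_store = {}
_indexer_add_sketch(_store, {'id': b'x', 'mimetype': b'text/plain'})
_indexer_add_sketch(_store, {'id': b'x', 'mimetype': b'text/html'})
assert _store[b'x']['mimetype'] == b'text/plain'  # duplicate dropped
_indexer_add_sketch(_store, {'id': b'x', 'mimetype': b'text/html'},
                    conflict_update=True)
assert _store[b'x']['mimetype'] == b'text/html'  # overwritten in place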
when self.storage.content_language_add([language1]) # then actual_languages = list(self.storage.content_language_get( [self.cont2['sha1'], self.missing_cont['sha1']])) # then expected_languages = [{ 'id': self.cont2['sha1'], 'lang': 'common-lisp', 'tool': { 'name': 'pygments', 'version': '2.0.1+dfsg-1.1+deb8u1', } }] self.assertEqual(actual_languages, expected_languages) @istest def content_language_add__drop_duplicate(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) language_v1 = { 'id': self.cont2['sha1'], 'lang': 'emacslisp', 'tool_name': 'pygments', 'tool_version': '2.0.1+dfsg-1.1+deb8u1', } # given self.storage.content_language_add([language_v1]) # when actual_languages = list(self.storage.content_language_get( [self.cont2['sha1']])) # then expected_languages_v1 = [{ 'id': self.cont2['sha1'], 'lang': 'emacslisp', 'tool': { 'name': 'pygments', 'version': '2.0.1+dfsg-1.1+deb8u1', } }] self.assertEqual(actual_languages, expected_languages_v1) # given language_v2 = language_v1.copy() language_v2.update({ 'lang': 'common-lisp', }) self.storage.content_language_add([language_v2]) actual_languages = list(self.storage.content_language_get( [self.cont2['sha1']])) # language did not change as the v2 was dropped. self.assertEqual(actual_languages, expected_languages_v1) @istest def content_language_add__update_in_place_duplicate(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) language_v1 = { 'id': self.cont2['sha1'], 'lang': 'common-lisp', 'tool_name': 'pygments', 'tool_version': '2.0.1+dfsg-1.1+deb8u1', } # given self.storage.content_language_add([language_v1]) # when actual_languages = list(self.storage.content_language_get( [self.cont2['sha1']])) # then expected_languages_v1 = [{ 'id': self.cont2['sha1'], 'lang': 'common-lisp', 'tool': { 'name': 'pygments', 'version': '2.0.1+dfsg-1.1+deb8u1', } }] self.assertEqual(actual_languages, expected_languages_v1) # given language_v2 = language_v1.copy() language_v2.update({ 'lang': 'emacslisp', }) self.storage.content_language_add([language_v2], conflict_update=True) actual_languages = list(self.storage.content_language_get( [self.cont2['sha1']])) # language did not change as the v2 was dropped. 
expected_languages_v2 = [{ 'id': self.cont2['sha1'], 'lang': 'emacslisp', 'tool': { 'name': 'pygments', 'version': '2.0.1+dfsg-1.1+deb8u1', } }] # language did change as the v2 was used to overwrite v1 self.assertEqual(actual_languages, expected_languages_v2) @istest def content_ctags_missing(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) ctags = [ { 'id': self.cont2['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', }, { 'id': self.missing_cont['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', } ] # when actual_missing = self.storage.content_ctags_missing(ctags) # then self.assertEqual(list(actual_missing), [ self.cont2['sha1'], self.missing_cont['sha1'] ]) # given self.storage.content_ctags_add([ { 'id': self.cont2['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 119, 'lang': 'OCaml', }] }, ]) # when actual_missing = self.storage.content_ctags_missing(ctags) # then self.assertEqual(list(actual_missing), [self.missing_cont['sha1']]) @istest def content_ctags_get(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) ctags = [self.cont2['sha1'], self.missing_cont['sha1']] ctag1 = { 'id': self.cont2['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Python', }, { 'name': 'main', 'kind': 'function', 'line': 119, 'lang': 'Python', }] } # when self.storage.content_ctags_add([ctag1]) # then actual_ctags = list(self.storage.content_ctags_get(ctags)) # then expected_ctags = [ { 'id': self.cont2['sha1'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Python', }, { 'id': self.cont2['sha1'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'main', 'kind': 'function', 'line': 119, 'lang': 'Python', } ] self.assertEqual(actual_ctags, expected_ctags) @istest def content_ctags_search(self): # 1. given cont = self.cont cont2 = self.cont2 self.storage.content_add([cont, cont2]) ctag1 = { 'id': cont['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', 'ctags': [ { 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }, ] } ctag2 = { 'id': cont2['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', 'ctags': [ { 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, ] } self.storage.content_ctags_add([ctag1, ctag2]) # 1. when actual_ctags = list(self.storage.content_ctags_search('hello', limit=1)) # 1. then self.assertEqual(actual_ctags, [ { 'id': ctag1['id'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', } ]) # 2. when actual_ctags = list(self.storage.content_ctags_search( 'hello', limit=1, last_sha1=ctag1['id'])) # 2. then self.assertEqual(actual_ctags, [ { 'id': ctag2['id'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', } ]) # 3. when actual_ctags = list(self.storage.content_ctags_search('hello')) # 3. 
then self.assertEqual(actual_ctags, [ { 'id': ctag1['id'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'id': ctag2['id'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, ]) # 4. when actual_ctags = list(self.storage.content_ctags_search('counter')) # then self.assertEqual(actual_ctags, [{ 'id': ctag1['id'], 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }]) @istest def content_ctags_search_no_result(self): actual_ctags = list(self.storage.content_ctags_search('counter')) self.assertEquals(actual_ctags, []) @istest def content_ctags_add__add_new_ctags_added(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) ctag_v1 = { 'id': self.cont2['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given self.storage.content_ctags_add([ctag_v1]) self.storage.content_ctags_add([ctag_v1]) # conflict does nothing # when actual_ctags = list(self.storage.content_ctags_get( [self.cont2['sha1']])) # then expected_ctags = [{ 'id': self.cont2['sha1'], 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', } }] self.assertEqual(actual_ctags, expected_ctags) # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) self.storage.content_ctags_add([ctag_v2]) expected_ctags = [ { 'id': self.cont2['sha1'], 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, }, { 'id': self.cont2['sha1'], 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, } ] actual_ctags = list(self.storage.content_ctags_get( [self.cont2['sha1']])) self.assertEqual(actual_ctags, expected_ctags) @istest def content_ctags_add__update_in_place(self): # given cont2 = self.cont2 self.storage.content_add([cont2]) ctag_v1 = { 'id': self.cont2['sha1'], 'tool_name': 'universal-ctags', 'tool_version': '~git7859817b', 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given self.storage.content_ctags_add([ctag_v1]) # when actual_ctags = list(self.storage.content_ctags_get( [self.cont2['sha1']])) # then expected_ctags = [ { 'id': self.cont2['sha1'], 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', } } ] self.assertEqual(actual_ctags, expected_ctags) # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }, { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) self.storage.content_ctags_add([ctag_v2], conflict_update=True) actual_ctags = list(self.storage.content_ctags_get( [self.cont2['sha1']])) # ctag did change as the v2 was used to overwrite v1 expected_ctags = [ { 'id': self.cont2['sha1'], 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': { 'name': 'universal-ctags', 'version': '~git7859817b', }, }, { 'id': self.cont2['sha1'], 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': { 'name': 
'universal-ctags', 'version': '~git7859817b', }, } ] self.assertEqual(actual_ctags, expected_ctags) @istest def content_fossology_license_missing(self): # given cont = self.cont self.storage.content_add([cont]) licenses = [ { 'id': cont['sha1'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', }, { 'id': self.missing_cont['sha1'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', } ] # when actual_missing = list(self.storage.content_fossology_license_missing( licenses)) # then self.assertEqual(actual_missing, [ cont['sha1'], self.missing_cont['sha1'] ]) # given r = self.storage.content_fossology_license_add([{ 'id': cont['sha1'], 'licenses': ['GPL-2.0', 'GPL-2.0+'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', }]) self.assertEqual(r, []) # when actual_missing = list(self.storage.content_fossology_license_missing( licenses)) # then self.assertEqual(actual_missing, [self.missing_cont['sha1']]) @istest def content_fossology_license_get(self): # given cont = self.cont self.storage.content_add([cont]) licenses = [cont['sha1'], self.missing_cont['sha1']] license1 = { 'id': cont['sha1'], 'licenses': ['GPL-2.0+'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', } # when r = self.storage.content_fossology_license_add([license1]) self.assertEquals(r, []) # then actual_licenses = list(self.storage.content_fossology_license_get( licenses)) # then self.assertEqual(actual_licenses, [license1]) @istest def content_fossology_license_add__wrong_license(self): # given cont = self.cont self.storage.content_add([cont]) license_v1 = { 'id': cont['sha1'], 'licenses': ['blackhole'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', } # given r = self.storage.content_fossology_license_add([license_v1]) # then self.assertEqual(r, [license_v1]) # when actual_licenses = list(self.storage.content_fossology_license_get( [cont['sha1']])) # then self.assertEqual(actual_licenses, []) @istest def content_fossology_license_add__new_license_added(self): # given cont = self.cont self.storage.content_add([cont]) license_v1 = { 'id': cont['sha1'], 'licenses': ['Apache-2.0'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', } # given self.storage.content_fossology_license_add([license_v1]) # conflict does nothing self.storage.content_fossology_license_add([license_v1]) # when actual_licenses = list(self.storage.content_fossology_license_get( [cont['sha1']])) # then self.assertEqual(actual_licenses[0], license_v1) # given license_v2 = license_v1.copy() license_v2.update({ 'licenses': ['BSD-2-Clause'], }) self.storage.content_fossology_license_add([license_v2]) actual_licenses = list(self.storage.content_fossology_license_get( [cont['sha1']])) expected_license = license_v1.copy() expected_license.update({ 'licenses': ['Apache-2.0', 'BSD-2-Clause'], }) # license did not change as the v2 was dropped. 
self.assertEqual(actual_licenses[0], expected_license) @istest def content_fossology_license_add__update_in_place_duplicate(self): # given cont = self.cont self.storage.content_add([cont]) license_v1 = { 'id': cont['sha1'], 'licenses': ['CECILL'], 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', } # given self.storage.content_fossology_license_add([license_v1]) # conflict does nothing self.storage.content_fossology_license_add([license_v1]) # when actual_licenses = list(self.storage.content_fossology_license_get( [cont['sha1']])) # then self.assertEqual(actual_licenses[0], license_v1) # given license_v2 = license_v1.copy() license_v2.update({ 'licenses': ['CECILL-2.0'] }) self.storage.content_fossology_license_add([license_v2], conflict_update=True) actual_licenses = list(self.storage.content_fossology_license_get( [cont['sha1']])) # license did change as the v2 was used to overwrite v1 self.assertEqual(actual_licenses[0], license_v2) class TestStorage(AbstractTestStorage, unittest.TestCase): """Test the local storage""" # Can only be tested with local storage as you can't mock # datetimes for the remote server @istest def fetch_history(self): origin = self.storage.origin_add_one(self.origin) with patch('datetime.datetime'): datetime.datetime.now.return_value = self.fetch_history_date fetch_history_id = self.storage.fetch_history_start(origin) datetime.datetime.now.assert_called_with(tz=datetime.timezone.utc) with patch('datetime.datetime'): datetime.datetime.now.return_value = self.fetch_history_end self.storage.fetch_history_end(fetch_history_id, self.fetch_history_data) fetch_history = self.storage.fetch_history_get(fetch_history_id) expected_fetch_history = self.fetch_history_data.copy() expected_fetch_history['id'] = fetch_history_id expected_fetch_history['origin'] = origin expected_fetch_history['date'] = self.fetch_history_date expected_fetch_history['duration'] = self.fetch_history_duration self.assertEqual(expected_fetch_history, fetch_history) @istest def person_get(self): # given person0 = { 'fullname': b'bob ', 'name': b'bob', 'email': b'alice@bob', } id0 = self.storage._person_add(person0) person1 = { 'fullname': b'tony ', 'name': b'tony', 'email': b'tony@bob', } id1 = self.storage._person_add(person1) # when actual_persons = self.storage.person_get([id0, id1]) # given (person injection through release for example) self.assertEqual( list(actual_persons), [ { 'id': id0, 'fullname': person0['fullname'], 'name': person0['name'], 'email': person0['email'], }, { 'id': id1, 'fullname': person1['fullname'], 'name': person1['name'], 'email': person1['email'], }, ]) + + +class AlteringSchemaTest(AbstractTestStorage, unittest.TestCase): + """This class is dedicated for the rare case where the schema needs to + be altered dynamically. + + Otherwise, the tests could be blocking when ran altogether. 
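
The tests above pin down the conflict semantics shared by the indexer add
methods: a duplicate add is dropped by default, while passing
`conflict_update=True` overwrites the previous row in place. A minimal
sketch of that behaviour, assuming `storage` is an already-configured local
storage instance and `sha1` the id of an archived content (both placeholders
here):

```
mimetype = {
    'id': sha1,                 # placeholder content id
    'mimetype': b'text/plain',
    'encoding': b'utf-8',
    'tool_name': 'file',
    'tool_version': '5.22',
}
storage.content_mimetype_add([mimetype])   # first write
storage.content_mimetype_add([mimetype])   # duplicate: dropped by default
storage.content_mimetype_add([mimetype],
                             conflict_update=True)  # overwrites in place
```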
+ + """ + @istest + def content_update(self): + cont = self.cont + + self.storage.content_add([cont]) + # alter the sha1_git for example + cont['sha1_git'] = hex_to_hash( + '3a60a5275d0333bf13468e8b3dcab90f4046e654') + + self.storage.content_update([cont], keys=['sha1_git']) + + self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status' + ' FROM content WHERE sha1 = %s', + (cont['sha1'],)) + datum = self.cursor.fetchone() + self.assertEqual( + (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(), + datum[3], datum[4]), + (cont['sha1'], cont['sha1_git'], cont['sha256'], + cont['length'], 'visible')) + + @istest + def content_update_with_new_cols(self): + self.cursor.execute("""alter table content + add column test text default null, + add column test2 text default null""") + + cont = self.cont2 + self.storage.content_add([cont]) + cont['test'] = 'value-1' + cont['test2'] = 'value-2' + + self.storage.content_update([cont], keys=['test', 'test2']) + + self.cursor.execute( + 'SELECT sha1, sha1_git, sha256, length, status, test, test2' + ' FROM content WHERE sha1 = %s', + (cont['sha1'],)) + + datum = self.cursor.fetchone() + self.assertEqual( + (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(), + datum[3], datum[4], datum[5], datum[6]), + (cont['sha1'], cont['sha1_git'], cont['sha256'], + cont['length'], 'visible', cont['test'], cont['test2'])) + + self.cursor.execute("""alter table content drop column test, + drop column test2""") diff --git a/swh/storage/vault/api/client.py b/swh/storage/vault/api/client.py index a8bfb5b7e..c158245b2 100644 --- a/swh/storage/vault/api/client.py +++ b/swh/storage/vault/api/client.py @@ -1,25 +1,35 @@ # Copyright (C) 2016-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core import hashutil from swh.core.api import SWHRemoteAPI from swh.storage.exc import StorageAPIError class RemoteVaultCache(SWHRemoteAPI): """Client to the Software Heritage vault cache.""" def __init__(self, base_url): super().__init__(api_exception=StorageAPIError, url=base_url) def directory_ls(self): return self.get('vault/directory/') def directory_get(self, obj_id): return self.get('vault/directory/%s/' % (hashutil.hash_to_hex(obj_id))) def directory_cook(self, obj_id): return self.post('vault/directory/%s/' % hashutil.hash_to_hex(obj_id), data={}) + + def revision_ls(self): + return self.get('vault/revision/') + + def revision_get(self, obj_id): + return self.get('vault/revision/%s/' % (hashutil.hash_to_hex(obj_id))) + + def revision_cook(self, obj_id): + return self.post('vault/revision/%s/' % hashutil.hash_to_hex(obj_id), + data={}) diff --git a/swh/storage/vault/api/cooking_tasks.py b/swh/storage/vault/api/cooking_tasks.py index dbb576a63..ac08f7475 100644 --- a/swh/storage/vault/api/cooking_tasks.py +++ b/swh/storage/vault/api/cooking_tasks.py @@ -1,31 +1,32 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.scheduler.task import Task from swh.core import hashutil from ..cache import VaultCache -from ..cooker import DirectoryVaultCooker +from ..cooker import DirectoryVaultCooker, RevisionVaultCooker from ... 
diff --git a/swh/storage/vault/api/cooking_tasks.py b/swh/storage/vault/api/cooking_tasks.py
index dbb576a63..ac08f7475 100644
--- a/swh/storage/vault/api/cooking_tasks.py
+++ b/swh/storage/vault/api/cooking_tasks.py
@@ -1,31 +1,32 @@
# Copyright (C) 2016  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from swh.scheduler.task import Task
from swh.core import hashutil
from ..cache import VaultCache
-from ..cooker import DirectoryVaultCooker
+from ..cooker import DirectoryVaultCooker, RevisionVaultCooker
from ... import get_storage


COOKER_TYPES = {
-    'directory': DirectoryVaultCooker
+    'directory': DirectoryVaultCooker,
+    'revision': RevisionVaultCooker,
}


class SWHCookingTask(Task):
    """Main task which cooks the bundle requested for a given object.

    """
    task_queue = 'swh_storage_vault_cooking'

    def run(self, type, hex_id, storage_args, cache_args):
        # Initialize elements
        storage = get_storage(**storage_args)
        cache = VaultCache(**cache_args)
        # Initialize cooker
        cooker = COOKER_TYPES[type](storage, cache)
        # Perform the cooking
        cooker.cook(obj_id=hashutil.hex_to_hash(hex_id))
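
`SWHCookingTask.run` resolves the requested bundle type through
`COOKER_TYPES`, so the same task now serves both directory and revision
cooking. For testing outside Celery the task can be driven directly; a
sketch, with arguments mirroring the server's `DEFAULT_CONFIG` below
(all values are placeholders):

```
from swh.storage.vault.api.cooking_tasks import SWHCookingTask

SWHCookingTask().run(
    'revision',
    '00062f8bd330715c4f819373653d97b3cd34394c',   # placeholder hex id
    storage_args={'cls': 'local',
                  'args': {'db': 'dbname=softwareheritage-dev',
                           'objstorage': {'root': '/tmp/objects',
                                          'slicing': '0:2/2:4/4:6'}}},
    cache_args={'root': '/tmp/vaultcache'})
```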
diff --git a/swh/storage/vault/api/server.py b/swh/storage/vault/api/server.py
index fe1dc5231..6ec9843df 100644
--- a/swh/storage/vault/api/server.py
+++ b/swh/storage/vault/api/server.py
@@ -1,104 +1,104 @@
# Copyright (C) 2016  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import click

from flask import abort, g
from werkzeug.routing import BaseConverter

from swh.core import config
from swh.core.api import (SWHServerAPIApp, error_handler,
                          encode_data_server as encode_data)
from swh.storage import get_storage
from swh.storage.vault.api import cooking_tasks  # NOQA
from swh.storage.vault.cache import VaultCache
from swh.storage.vault.cooker import DirectoryVaultCooker
from swh.scheduler.celery_backend.config import app as celery_app


cooking_task_name = 'swh.storage.vault.api.cooking_tasks.SWHCookingTask'

DEFAULT_CONFIG = {
    'storage': ('dict', {
        'cls': 'local',
        'args': {
            'db': 'dbname=softwareheritage-dev',
            'objstorage': {
                'root': '/tmp/objects',
                'slicing': '0:2/2:4/4:6',
            },
        },
    }),
    'cache': ('dict', {'root': '/tmp/vaultcache'})
}


class RegexConverter(BaseConverter):
    def __init__(self, url_map, *items):
        super().__init__(url_map)
        self.regex = items[0]


app = SWHServerAPIApp(__name__)
app.url_map.converters['regex'] = RegexConverter


@app.errorhandler(Exception)
def my_error_handler(exception):
    return error_handler(exception, encode_data)


@app.before_request
def before_request():
    g.cache = VaultCache(**app.config['cache'])
    g.cooker = DirectoryVaultCooker(
        get_storage(**app.config['storage']),
        g.cache
    )


@app.route('/')
def index():
    return 'SWH vault API server'


@app.route('/vault/<type>/', methods=['GET'])
def ls_directory(type):
    return encode_data(list(
        g.cache.ls(type)
    ))


@app.route('/vault/<type>/<id>/', methods=['GET'])
def get_cooked_directory(type, id):
    if not g.cache.is_cached(type, id):
        abort(404)
    return encode_data(g.cache.get(type, id).decode())


@app.route('/vault/<type>/<id>/', methods=['POST'])
def cook_request_directory(type, id):
    task = celery_app.tasks[cooking_task_name]
    task.delay(type, id, app.config['storage'], app.config['cache'])
    # Return url to get the content and 201 CREATED
    return encode_data('/vault/%s/%s/' % (type, id)), 201


@click.command()
@click.argument('config-path', required=1)
@click.option('--host', default='0.0.0.0', help="Host to run the server")
-@click.option('--port', default=5000, type=click.INT,
+@click.option('--port', default=5005, type=click.INT,
              help="Binding port of the server")
@click.option('--debug/--nodebug', default=True,
              help="Indicates if the server should run in debug mode")
def launch(config_path, host, port, debug):
    app.config.update(config.read(config_path, DEFAULT_CONFIG))
    app.run(host, port=int(port), debug=bool(debug))


if __name__ == '__main__':
    launch()
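
Over HTTP the flow is cook-then-poll: the POST schedules the Celery task and
answers 201 with the URL to fetch, while the GET returns 404 until the bundle
lands in the cache. A sketch against the new default port 5005 (the hash is a
placeholder):

```
import requests

base = 'http://localhost:5005'
hex_id = '00062f8bd330715c4f819373653d97b3cd34394c'  # placeholder sha1

# Schedule the cooking; the server answers 201 CREATED with the poll URL.
r = requests.post('%s/vault/revision/%s/' % (base, hex_id))
assert r.status_code == 201

# Poll for the result; 404 until the cooking task has completed.
r = requests.get('%s/vault/revision/%s/' % (base, hex_id))
```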
diff --git a/swh/storage/vault/cooker.py b/swh/storage/vault/cooker.py
index 6554a11ae..8210db934 100644
--- a/swh/storage/vault/cooker.py
+++ b/swh/storage/vault/cooker.py
@@ -1,224 +1,296 @@
# Copyright (C) 2016  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import abc
import io
import itertools
+import logging
import os
import tarfile
import tempfile

+from pathlib import Path
+
from swh.core import hashutil

SKIPPED_MESSAGE = (b'This content has not been retrieved in the '
                   b'Software Heritage archive due to its size')

HIDDEN_MESSAGE = (b'This content is hidden')


+def get_tar_bytes(path, arcname=None):
+    path = Path(path)
+    if not arcname:
+        arcname = path.name
+    tar_buffer = io.BytesIO()
+    tar = tarfile.open(fileobj=tar_buffer, mode='w')
+    tar.add(str(path), arcname=arcname)
+    return tar_buffer.getbuffer()
+
+
class BaseVaultCooker(metaclass=abc.ABCMeta):
    """Abstract base class for the vault's bundle creators

    This class describes a common API for the cookers.

    To define a new cooker, inherit from this class and override:
    - CACHE_TYPE_KEY: key to use for the bundle to reference in cache
    - def cook(obj_id): cook the object into a bundle
    - def notify_bundle_ready(notif_data, bundle_id): notify the
      bundle is ready.

    """
    CACHE_TYPE_KEY = None

    def __init__(self, storage, cache):
        self.storage = storage
        self.cache = cache

    @abc.abstractmethod
    def cook(self, obj_id):
        """Cook the requested object into a bundle

        The type of the object represented by the id depends on the
        concrete class. Very likely, each type of bundle will have its
        own cooker class.

        Args:
            obj_id: id of the object to be cooked into a bundle.

        """
        pass

    def update_cache(self, id, bundle_content):
        """Update the cache with id and bundle_content.

        """
        self.cache.add(self.CACHE_TYPE_KEY, id, bundle_content)

    @abc.abstractmethod
    def notify_bundle_ready(self, notif_data, bundle_id):
        """Notify the bundle bundle_id is ready.

        """
        pass


class DirectoryVaultCooker(BaseVaultCooker):
    """Cooker to create a directory bundle """
    CACHE_TYPE_KEY = 'directory'

    def __init__(self, storage, cache):
        """Initialize a cooker that creates directory bundles

        Args:
            storage: source storage where contents are retrieved.
            cache: destination storage where the cooked bundles are stored.

        """
        self.storage = storage
        self.cache = cache

-    def cook(self, dir_id):
+    def cook(self, obj_id):
        """Cook the requested directory into a Bundle

        Args:
-            dir_id (bytes): the id of the directory to be cooked.
+            obj_id (bytes): the id of the directory to be cooked.

        Returns:
            bytes that correspond to the bundle

        """
        # Create the bytes that correspond to the compressed
        # directory.
        directory_cooker = DirectoryCooker(self.storage)
-        bundle_content = directory_cooker.get_directory_bytes(dir_id)
+        bundle_content = directory_cooker.get_directory_bytes(obj_id)
        # Cache the bundle
-        self.update_cache(dir_id, bundle_content)
+        self.update_cache(obj_id, bundle_content)
        # Make a notification that the bundle has been cooked
        # NOT YET IMPLEMENTED see TODO in function.
        self.notify_bundle_ready(
-            notif_data='Bundle %s ready' % hashutil.hash_to_hex(dir_id),
-            bundle_id=dir_id)
+            notif_data='Bundle %s ready' % hashutil.hash_to_hex(obj_id),
+            bundle_id=obj_id)

-    def notify_bundle_ready(self, bundle_id):
+    def notify_bundle_ready(self, notif_data, bundle_id):
+        # TODO plug this method with the notification method once
+        # done.
+        pass
+
+
+class RevisionVaultCooker(BaseVaultCooker):
+    """Cooker to create a revision bundle """
+    CACHE_TYPE_KEY = 'revision'
+
+    def __init__(self, storage, cache):
+        """Initialize a cooker that creates revision bundles
+
+        Args:
+            storage: source storage where contents are retrieved.
+            cache: destination storage where the cooked bundles are stored.
+
+        """
+        self.storage = storage
+        self.cache = cache
+
+    def cook(self, obj_id):
+        """Cook the requested revision into a Bundle
+
+        Args:
+            obj_id (bytes): the id of the revision to be cooked.
+
+        Returns:
+            bytes that correspond to the bundle
+
+        """
+        directory_cooker = DirectoryCooker(self.storage)
+        with tempfile.TemporaryDirectory(suffix='.cook') as root_tmp:
+            root = Path(root_tmp)
+            for revision in self.storage.revision_log([obj_id]):
+                revdir = root / hashutil.hash_to_hex(revision['id'])
+                revdir.mkdir()
+                directory_cooker.build_directory(revision['directory'],
+                                                 str(revdir).encode())
+            bundle_content = get_tar_bytes(root_tmp,
+                                           hashutil.hash_to_hex(obj_id))
+        # Cache the bundle
+        self.update_cache(obj_id, bundle_content)
+        # Make a notification that the bundle has been cooked
+        # NOT YET IMPLEMENTED see TODO in function.
+        self.notify_bundle_ready(
+            notif_data='Bundle %s ready' % hashutil.hash_to_hex(obj_id),
+            bundle_id=obj_id)
+
+    def notify_bundle_ready(self, notif_data, bundle_id):
        # TODO plug this method with the notification method once
        # done.
        pass


class DirectoryCooker():
    """Creates a cooked directory from its sha1_git in the db.

    Warning: This is NOT a directly accessible cooker, but a low-level
    one that executes the manipulations.

    """
    def __init__(self, storage):
        self.storage = storage

    def get_directory_bytes(self, dir_id):
        # Create temporary folder to retrieve the files into.
        root = bytes(tempfile.mkdtemp(prefix='directory.',
                                      suffix='.cook'), 'utf8')
+        self.build_directory(dir_id, root)
+        # Use the created directory to make a bundle with the data as
+        # a compressed directory.
+        bundle_content = self._create_bundle_content(
+            root,
+            hashutil.hash_to_hex(dir_id))
+        return bundle_content
+
+    def build_directory(self, dir_id, root):
        # Retrieve data from the database.
        data = self.storage.directory_ls(dir_id, recursive=True)
+        # Split into files and directory data.
+        # TODO(seirl): also handle revision data.
        data1, data2 = itertools.tee(data, 2)
        dir_data = (entry['name'] for entry in data1
                    if entry['type'] == 'dir')
        file_data = (entry for entry in data2 if entry['type'] == 'file')

        # Recreate the directory's subtree and then the files into it.
        self._create_tree(root, dir_data)
        self._create_files(root, file_data)
-        # Use the created directory to make a bundle with the data as
-        # a compressed directory.
-        bundle_content = self._create_bundle_content(
-            root,
-            hashutil.hash_to_hex(dir_id))
-        return bundle_content

    def _create_tree(self, root, directory_paths):
        """Create a directory tree from the given paths

        The tree is created from `root` and each given path in
        `directory_paths` will be created.

        """
        # Directories are sorted by depth so they are created in the
        # right order
        bsep = bytes(os.path.sep, 'utf8')
        dir_names = sorted(
            directory_paths,
            key=lambda x: len(x.split(bsep)))
        for dir_name in dir_names:
            os.makedirs(os.path.join(root, dir_name))

    def _create_files(self, root, file_datas):
        """Create the files according to their status.

        """
        # Then create the files
        for file_data in file_datas:
            path = os.path.join(root, file_data['name'])
            status = file_data['status']
+            perms = file_data['perms']
            if status == 'absent':
                self._create_file_absent(path)
            elif status == 'hidden':
                self._create_file_hidden(path)
            else:
                content = self._get_file_content(file_data['sha1'])
-                self._create_file(path, content)
+                self._create_file(path, content, perms)

-    def _create_file(self, path, content):
+    def _create_file(self, path, content, perms=0o100644):
        """Create the given file and fill it with content.

        """
-        with open(path, 'wb') as f:
-            f.write(content)
+        if perms not in (0o100644, 0o100755, 0o120000):
+            logging.warning('File {} has invalid permission {}, '
+                            'defaulting to 644.'.format(path, perms))
+            perms = 0o100644
+
+        if perms == 0o120000:  # Symbolic link
+            os.symlink(content, path)
+        else:
+            with open(path, 'wb') as f:
+                f.write(content)
+            os.chmod(path, perms & 0o777)

    def _get_file_content(self, obj_id):
        """Get the content of the given file.

        """
        content = list(self.storage.content_get([obj_id]))[0]['data']
        return content

    def _create_file_absent(self, path):
        """Create a file that indicates a skipped content

        Create the given file but fill it with a specific content to
        indicate that the content has not been retrieved by the
        software heritage archive due to its size.

        """
        self._create_file(path, SKIPPED_MESSAGE)

    def _create_file_hidden(self, path):
        """Create a file that indicates a hidden content

        Create the given file but fill it with a specific content to
        indicate that the content could not be retrieved due to
        privacy policy.

        """
        self._create_file(path, HIDDEN_MESSAGE)

    def _create_bundle_content(self, path, hex_dir_id):
        """Create a bundle from the given directory

        Args:
            path: location of the directory to package.
            hex_dir_id: hex representation of the directory id

        Returns:
            bytes that represent the compressed directory as a bundle.

        """
-        tar_buffer = io.BytesIO()
-        tar = tarfile.open(fileobj=tar_buffer, mode='w')
-        tar.add(path.decode(), arcname=hex_dir_id)
-        return tar_buffer.getbuffer()
+        return get_tar_bytes(path.decode(), hex_dir_id)
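
As the BaseVaultCooker docstring states, a new bundle type only needs
`CACHE_TYPE_KEY`, `cook()` and `notify_bundle_ready()`, plus an entry in
`COOKER_TYPES`. A hypothetical minimal cooker; `SnapshotVaultCooker` and its
bundle building are placeholders, not part of this changeset:

```
from swh.core import hashutil
from swh.storage.vault.cooker import BaseVaultCooker


class SnapshotVaultCooker(BaseVaultCooker):  # hypothetical cooker
    CACHE_TYPE_KEY = 'snapshot'

    def cook(self, obj_id):
        # Build the bundle bytes for obj_id here (placeholder), then
        # cache them under CACHE_TYPE_KEY via the base class helper.
        bundle_content = b'...'
        self.update_cache(obj_id, bundle_content)
        self.notify_bundle_ready(
            notif_data='Bundle %s ready' % hashutil.hash_to_hex(obj_id),
            bundle_id=obj_id)

    def notify_bundle_ready(self, notif_data, bundle_id):
        pass  # notification is not implemented yet, as in the cookers above
```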
""" # Then create the files for file_data in file_datas: path = os.path.join(root, file_data['name']) status = file_data['status'] + perms = file_data['perms'] if status == 'absent': self._create_file_absent(path) elif status == 'hidden': self._create_file_hidden(path) else: content = self._get_file_content(file_data['sha1']) - self._create_file(path, content) + self._create_file(path, content, perms) - def _create_file(self, path, content): + def _create_file(self, path, content, perms=0o100644): """Create the given file and fill it with content. """ - with open(path, 'wb') as f: - f.write(content) + if perms not in (0o100644, 0o100755, 0o120000): + logging.warning('File {} has invalid permission {}, ' + 'defaulting to 644.'.format(path, perms)) + + if perms == 0o120000: # Symbolic link + os.symlink(content, path) + else: + with open(path, 'wb') as f: + f.write(content) + os.chmod(path, perms & 0o777) def _get_file_content(self, obj_id): """Get the content of the given file. """ content = list(self.storage.content_get([obj_id]))[0]['data'] return content def _create_file_absent(self, path): """Create a file that indicates a skipped content Create the given file but fill it with a specific content to indicate that the content have not been retrieved by the software heritage archive due to its size. """ self._create_file(self, SKIPPED_MESSAGE) def _create_file_hidden(self, path): """Create a file that indicates an hidden content Create the given file but fill it with a specific content to indicate that the content could not be retrieved due to privacy policy. """ self._create_file(self, HIDDEN_MESSAGE) def _create_bundle_content(self, path, hex_dir_id): """Create a bundle from the given directory Args: path: location of the directory to package. hex_dir_id: hex representation of the directory id Returns: bytes that represent the compressed directory as a bundle. """ - tar_buffer = io.BytesIO() - tar = tarfile.open(fileobj=tar_buffer, mode='w') - tar.add(path.decode(), arcname=hex_dir_id) - return tar_buffer.getbuffer() + return get_tar_bytes(path.decode(), hex_dir_id) diff --git a/version.txt b/version.txt index cfac30d6b..46662403e 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.79-0-g5c41ffc \ No newline at end of file +v0.0.80-0-g543c8a4 \ No newline at end of file