diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..f5d2a49cd --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "restructuredtext.workspaceRoot": "/home/antoine/swh/swh-environment/swh-storage" +} \ No newline at end of file diff --git a/PKG-INFO b/PKG-INFO index 4cf8d0a39..3333a7719 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,12 +1,12 @@ Metadata-Version: 2.1 Name: swh.storage -Version: 0.0.98 +Version: 0.0.99 Summary: Software Heritage storage manager Home-page: https://forge.softwareheritage.org/diffusion/DSTO/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN Provides-Extra: listener Provides-Extra: schemata diff --git a/sql/clusters.dot b/sql/clusters.dot index ecc1b7189..e8f75e48e 100644 --- a/sql/clusters.dot +++ b/sql/clusters.dot @@ -1,99 +1,95 @@ subgraph "logical_grouping" { style = rounded; bgcolor = gray95; color = gray; subgraph cluster_meta { label = <schema versioning
version: @@VERSION@@>; dbversion; } subgraph cluster_content { label = <content>; content; skipped_content; } subgraph cluster_directory { label = <directories>; directory; directory_entry_dir; directory_entry_file; directory_entry_rev; } subgraph cluster_revision { label = <revisions>; revision; revision_history; person; } subgraph cluster_release { label = <releases>; release; } subgraph cluster_snapshots { label = <snapshots>; occurrence; occurrence_history; + snapshot; + snapshot_branch; + snapshot_branches; } subgraph cluster_origins { label = <origins>; origin; fetch_history; origin_visit; } subgraph cluster_entity { label = <entities>; entity; entity_history; entity_equivalence; listable_entity; list_history; } - subgraph cluster_provenance { - label = <provenance>; - cache_revision_origin; - cache_content_revision; - cache_content_revision_processed; + subgraph cluster_metadata { + label = <metadata>; + metadata_provider; + origin_metadata; + tool; + } + + subgraph cluster_statistics { + label = <statistics>; + object_counts; } { edge [style = dashed]; # "rtcolN" identifies the N-th row in a table, as a source # "ltcolN" identifies the N-th row in a table, as a destination "directory_entry_dir":rtcol2 -> "directory":ltcol1; "directory_entry_file":rtcol2 -> "content":ltcol2; "directory_entry_file":rtcol2 -> "skipped_content":ltcol2; "directory_entry_rev":rtcol2 -> "revision":ltcol1; "directory":rtcol2 -> "directory_entry_dir":ltcol1; "directory":rtcol3 -> "directory_entry_file":ltcol1; "directory":rtcol4 -> "directory_entry_rev":ltcol1; "occurrence":rtcol3 -> "revision":ltcol1; "occurrence_history":rtcol3 -> "revision":ltcol1; "release":rtcol2 -> "revision":ltcol1; "revision":rtcol9 -> "directory":ltcol1; "revision_history":rtcol2 -> "revision":ltcol1; "entity_history":rtcol3 -> "entity_history":ltcol2; "entity_history":rtcol10 -> "listable_entity":ltcol1; } - - subgraph cluster_metadata { - label = <metadata>; - content_metadata; - revision_metadata; - } - - subgraph cluster_statistics { - label = <statistics>; - object_counts; - } - } diff --git a/sql/swh-func.sql b/sql/swh-func.sql index 706918ab6..3e186cc28 100644 --- a/sql/swh-func.sql +++ b/sql/swh-func.sql @@ -1,1532 +1,1608 @@ create or replace function hash_sha1(text) returns text as $$ select encode(digest($1, 'sha1'), 'hex') $$ language sql strict immutable; comment on function hash_sha1(text) is 'Compute SHA1 hash as text'; -- create a temporary table called tmp_TBLNAME, mimicking existing table -- TBLNAME -- -- Args: -- tblname: name of the table to mimick create or replace function swh_mktemp(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table tmp_%1$I (like %1$I including defaults) on commit drop; alter table tmp_%1$I drop column if exists object_id; ', tblname); return; end $$; -- create a temporary table for directory entries called tmp_TBLNAME, -- mimicking existing table TBLNAME with an extra dir_id (sha1_git) -- column, and dropping the id column. -- -- This is used to create the tmp_directory_entry_ tables. 
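--
-- Illustrative sketch (indicative only, not normative): creating the
-- temporary table for file entries would look something like:
--
--   select swh_mktemp_dir_entry('directory_entry_file');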
-- -- Args: -- tblname: name of the table to mimick create or replace function swh_mktemp_dir_entry(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table tmp_%1$I (like %1$I including defaults, dir_id sha1_git) on commit drop; alter table tmp_%1$I drop column id; ', tblname); return; end $$; -- create a temporary table for revisions called tmp_revisions, -- mimicking existing table revision, replacing the foreign keys to -- people with an email and name field -- create or replace function swh_mktemp_revision() returns void language sql as $$ create temporary table tmp_revision ( like revision including defaults, author_fullname bytea, author_name bytea, author_email bytea, committer_fullname bytea, committer_name bytea, committer_email bytea ) on commit drop; alter table tmp_revision drop column author; alter table tmp_revision drop column committer; alter table tmp_revision drop column object_id; $$; -- create a temporary table for releases called tmp_release, -- mimicking existing table release, replacing the foreign keys to -- people with an email and name field -- create or replace function swh_mktemp_release() returns void language sql as $$ create temporary table tmp_release ( like release including defaults, author_fullname bytea, author_name bytea, author_email bytea ) on commit drop; alter table tmp_release drop column author; alter table tmp_release drop column object_id; $$; -- create a temporary table with a single "bytea" column for fast object lookup. create or replace function swh_mktemp_bytea() returns void language sql as $$ create temporary table tmp_bytea ( id bytea ) on commit drop; $$; -- create a temporary table for occurrence_history create or replace function swh_mktemp_occurrence_history() returns void language sql as $$ create temporary table tmp_occurrence_history( like occurrence_history including defaults, visit bigint not null ) on commit drop; alter table tmp_occurrence_history drop column visits, drop column object_id; $$; -- create a temporary table for entity_history, sans id create or replace function swh_mktemp_entity_history() returns void language sql as $$ create temporary table tmp_entity_history ( like entity_history including defaults) on commit drop; alter table tmp_entity_history drop column id; $$; -- create a temporary table for entities called tmp_entity_lister, -- with only the columns necessary for retrieving the uuid of a listed -- entity. create or replace function swh_mktemp_entity_lister() returns void language sql as $$ create temporary table tmp_entity_lister ( id bigint, lister_metadata jsonb ) on commit drop; $$; -- create a temporary table for the branches of a snapshot create or replace function swh_mktemp_snapshot_branch() returns void language sql as $$ create temporary table tmp_snapshot_branch ( name bytea not null, target bytea, target_type snapshot_target ) on commit drop; $$; create or replace function swh_mktemp_tool() returns void language sql as $$ create temporary table tmp_tool ( like tool including defaults ) on commit drop; alter table tmp_tool drop column id; $$; -- a content signature is a set of cryptographic checksums that we use to -- uniquely identify content, for the purpose of verifying if we already have -- some content or not during content injection create type content_signature as ( sha1 sha1, sha1_git sha1_git, sha256 sha256, blake2s256 blake2s256 ); -- check which entries of tmp_content are missing from content -- -- operates in bulk: 0. 
swh_mktemp(content), 1. COPY to tmp_content, -- 2. call this function create or replace function swh_content_missing() returns setof content_signature language plpgsql as $$ begin return query ( select sha1, sha1_git, sha256, blake2s256 from tmp_content as tmp where not exists ( select 1 from content as c where c.sha1 = tmp.sha1 and c.sha1_git = tmp.sha1_git and c.sha256 = tmp.sha256 ) ); return; end $$; -- check which entries of tmp_content_sha1 are missing from content -- -- operates in bulk: 0. swh_mktemp_content_sha1(), 1. COPY to tmp_content_sha1, -- 2. call this function create or replace function swh_content_missing_per_sha1() returns setof sha1 language plpgsql as $$ begin return query (select id::sha1 from tmp_bytea as tmp where not exists (select 1 from content as c where c.sha1=tmp.id)); end $$; -- check which entries of tmp_skipped_content are missing from skipped_content -- -- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content, -- 2. call this function create or replace function swh_skipped_content_missing() returns setof content_signature language plpgsql as $$ begin return query select sha1, sha1_git, sha256, blake2s256 from tmp_skipped_content t where not exists (select 1 from skipped_content s where s.sha1 is not distinct from t.sha1 and s.sha1_git is not distinct from t.sha1_git and s.sha256 is not distinct from t.sha256); return; end $$; -- Look up content based on one or several different checksums. Return all -- content information if the content is found; a NULL row otherwise. -- -- At least one checksum should be not NULL. If several are not NULL, they will -- be AND-ed together in the lookup query. -- -- Note: this function is meant to be used to look up individual contents -- (e.g., for the web app), for batch lookup of missing content (e.g., to be -- added) see swh_content_missing create or replace function swh_content_find( sha1 sha1 default NULL, sha1_git sha1_git default NULL, sha256 sha256 default NULL, blake2s256 blake2s256 default NULL ) returns content language plpgsql as $$ declare con content; filters text[] := array[] :: text[]; -- AND-clauses used to filter content q text; begin if sha1 is not null then filters := filters || format('sha1 = %L', sha1); end if; if sha1_git is not null then filters := filters || format('sha1_git = %L', sha1_git); end if; if sha256 is not null then filters := filters || format('sha256 = %L', sha256); end if; if blake2s256 is not null then filters := filters || format('blake2s256 = %L', blake2s256); end if; if cardinality(filters) = 0 then return null; else q = format('select * from content where %s', array_to_string(filters, ' and ')); execute q into con; return con; end if; end $$; -- add tmp_content entries to content, skipping duplicates -- -- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content, -- 2. call this function create or replace function swh_content_add() returns void language plpgsql as $$ begin insert into content (sha1, sha1_git, sha256, blake2s256, length, status) select distinct sha1, sha1_git, sha256, blake2s256, length, status from tmp_content where (sha1, sha1_git, sha256) in ( select sha1, sha1_git, sha256 from swh_content_missing() ); -- TODO XXX use postgres 9.5 "UPSERT" support here, when available. -- Specifically, using "INSERT .. ON CONFLICT IGNORE" we can avoid -- the extra swh_content_missing() query here. return; end $$; -- add tmp_skipped_content entries to skipped_content, skipping duplicates -- -- operates in bulk: 0. swh_mktemp(skipped_content), 1. 
COPY to tmp_skipped_content, -- 2. call this function create or replace function swh_skipped_content_add() returns void language plpgsql as $$ begin insert into skipped_content (sha1, sha1_git, sha256, blake2s256, length, status, reason, origin) select distinct sha1, sha1_git, sha256, blake2s256, length, status, reason, origin from tmp_skipped_content where (coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')) in ( select coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '') from swh_skipped_content_missing() ); -- TODO XXX use postgres 9.5 "UPSERT" support here, when available. -- Specifically, using "INSERT .. ON CONFLICT IGNORE" we can avoid -- the extra swh_content_missing() query here. return; end $$; -- Update content entries from temporary table. -- (columns are potential new columns added to the schema, this cannot be empty) -- create or replace function swh_content_update(columns_update text[]) returns void language plpgsql as $$ declare query text; tmp_array text[]; begin if array_length(columns_update, 1) = 0 then raise exception 'Please, provide the list of column names to update.'; end if; tmp_array := array(select format('%1$s=t.%1$s', unnest) from unnest(columns_update)); query = format('update content set %s from tmp_content t where t.sha1 = content.sha1', array_to_string(tmp_array, ', ')); execute query; return; end $$; comment on function swh_content_update(text[]) IS 'Update existing content''s columns'; -- check which entries of tmp_directory are missing from directory -- -- operates in bulk: 0. swh_mktemp(directory), 1. COPY to tmp_directory, -- 2. call this function create or replace function swh_directory_missing() returns setof sha1_git language plpgsql as $$ begin return query select id from tmp_directory t where not exists ( select 1 from directory d where d.id = t.id); return; end $$; -- Retrieve information on directory from temporary table create or replace function swh_directory_get() returns setof directory language plpgsql as $$ begin return query select d.* from tmp_directory t inner join directory d on t.id = d.id; return; end $$; create type directory_entry_type as enum('file', 'dir', 'rev'); -- Add tmp_directory_entry_* entries to directory_entry_* and directory, -- skipping duplicates in directory_entry_*. This is a generic function that -- works on all kind of directory entries. -- -- operates in bulk: 0. swh_mktemp_dir_entry('directory_entry_*'), 1 COPY to -- tmp_directory_entry_*, 2. call this function -- -- Assumption: this function is used in the same transaction that inserts the -- context directory in table "directory". create or replace function swh_directory_entry_add(typ directory_entry_type) returns void language plpgsql as $$ begin execute format(' insert into directory_entry_%1$s (target, name, perms) select distinct t.target, t.name, t.perms from tmp_directory_entry_%1$s t where not exists ( select 1 from directory_entry_%1$s i where t.target = i.target and t.name = i.name and t.perms = i.perms) ', typ); execute format(' with new_entries as ( select t.dir_id, array_agg(i.id) as entries from tmp_directory_entry_%1$s t inner join directory_entry_%1$s i using (target, name, perms) group by t.dir_id ) update tmp_directory as d set %1$s_entries = new_entries.entries from new_entries where d.id = new_entries.dir_id ', typ); return; end $$; -- Insert the data from tmp_directory, tmp_directory_entry_file, -- tmp_directory_entry_dir, tmp_directory_entry_rev into their final -- tables. 
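--
-- Illustrative usage sketch (indicative only; the COPY step is elided):
--
--   select swh_mktemp('directory');
--   select swh_mktemp_dir_entry('directory_entry_file');
--   -- COPY the new rows into tmp_directory and tmp_directory_entry_file, then:
--   select swh_directory_add();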
-- -- Prerequisites: -- directory ids in tmp_directory -- entries in tmp_directory_entry_{file,dir,rev} -- create or replace function swh_directory_add() returns void language plpgsql as $$ begin perform swh_directory_entry_add('file'); perform swh_directory_entry_add('dir'); perform swh_directory_entry_add('rev'); insert into directory select * from tmp_directory t where not exists ( select 1 from directory d where d.id = t.id); return; end $$; -- a directory listing entry with all the metadata -- -- can be used to list a directory, and retrieve all the data in one go. create type directory_entry as ( dir_id sha1_git, -- id of the parent directory type directory_entry_type, -- type of entry target sha1_git, -- id of target name unix_path, -- path name, relative to containing dir perms file_perms, -- unix-like permissions status content_status, -- visible or absent sha1 sha1, -- content if sha1 if type is not dir sha1_git sha1_git, -- content's sha1 git if type is not dir sha256 sha256, -- content's sha256 if type is not dir length bigint -- content length if type is not dir ); -- List a single level of directory walked_dir_id -- FIXME: order by name is not correct. For git, we need to order by -- lexicographic order but as if a trailing / is present in directory -- name create or replace function swh_directory_walk_one(walked_dir_id sha1_git) returns setof directory_entry language sql stable as $$ with dir as ( select id as dir_id, dir_entries, file_entries, rev_entries from directory where id = walked_dir_id), ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir), ls_f as (select dir_id, unnest(file_entries) as entry_id from dir), ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir) (select dir_id, 'dir'::directory_entry_type as type, e.target, e.name, e.perms, NULL::content_status, NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint from ls_d left join directory_entry_dir e on ls_d.entry_id = e.id) union (select dir_id, 'file'::directory_entry_type as type, e.target, e.name, e.perms, c.status, c.sha1, c.sha1_git, c.sha256, c.length from ls_f left join directory_entry_file e on ls_f.entry_id = e.id left join content c on e.target = c.sha1_git) union (select dir_id, 'rev'::directory_entry_type as type, e.target, e.name, e.perms, NULL::content_status, NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint from ls_r left join directory_entry_rev e on ls_r.entry_id = e.id) order by name; $$; -- List recursively the revision directory arborescence create or replace function swh_directory_walk(walked_dir_id sha1_git) returns setof directory_entry language sql stable as $$ with recursive entries as ( select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length from swh_directory_walk_one(walked_dir_id) union all select dir_id, type, target, (dirname || '/' || name)::unix_path as name, perms, status, sha1, sha1_git, sha256, length from (select (swh_directory_walk_one(dirs.target)).*, dirs.name as dirname from (select target, name from entries where type = 'dir') as dirs) as with_parent ) select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length from entries $$; create or replace function swh_revision_walk(revision_id sha1_git) returns setof directory_entry language sql stable as $$ select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length from swh_directory_walk((select directory from revision where id=revision_id)) $$; COMMENT ON FUNCTION swh_revision_walk(sha1_git) IS 'Recursively list the revision 
targeted directory arborescence'; -- Find a directory entry by its path create or replace function swh_find_directory_entry_by_path( walked_dir_id sha1_git, dir_or_content_path bytea[]) returns directory_entry language plpgsql as $$ declare end_index integer; paths bytea default ''; path bytea; res bytea[]; r record; begin end_index := array_upper(dir_or_content_path, 1); res[1] := walked_dir_id; for i in 1..end_index loop path := dir_or_content_path[i]; -- concatenate path for patching the name in the result record (if we found it) if i = 1 then paths = path; else paths := paths || '/' || path; -- concatenate paths end if; if i <> end_index then select * from swh_directory_walk_one(res[i] :: sha1_git) where name=path and type = 'dir' limit 1 into r; else select * from swh_directory_walk_one(res[i] :: sha1_git) where name=path limit 1 into r; end if; -- find the path if r is null then return null; else -- store the next dir to lookup the next local path from res[i+1] := r.target; end if; end loop; -- at this moment, r is the result. Patch its 'name' with the full path before returning it. r.name := paths; return r; end $$; -- List all revision IDs starting from a given revision, going back in time -- -- TODO ordering: should be breadth-first right now (what do we want?) -- TODO ordering: ORDER BY parent_rank somewhere? create or replace function swh_revision_list(root_revisions bytea[], num_revs bigint default NULL) returns table (id sha1_git, parents bytea[]) language sql stable as $$ with recursive full_rev_list(id) as ( (select id from revision where id = ANY(root_revisions)) union (select h.parent_id from revision_history as h join full_rev_list on h.id = full_rev_list.id) ), rev_list as (select id from full_rev_list limit num_revs) select rev_list.id as id, array(select rh.parent_id::bytea from revision_history rh where rh.id = rev_list.id order by rh.parent_rank ) as parent from rev_list; $$; -- List all the children of a given revision create or replace function swh_revision_list_children(root_revisions bytea[], num_revs bigint default NULL) returns table (id sha1_git, parents bytea[]) language sql stable as $$ with recursive full_rev_list(id) as ( (select id from revision where id = ANY(root_revisions)) union (select h.id from revision_history as h join full_rev_list on h.parent_id = full_rev_list.id) ), rev_list as (select id from full_rev_list limit num_revs) select rev_list.id as id, array(select rh.parent_id::bytea from revision_history rh where rh.id = rev_list.id order by rh.parent_rank ) as parent from rev_list; $$; -- Detailed entry for a revision create type revision_entry as ( id sha1_git, date timestamptz, date_offset smallint, date_neg_utc_offset boolean, committer_date timestamptz, committer_date_offset smallint, committer_date_neg_utc_offset boolean, type revision_type, directory sha1_git, message bytea, author_id bigint, author_fullname bytea, author_name bytea, author_email bytea, committer_id bigint, committer_fullname bytea, committer_name bytea, committer_email bytea, metadata jsonb, synthetic boolean, parents bytea[], object_id bigint ); -- "git style" revision log. 
Similar to swh_revision_list(), but returning all -- information associated to each revision, and expanding authors/committers create or replace function swh_revision_log(root_revisions bytea[], num_revs bigint default NULL) returns setof revision_entry language sql stable as $$ select t.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, t.parents, r.object_id from swh_revision_list(root_revisions, num_revs) as t left join revision r on t.id = r.id left join person a on a.id = r.author left join person c on c.id = r.committer; $$; -- Retrieve revisions from tmp_bytea in bulk create or replace function swh_revision_get() returns setof revision_entry language plpgsql as $$ begin return query select r.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, array(select rh.parent_id::bytea from revision_history rh where rh.id = t.id order by rh.parent_rank) as parents, r.object_id from tmp_bytea t left join revision r on t.id = r.id left join person a on a.id = r.author left join person c on c.id = r.committer; return; end $$; -- List missing revisions from tmp_bytea create or replace function swh_revision_missing() returns setof sha1_git language plpgsql as $$ begin return query select id::sha1_git from tmp_bytea t where not exists ( select 1 from revision r where r.id = t.id); return; end $$; -- Detailed entry for a release create type release_entry as ( id sha1_git, target sha1_git, target_type object_type, date timestamptz, date_offset smallint, date_neg_utc_offset boolean, name bytea, comment bytea, synthetic boolean, author_id bigint, author_fullname bytea, author_name bytea, author_email bytea, object_id bigint ); -- Detailed entry for release create or replace function swh_release_get() returns setof release_entry language plpgsql as $$ begin return query select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment, r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id from tmp_bytea t inner join release r on t.id = r.id inner join person p on p.id = r.author; return; end $$; -- Create entries in person from tmp_revision create or replace function swh_person_add_from_revision() returns void language plpgsql as $$ begin with t as ( select author_fullname as fullname, author_name as name, author_email as email from tmp_revision union select committer_fullname as fullname, committer_name as name, committer_email as email from tmp_revision ) insert into person (fullname, name, email) select distinct fullname, name, email from t where not exists ( select 1 from person p where t.fullname = p.fullname ); return; end $$; -- Create entries in revision from tmp_revision create or replace function swh_revision_add() returns void language plpgsql as $$ begin perform swh_person_add_from_revision(); insert into revision (id, date, date_offset, date_neg_utc_offset, committer_date, committer_date_offset, committer_date_neg_utc_offset, type, directory, message, author, committer, metadata, synthetic) select t.id, t.date, t.date_offset, t.date_neg_utc_offset, t.committer_date, t.committer_date_offset, 
t.committer_date_neg_utc_offset, t.type, t.directory, t.message, a.id, c.id, t.metadata, t.synthetic from tmp_revision t left join person a on a.fullname = t.author_fullname left join person c on c.fullname = t.committer_fullname; return; end $$; -- List missing releases from tmp_bytea create or replace function swh_release_missing() returns setof sha1_git language plpgsql as $$ begin return query select id::sha1_git from tmp_bytea t where not exists ( select 1 from release r where r.id = t.id); end $$; -- Create entries in person from tmp_release create or replace function swh_person_add_from_release() returns void language plpgsql as $$ begin with t as ( select distinct author_fullname as fullname, author_name as name, author_email as email from tmp_release ) insert into person (fullname, name, email) select fullname, name, email from t where not exists ( select 1 from person p where t.fullname = p.fullname ); return; end $$; -- Create entries in release from tmp_release create or replace function swh_release_add() returns void language plpgsql as $$ begin perform swh_person_add_from_release(); insert into release (id, target, target_type, date, date_offset, date_neg_utc_offset, name, comment, author, synthetic) select t.id, t.target, t.target_type, t.date, t.date_offset, t.date_neg_utc_offset, t.name, t.comment, a.id, t.synthetic from tmp_release t left join person a on a.fullname = t.author_fullname; return; end $$; create or replace function swh_occurrence_update_for_origin(origin_id bigint) returns void language sql as $$ delete from occurrence where origin = origin_id; insert into occurrence (origin, branch, target, target_type) select origin, branch, target, target_type from occurrence_history where origin = origin_id and (select visit from origin_visit where origin = origin_id order by date desc limit 1) = any(visits); $$; create or replace function swh_occurrence_update_all() returns void language plpgsql as $$ declare origin_id origin.id%type; begin for origin_id in select distinct id from origin loop perform swh_occurrence_update_for_origin(origin_id); end loop; return; end; $$; -- add a new origin_visit for origin origin_id at date. -- -- Returns the new visit id. create or replace function swh_origin_visit_add(origin_id bigint, date timestamptz) returns bigint language sql as $$ with last_known_visit as ( select coalesce(max(visit), 0) as visit from origin_visit where origin = origin_id ) insert into origin_visit (origin, date, visit, status) values (origin_id, date, (select visit from last_known_visit) + 1, 'ongoing') returning visit; $$; -- add tmp_occurrence_history entries to occurrence_history -- -- operates in bulk: 0. swh_mktemp(occurrence_history), 1. COPY to tmp_occurrence_history, -- 2. 
call this function create or replace function swh_occurrence_history_add() returns void language plpgsql as $$ declare origin_id origin.id%type; begin -- Create or update occurrence_history with occurrence_history_id_visit as ( select tmp_occurrence_history.*, object_id, visits from tmp_occurrence_history left join occurrence_history using(origin, branch, target, target_type) ), occurrences_to_update as ( select object_id, visit from occurrence_history_id_visit where object_id is not null ), update_occurrences as ( update occurrence_history set visits = array(select unnest(occurrence_history.visits) as e union select occurrences_to_update.visit as e order by e) from occurrences_to_update where occurrence_history.object_id = occurrences_to_update.object_id ) insert into occurrence_history (origin, branch, target, target_type, visits) select origin, branch, target, target_type, ARRAY[visit] from occurrence_history_id_visit where object_id is null; -- update occurrence for origin_id in select distinct origin from tmp_occurrence_history loop perform swh_occurrence_update_for_origin(origin_id); end loop; return; end $$; create or replace function swh_snapshot_add(origin bigint, visit bigint, snapshot_id snapshot.id%type) returns void language plpgsql as $$ declare snapshot_object_id snapshot.object_id%type; begin select object_id from snapshot where id = snapshot_id into snapshot_object_id; if snapshot_object_id is null then insert into snapshot (id) values (snapshot_id) returning object_id into snapshot_object_id; insert into snapshot_branch (name, target_type, target) select name, target_type, target from tmp_snapshot_branch tmp where not exists ( select 1 from snapshot_branch sb where sb.name = tmp.name and sb.target = tmp.target and sb.target_type = tmp.target_type ) on conflict do nothing; insert into snapshot_branches (snapshot_id, branch_id) select snapshot_object_id, sb.object_id as branch_id from tmp_snapshot_branch tmp join snapshot_branch sb using (name, target, target_type) where tmp.target is not null and tmp.target_type is not null union select snapshot_object_id, sb.object_id as branch_id from tmp_snapshot_branch tmp join snapshot_branch sb using (name) where tmp.target is null and tmp.target_type is null and sb.target is null and sb.target_type is null; end if; update origin_visit ov set snapshot_id = snapshot_object_id where ov.origin=swh_snapshot_add.origin and ov.visit=swh_snapshot_add.visit; end; $$; create type snapshot_result as ( snapshot_id sha1_git, name bytea, target bytea, target_type snapshot_target ); create or replace function swh_snapshot_get_by_id(id snapshot.id%type) returns setof snapshot_result language sql stable as $$ select swh_snapshot_get_by_id.id as snapshot_id, name, target, target_type from snapshot_branches inner join snapshot_branch on snapshot_branches.branch_id = snapshot_branch.object_id where snapshot_id = (select object_id from snapshot where snapshot.id = swh_snapshot_get_by_id.id) $$; create or replace function swh_snapshot_get_by_origin_visit(origin_id bigint, visit_id bigint) returns snapshot.id%type language sql stable as $$ select snapshot.id from origin_visit left join snapshot on snapshot.object_id = origin_visit.snapshot_id where origin_visit.origin=origin_id and origin_visit.visit=visit_id; $$; -- Absolute path: directory reference + complete path relative to it create type content_dir as ( directory sha1_git, path unix_path ); -- Find the containing directory of a given content, specified by sha1 -- (note: *not* sha1_git). 
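--
-- Illustrative call (the sha1 below is a placeholder, not a real value):
--
--   select * from swh_content_find_directory('\x<20-byte sha1>'::sha1);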
-- -- Return a pair (dir_id, path) where path is a UNIX path that, from the -- directory root, reaches down to a file with the desired content. Return NULL -- if no match is found. -- -- In case of multiple paths (i.e., pretty much always), an arbitrary one is -- chosen. create or replace function swh_content_find_directory(content_id sha1) returns content_dir language sql stable as $$ with recursive path as ( -- Recursively build a path from the requested content to a root -- directory. Each iteration returns a pair (dir_id, filename) where -- filename is relative to dir_id. Stops when no parent directory can -- be found. (select dir.id as dir_id, dir_entry_f.name as name, 0 as depth from directory_entry_file as dir_entry_f join content on content.sha1_git = dir_entry_f.target join directory as dir on dir.file_entries @> array[dir_entry_f.id] where content.sha1 = content_id limit 1) union all (select dir.id as dir_id, (dir_entry_d.name || '/' || path.name)::unix_path as name, path.depth + 1 from path join directory_entry_dir as dir_entry_d on dir_entry_d.target = path.dir_id join directory as dir on dir.dir_entries @> array[dir_entry_d.id] limit 1) ) select dir_id, name from path order by depth desc limit 1; $$; -- Walk the revision history starting from a given revision, until a matching -- occurrence is found. Return all occurrence information if one is found, NULL -- otherwise. create or replace function swh_revision_find_occurrence(revision_id sha1_git) returns occurrence language sql stable as $$ select origin, branch, target, target_type from swh_revision_list_children(ARRAY[revision_id] :: bytea[]) as rev_list left join occurrence_history occ_hist on rev_list.id = occ_hist.target where occ_hist.origin is not null and occ_hist.target_type = 'revision' limit 1; $$; -- Find the visit of origin id closest to date visit_date create or replace function swh_visit_find_by_date(origin bigint, visit_date timestamptz default NOW()) returns origin_visit language sql stable as $$ with closest_two_visits as (( select ov, (date - visit_date) as interval from origin_visit ov where ov.origin = origin and ov.date >= visit_date order by ov.date asc limit 1 ) union ( select ov, (visit_date - date) as interval from origin_visit ov where ov.origin = origin and ov.date < visit_date order by ov.date desc limit 1 )) select (ov).* from closest_two_visits order by interval limit 1 $$; -- Retrieve the most recent visit of origin id create or replace function swh_visit_get(origin bigint) returns origin_visit language sql stable as $$ select * from origin_visit where origin=origin order by date desc $$; -- Retrieve occurrence by filtering on origin_id and optionally on -- branch_name and/or validity range create or replace function swh_occurrence_get_by( origin_id bigint, branch_name bytea default NULL, date timestamptz default NULL) returns setof occurrence_history language plpgsql as $$ declare filters text[] := array[] :: text[]; -- AND-clauses used to filter occurrences visit_id bigint; q text; begin if origin_id is null then raise exception 'Needs an origin_id to get an occurrence.'; end if; filters := filters || format('origin = %L', origin_id); if branch_name is not null then filters := filters || format('branch = %L', branch_name); end if; if date is not null then select visit from swh_visit_find_by_date(origin_id, date) into visit_id; else select visit from origin_visit where origin = origin_id order by origin_visit.date desc limit 1 into visit_id; end if; if visit_id is null then return; end if;
filters := filters || format('%L = any(visits)', visit_id); q = format('select * from occurrence_history where %s', array_to_string(filters, ' and ')); return query execute q; end $$; -- Retrieve revisions by occurrence criterion filtering create or replace function swh_revision_get_by( origin_id bigint, branch_name bytea default NULL, date timestamptz default NULL) returns setof revision_entry language sql stable as $$ select r.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank ) as parents, r.object_id from swh_occurrence_get_by(origin_id, branch_name, date) as occ inner join revision r on occ.target = r.id left join person a on a.id = r.author left join person c on c.id = r.committer; $$; -- Retrieve a release by occurrence criterion create or replace function swh_release_get_by( origin_id bigint) returns setof release_entry language sql stable as $$ select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment, r.synthetic, a.id as author_id, a.fullname as author_fullname, a.name as author_name, a.email as author_email, r.object_id from release r inner join occurrence_history occ on occ.target = r.target left join person a on a.id = r.author where occ.origin = origin_id and occ.target_type = 'revision' and r.target_type = 'revision'; $$; create type object_found as ( sha1_git sha1_git, type object_type, id bytea, -- sha1 or sha1_git depending on object_type object_id bigint ); -- Find objects by sha1_git, return their type and their main identifier create or replace function swh_object_find_by_sha1_git() returns setof object_found language plpgsql as $$ begin return query with known_objects as (( select id as sha1_git, 'release'::object_type as type, id, object_id from release r where exists (select 1 from tmp_bytea t where t.id = r.id) ) union all ( select id as sha1_git, 'revision'::object_type as type, id, object_id from revision r where exists (select 1 from tmp_bytea t where t.id = r.id) ) union all ( select id as sha1_git, 'directory'::object_type as type, id, object_id from directory d where exists (select 1 from tmp_bytea t where t.id = d.id) ) union all ( select sha1_git as sha1_git, 'content'::object_type as type, sha1 as id, object_id from content c where exists (select 1 from tmp_bytea t where t.id = c.sha1_git) )) select t.id::sha1_git as sha1_git, k.type, k.id, k.object_id from tmp_bytea t left join known_objects k on t.id = k.sha1_git; end $$; -- Create entries in entity_history from tmp_entity_history -- -- TODO: do something smarter to compress the entries if the data -- didn't change. 
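--
-- Illustrative usage sketch (indicative only):
--
--   select swh_mktemp_entity_history();
--   -- COPY the listed entities into tmp_entity_history, then:
--   select swh_entity_history_add();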
create or replace function swh_entity_history_add() returns void language plpgsql as $$ begin insert into entity_history ( uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, validity ) select * from tmp_entity_history; return; end $$; create or replace function swh_update_entity_from_entity_history() returns trigger language plpgsql as $$ begin insert into entity (uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, last_seen, last_id) select uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, unnest(validity), id from entity_history where uuid = NEW.uuid order by unnest(validity) desc limit 1 on conflict (uuid) do update set parent = EXCLUDED.parent, name = EXCLUDED.name, type = EXCLUDED.type, description = EXCLUDED.description, homepage = EXCLUDED.homepage, active = EXCLUDED.active, generated = EXCLUDED.generated, lister_metadata = EXCLUDED.lister_metadata, metadata = EXCLUDED.metadata, last_seen = EXCLUDED.last_seen, last_id = EXCLUDED.last_id; return null; end $$; create trigger update_entity after insert or update on entity_history for each row execute procedure swh_update_entity_from_entity_history(); -- map an id of tmp_entity_lister to a full entity create type entity_id as ( id bigint, uuid uuid, parent uuid, name text, type entity_type, description text, homepage text, active boolean, generated boolean, lister_metadata jsonb, metadata jsonb, last_seen timestamptz, last_id bigint ); -- find out the uuid of the entries of entity with the metadata -- contained in tmp_entity_lister create or replace function swh_entity_from_tmp_entity_lister() returns setof entity_id language plpgsql as $$ begin return query select t.id, e.* from tmp_entity_lister t left join entity e on e.lister_metadata @> t.lister_metadata; return; end $$; create or replace function swh_entity_get(entity_uuid uuid) returns setof entity language sql stable as $$ with recursive entity_hierarchy as ( select e.* from entity e where uuid = entity_uuid union select p.* from entity_hierarchy e join entity p on e.parent = p.uuid ) select * from entity_hierarchy; $$; -- Object listing by object_id create or replace function swh_content_list_by_object_id( min_excl bigint, max_incl bigint ) returns setof content language sql stable as $$ select * from content where object_id > min_excl and object_id <= max_incl order by object_id; $$; create or replace function swh_revision_list_by_object_id( min_excl bigint, max_incl bigint ) returns setof revision_entry language sql stable as $$ with revs as ( select * from revision where object_id > min_excl and object_id <= max_incl ) select r.id, r.date, r.date_offset, r.date_neg_utc_offset, r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset, r.type, r.directory, r.message, a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic, array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank) as parents, r.object_id from revs r left join person a on a.id = r.author left join person c on c.id = r.committer order by r.object_id; $$; create or replace function swh_release_list_by_object_id( min_excl bigint, max_incl bigint ) returns setof release_entry language sql stable as $$ with rels as ( select * from release where object_id > min_excl and object_id <= max_incl ) select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, 
r.comment, r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id from rels r left join person p on p.id = r.author order by r.object_id; $$; create or replace function swh_occurrence_by_origin_visit(origin_id bigint, visit_id bigint) returns setof occurrence language sql stable as $$ select origin, branch, target, target_type from occurrence_history where origin = origin_id and visit_id = ANY(visits); $$; -- end revision_metadata functions -- origin_metadata functions create type origin_metadata_signature as ( id bigint, origin_id bigint, discovery_date timestamptz, tool_id bigint, metadata jsonb, provider_id integer, provider_name text, provider_type text, provider_url text ); create or replace function swh_origin_metadata_get_by_origin( origin integer) returns setof origin_metadata_signature language sql stable as $$ select om.id as id, origin_id, discovery_date, tool_id, om.metadata, mp.id as provider_id, provider_name, provider_type, provider_url from origin_metadata as om inner join metadata_provider mp on om.provider_id = mp.id where om.origin_id = origin order by discovery_date desc; $$; create or replace function swh_origin_metadata_get_by_provider_type( origin integer, type text) returns setof origin_metadata_signature language sql stable as $$ select om.id as id, origin_id, discovery_date, tool_id, om.metadata, mp.id as provider_id, provider_name, provider_type, provider_url from origin_metadata as om inner join metadata_provider mp on om.provider_id = mp.id where om.origin_id = origin and mp.provider_type = type order by discovery_date desc; $$; -- end origin_metadata functions -- add tmp_tool entries to tool, -- skipping duplicates if any. -- -- operates in bulk: 0. create temporary tmp_tool, 1. COPY to -- it, 2. 
call this function to insert and filtering out duplicates create or replace function swh_tool_add() returns setof tool language plpgsql as $$ begin insert into tool(name, version, configuration) select name, version, configuration from tmp_tool tmp on conflict(name, version, configuration) do nothing; return query select id, name, version, configuration from tmp_tool join tool using(name, version, configuration); return; end $$; -- simple counter mapping a textual label to an integer value create type counter as ( label text, value bigint ); -- return statistics about the number of tuples in various SWH tables -- -- Note: the returned values are based on postgres internal statistics -- (pg_class table), which are only updated daily (by autovacuum) or so create or replace function swh_stat_counters() returns setof counter language sql stable as $$ select object_type as label, value as value from object_counts where object_type in ( 'content', 'directory', 'directory_entry_dir', 'directory_entry_file', 'directory_entry_rev', 'occurrence', 'occurrence_history', 'origin', 'origin_visit', 'person', 'entity', 'entity_history', 'release', 'revision', 'revision_history', 'skipped_content' ); $$; create or replace function swh_update_counter(object_type text) returns void language plpgsql as $$ begin execute format(' insert into object_counts (value, last_update, object_type) values ((select count(*) from %1$I), NOW(), %1$L) on conflict (object_type) do update set value = excluded.value, last_update = excluded.last_update', object_type); return; end; $$; + +create or replace function swh_update_counter_bucketed() + returns void + language plpgsql +as $$ +declare + query text; + line_to_update int; + new_value bigint; +begin + select + object_counts_bucketed.line, + format( + 'select count(%I) from %I where %s', + coalesce(identifier, '*'), + object_type, + coalesce( + concat_ws( + ' and ', + case when bucket_start is not null then + format('%I >= %L', identifier, bucket_start) -- lower bound condition, inclusive + end, + case when bucket_end is not null then + format('%I < %L', identifier, bucket_end) -- upper bound condition, exclusive + end + ), + 'true' + ) + ) + from object_counts_bucketed + order by coalesce(last_update, now() - '1 month'::interval) asc + limit 1 + into line_to_update, query; + + execute query into new_value; + + update object_counts_bucketed + set value = new_value, + last_update = now() + where object_counts_bucketed.line = line_to_update; + +END +$$; + +create or replace function swh_update_counters_from_buckets() + returns trigger + language plpgsql +as $$ +begin +with to_update as ( + select object_type, sum(value) as value, max(last_update) as last_update + from object_counts_bucketed ob1 + where not exists ( + select 1 from object_counts_bucketed ob2 + where ob1.object_type = ob2.object_type + and value is null + ) + group by object_type +) update object_counts + set + value = to_update.value, + last_update = to_update.last_update + from to_update + where + object_counts.object_type = to_update.object_type + and object_counts.value != to_update.value; +return null; +end +$$; + +create trigger update_counts_from_bucketed + after insert or update + on object_counts_bucketed + for each row + when (NEW.line % 256 = 0) + execute procedure swh_update_counters_from_buckets(); diff --git a/sql/swh-indexes.sql b/sql/swh-indexes.sql index 5987088a5..ef2038ffb 100644 --- a/sql/swh-indexes.sql +++ b/sql/swh-indexes.sql @@ -1,253 +1,257 @@ -- content create unique index concurrently 
content_pkey on content(sha1); create unique index concurrently on content(sha1_git); create index concurrently on content(sha256); create index concurrently on content(blake2s256); create index concurrently on content(ctime); -- TODO use a BRIN index here (postgres >= 9.5) create unique index concurrently on content(object_id); alter table content add primary key using index content_pkey; -- entity_history create unique index concurrently entity_history_pkey on entity_history(id); create index concurrently on entity_history(uuid); create index concurrently on entity_history(name); alter table entity_history add primary key using index entity_history_pkey; -- entity create unique index concurrently entity_pkey on entity(uuid); create index concurrently on entity(name); create index concurrently on entity using gin(lister_metadata jsonb_path_ops); alter table entity add primary key using index entity_pkey; alter table entity add constraint entity_parent_fkey foreign key (parent) references entity(uuid) deferrable initially deferred not valid; alter table entity validate constraint entity_parent_fkey; alter table entity add constraint entity_last_id_fkey foreign key (last_id) references entity_history(id) not valid; alter table entity validate constraint entity_last_id_fkey; -- entity_equivalence create unique index concurrently entity_equivalence_pkey on entity_equivalence(entity1, entity2); alter table entity_equivalence add primary key using index entity_equivalence_pkey; alter table entity_equivalence add constraint "entity_equivalence_entity1_fkey" foreign key (entity1) references entity(uuid) not valid; alter table entity_equivalence validate constraint entity_equivalence_entity1_fkey; alter table entity_equivalence add constraint "entity_equivalence_entity2_fkey" foreign key (entity2) references entity(uuid) not valid; alter table entity_equivalence validate constraint entity_equivalence_entity2_fkey; alter table entity_equivalence add constraint "order_entities" check (entity1 < entity2) not valid; alter table entity_equivalence validate constraint order_entities; -- listable_entity create unique index concurrently listable_entity_pkey on listable_entity(uuid); alter table listable_entity add primary key using index listable_entity_pkey; alter table listable_entity add constraint listable_entity_uuid_fkey foreign key (uuid) references entity(uuid) not valid; alter table listable_entity validate constraint listable_entity_uuid_fkey; -- list_history create unique index concurrently list_history_pkey on list_history(id); alter table list_history add primary key using index list_history_pkey; alter table list_history add constraint list_history_entity_fkey foreign key (entity) references listable_entity(uuid) not valid; alter table list_history validate constraint list_history_entity_fkey; -- origin create unique index concurrently origin_pkey on origin(id); alter table origin add primary key using index origin_pkey; create index concurrently on origin(type, url); alter table origin add constraint origin_lister_fkey foreign key (lister) references listable_entity(uuid) not valid; alter table origin validate constraint origin_lister_fkey; alter table origin add constraint origin_project_fkey foreign key (project) references entity(uuid) not valid; alter table origin validate constraint origin_project_fkey; -- skipped_content alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256); create index concurrently on 
skipped_content(sha1); create index concurrently on skipped_content(sha1_git); create index concurrently on skipped_content(sha256); create index concurrently on skipped_content(blake2s256); create unique index concurrently on skipped_content(object_id); alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid; alter table skipped_content validate constraint skipped_content_origin_fkey; -- fetch_history create unique index concurrently fetch_history_pkey on fetch_history(id); alter table fetch_history add primary key using index fetch_history_pkey; alter table fetch_history add constraint fetch_history_origin_fkey foreign key (origin) references origin(id) not valid; alter table fetch_history validate constraint fetch_history_origin_fkey; -- directory create unique index concurrently directory_pkey on directory(id); alter table directory add primary key using index directory_pkey; create index concurrently on directory using gin (dir_entries); create index concurrently on directory using gin (file_entries); create index concurrently on directory using gin (rev_entries); create unique index concurrently on directory(object_id); -- directory_entry_dir create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id); alter table directory_entry_dir add primary key using index directory_entry_dir_pkey; create unique index concurrently on directory_entry_dir(target, name, perms); -- directory_entry_file create unique index concurrently directory_entry_file_pkey on directory_entry_file(id); alter table directory_entry_file add primary key using index directory_entry_file_pkey; create unique index concurrently on directory_entry_file(target, name, perms); -- directory_entry_rev create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id); alter table directory_entry_rev add primary key using index directory_entry_rev_pkey; create unique index concurrently on directory_entry_rev(target, name, perms); -- person create unique index concurrently person_pkey on person(id); alter table person add primary key using index person_pkey; create unique index concurrently on person(fullname); create index concurrently on person(name); create index concurrently on person(email); -- revision create unique index concurrently revision_pkey on revision(id); alter table revision add primary key using index revision_pkey; alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid; alter table revision validate constraint revision_author_fkey; alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid; alter table revision validate constraint revision_committer_fkey; create index concurrently on revision(directory); create unique index concurrently on revision(object_id); -- revision_history create unique index concurrently revision_history_pkey on revision_history(id, parent_rank); alter table revision_history add primary key using index revision_history_pkey; create index concurrently on revision_history(parent_id); alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid; alter table revision_history validate constraint revision_history_id_fkey; -- snapshot create unique index concurrently snapshot_pkey on snapshot(object_id); alter table snapshot add primary key using index snapshot_pkey; create unique index concurrently on 
snapshot(id); -- snapshot_branch create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id); alter table snapshot_branch add primary key using index snapshot_branch_pkey; create unique index concurrently on snapshot_branch (target_type, target, name); alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid; alter table snapshot_branch validate constraint snapshot_branch_target_check; alter table snapshot_branch add constraint snapshot_target_check check (target_type not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid; alter table snapshot_branch validate constraint snapshot_target_check; create unique index concurrently on snapshot_branch (name) where target_type is null and target is null; -- snapshot_branches create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id); alter table snapshot_branches add primary key using index snapshot_branches_pkey; alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid; alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey; alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid; alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey; -- origin_visit create unique index concurrently origin_visit_pkey on origin_visit(origin, visit); alter table origin_visit add primary key using index origin_visit_pkey; create index concurrently on origin_visit(date); alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid; alter table origin_visit validate constraint origin_visit_origin_fkey; alter table origin_visit add constraint origin_visit_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid; alter table origin_visit validate constraint origin_visit_snapshot_id_fkey; -- occurrence_history create unique index concurrently occurrence_history_pkey on occurrence_history(object_id); alter table occurrence_history add primary key using index occurrence_history_pkey; create index concurrently on occurrence_history(target, target_type); create index concurrently on occurrence_history(origin, branch); create unique index concurrently on occurrence_history(origin, branch, target, target_type); alter table occurrence_history add constraint occurrence_history_origin_fkey foreign key (origin) references origin(id) not valid; alter table occurrence_history validate constraint occurrence_history_origin_fkey; -- occurrence create unique index concurrently occurrence_pkey on occurrence(origin, branch); alter table occurrence add primary key using index occurrence_pkey; alter table occurrence add constraint occurrence_origin_fkey foreign key (origin) references origin(id) not valid; alter table occurrence validate constraint occurrence_origin_fkey; -- release create unique index concurrently release_pkey on release(id); alter table release add primary key using index release_pkey; create index concurrently on release(target, target_type); create unique index concurrently on release(object_id); alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid; alter table release validate constraint release_author_fkey; -- 
tool create unique index tool_pkey on tool(id); alter table tool add primary key using index tool_pkey; create unique index on tool(name, version, configuration); -- metadata_provider create unique index concurrently metadata_provider_pkey on metadata_provider(id); alter table metadata_provider add primary key using index metadata_provider_pkey; create index concurrently on metadata_provider(provider_name, provider_url); -- origin_metadata create unique index concurrently origin_metadata_pkey on origin_metadata(id); alter table origin_metadata add primary key using index origin_metadata_pkey; create index concurrently on origin_metadata(origin_id, provider_id, tool_id); alter table origin_metadata add constraint origin_metadata_origin_fkey foreign key (origin_id) references origin(id) not valid; alter table origin_metadata validate constraint origin_metadata_origin_fkey; alter table origin_metadata add constraint origin_metadata_provider_fkey foreign key (provider_id) references metadata_provider(id) not valid; alter table origin_metadata validate constraint origin_metadata_provider_fkey; alter table origin_metadata add constraint origin_metadata_tool_fkey foreign key (tool_id) references tool(id) not valid; alter table origin_metadata validate constraint origin_metadata_tool_fkey; -- object_counts create unique index concurrently object_counts_pkey on object_counts(object_type); alter table object_counts add primary key using index object_counts_pkey; + +-- object_counts_bucketed +create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line); +alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey; diff --git a/sql/swh-schema.sql b/sql/swh-schema.sql index df4553d28..a9e2564a9 100644 --- a/sql/swh-schema.sql +++ b/sql/swh-schema.sql @@ -1,434 +1,445 @@ --- --- Software Heritage Data Model --- -- drop schema if exists swh cascade; -- create schema swh; -- set search_path to swh; create table dbversion ( version int primary key, release timestamptz, description text ); insert into dbversion(version, release, description) - values(117, now(), 'Work In Progress'); + values(118, now(), 'Work In Progress'); -- a SHA1 checksum (not necessarily originating from Git) create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); -- a SHA256 checksum create domain sha256 as bytea check (length(value) = 32); -- a blake2 checksum create domain blake2s256 as bytea check (length(value) = 32); -- UNIX path (absolute, relative, individual path component, etc.) create domain unix_path as bytea; -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod create domain file_perms as int; -- Checksums about actual file content. Note that the content itself is not -- stored in the DB, but on external (key-value) storage. A single checksum is -- used as key there, but the other can be used to verify that we do not inject -- content collisions not knowingly. create table content ( sha1 sha1 not null, sha1_git sha1_git not null, sha256 sha256 not null, blake2s256 blake2s256, length bigint not null, ctime timestamptz not null default now(), -- creation time, i.e. time of (first) injection into the storage status content_status not null default 'visible', object_id bigserial ); -- Entities constitute a typed hierarchy of organization, hosting -- facilities, groups, people and software projects. 
-- -- Examples of entities: Software Heritage, Debian, GNU, GitHub, -- Apache, The Linux Foundation, the Debian Python Modules Team, the -- torvalds GitHub user, the torvalds/linux GitHub project. -- -- The data model is hierarchical (via the parent attribute) and might -- store sub-branches of existing entities. The key feature of an -- entity is might be *listed* (if it is available in listable_entity) -- to retrieve information about its content, i.e: sub-entities, -- projects, origins. -- The history of entities. Allows us to keep historical metadata -- about entities. The temporal invariant is the uuid. Root -- organization uuids are manually generated (and available in -- swh-data.sql). -- -- For generated entities (generated = true), we can provide -- generation_metadata to allow listers to retrieve the uuids of previous -- iterations of the entity. -- -- Inactive entities that have been active in the past (active = -- false) should register the timestamp at which we saw them -- deactivate, in a new entry of entity_history. create table entity_history ( id bigserial not null, uuid uuid, parent uuid, -- should reference entity_history(uuid) name text not null, type entity_type not null, description text, homepage text, active boolean not null, -- whether the entity was seen on the last listing generated boolean not null, -- whether this entity has been generated by a lister lister_metadata jsonb, -- lister-specific metadata, used for queries metadata jsonb, validity timestamptz[] -- timestamps at which we have seen this entity ); -- The entity table provides a view of the latest information on a -- given entity. It is updated via a trigger on entity_history. create table entity ( uuid uuid not null, parent uuid, name text not null, type entity_type not null, description text, homepage text, active boolean not null, -- whether the entity was seen on the last listing generated boolean not null, -- whether this entity has been generated by a lister lister_metadata jsonb, -- lister-specific metadata, used for queries metadata jsonb, last_seen timestamptz, -- last listing time or disappearance time for active=false last_id bigint -- last listing id ); -- Register the equivalence between two entities. Allows sideways -- navigation in the entity table create table entity_equivalence ( entity1 uuid, entity2 uuid ); -- Register a lister for a specific entity. create table listable_entity ( uuid uuid, enabled boolean not null default true, -- do we list this entity automatically? list_engine text, -- crawler to be used to list entity's content list_url text, -- root URL to start the listing list_params jsonb, -- org-specific listing parameter latest_list timestamptz -- last time the entity's content has been listed ); -- Log of all entity listings (i.e., entity crawling) that have been -- done in the past, or are still ongoing. create table list_history ( id bigserial not null, date timestamptz not null, status boolean, -- true if and only if the listing has been successful result jsonb, -- more detailed return value, depending on status stdout text, stderr text, duration interval, -- fetch duration of NULL if still ongoing entity uuid ); -- An origin is a place, identified by an URL, where software can be found. We -- support different kinds of origins, e.g., git and other VCS repositories, -- web pages that list tarballs URLs (e.g., http://www.kernel.org), indirect -- tarball URLs (e.g., http://www.example.org/latest.tar.gz), etc. 
The key -- feature of an origin is that it can be *fetched* (wget, git clone, svn -- checkout, etc.) to retrieve all the contained software. create table origin ( id bigserial not null, type text, -- TODO use an enum here (?) url text not null, lister uuid, project uuid ); -- Content we have seen but skipped for some reason. This table is -- separate from the content table as we might not have the sha1 -- checksum of that data (for instance when we inject git -- repositories, objects that are too big will be skipped here, and we -- will only know their sha1_git). 'reason' contains the reason the -- content was skipped. origin is a nullable column allowing to find -- out which origin contains that skipped content. create table skipped_content ( sha1 sha1, sha1_git sha1_git, sha256 sha256, blake2s256 blake2s256, length bigint not null, ctime timestamptz not null default now(), status content_status not null default 'absent', reason text not null, origin bigint, object_id bigserial ); -- Log of all origin fetches (i.e., origin crawling) that have been done in the -- past, or are still ongoing. Similar to list_history, but for origins. create table fetch_history ( id bigserial, origin bigint, date timestamptz not null, status boolean, -- true if and only if the fetch has been successful result jsonb, -- more detailed returned values, times, etc... stdout text, stderr text, -- null when status is true, filled otherwise duration interval -- fetch duration of NULL if still ongoing ); -- A file-system directory. A directory is a list of directory entries (see -- tables: directory_entry_{dir,file}). -- -- To list the contents of a directory: -- 1. list the contained directory_entry_dir using array dir_entries -- 2. list the contained directory_entry_file using array file_entries -- 3. list the contained directory_entry_rev using array rev_entries -- 4. UNION -- -- Synonyms/mappings: -- * git: tree create table directory ( id sha1_git, dir_entries bigint[], -- sub-directories, reference directory_entry_dir file_entries bigint[], -- contained files, reference directory_entry_file rev_entries bigint[], -- mounted revisions, reference directory_entry_rev object_id bigserial -- short object identifier ); -- A directory entry pointing to a sub-directory. create table directory_entry_dir ( id bigserial, target sha1_git, -- id of target directory name unix_path, -- path name, relative to containing dir perms file_perms -- unix-like permissions ); -- A directory entry pointing to a file. create table directory_entry_file ( id bigserial, target sha1_git, -- id of target file name unix_path, -- path name, relative to containing dir perms file_perms -- unix-like permissions ); -- A directory entry pointing to a revision. create table directory_entry_rev ( id bigserial, target sha1_git, -- id of target revision name unix_path, -- path name, relative to containing dir perms file_perms -- unix-like permissions ); create table person ( id bigserial, name bytea, -- advisory: not null if we managed to parse a name email bytea, -- advisory: not null if we managed to parse an email fullname bytea not null -- freeform specification; what is actually used in the checksums -- will usually be of the form 'name ' ); -- A snapshot of a software project at a specific point in time. -- -- Synonyms/mappings: -- * git / subversion / etc: commit -- * tarball: a specific tarball -- -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in -- case of merges) parent revisions. 
Each revision points to a directory, i.e., -- a file-system tree containing files and directories. create table revision ( id sha1_git, date timestamptz, date_offset smallint, committer_date timestamptz, committer_date_offset smallint, type revision_type not null, directory sha1_git, -- file-system tree message bytea, author bigint, committer bigint, synthetic boolean not null default false, -- true if synthetic (cf. swh-loader-tar) metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...) object_id bigserial, date_neg_utc_offset boolean, committer_date_neg_utc_offset boolean ); -- either this table or the sha1_git[] column on the revision table create table revision_history ( id sha1_git, parent_id sha1_git, parent_rank int not null default 0 -- parent position in merge commits, 0-based ); -- The timestamps at which Software Heritage has made a visit of the given origin. create table origin_visit ( origin bigint not null, visit bigint not null, date timestamptz not null, status origin_visit_status not null, metadata jsonb, snapshot_id bigint ); comment on column origin_visit.origin is 'Visited origin'; comment on column origin_visit.visit is 'Visit number the visit occurred for that origin'; comment on column origin_visit.date is 'Visit date for that origin'; comment on column origin_visit.status is 'Visit status for that origin'; comment on column origin_visit.metadata is 'Metadata associated with the visit'; comment on column origin_visit.snapshot_id is 'id of the snapshot associated with the visit'; -- The content of software origins is indexed starting from top-level pointers -- called "branches". Every time we fetch some origin we store in this table -- where the branches pointed to at fetch time. -- -- Synonyms/mappings: -- * git: ref (in the "git update-ref" sense) create table occurrence_history ( origin bigint not null, branch bytea not null, -- e.g., b"master" (for VCS), or b"sid" (for Debian) target sha1_git not null, -- ref target, e.g., commit id target_type object_type not null, -- ref target type visits bigint[] not null, -- the visits where that occurrence was valid. References -- origin_visit(visit), where o_h.origin = origin_visit.origin. object_id bigserial not null, -- short object identifier snapshot_branch_id bigint ); -- Materialized view of occurrence_history, storing the *current* value of each -- branch, as last seen by SWH. create table occurrence ( origin bigint, branch bytea not null, target sha1_git not null, target_type object_type not null ); create table snapshot ( object_id bigserial not null, id sha1_git ); create table snapshot_branch ( object_id bigserial not null, name bytea not null, target bytea, target_type snapshot_target ); create table snapshot_branches ( snapshot_id bigint not null, branch_id bigint not null ); -- A "memorable" point in the development history of a project. -- -- Synonyms/mappings: -- * git: tag (of the annotated kind, otherwise they are just references) -- * tarball: the release version number create table release ( id sha1_git not null, target sha1_git, date timestamptz, date_offset smallint, name bytea, comment bytea, author bigint, synthetic boolean not null default false, -- true if synthetic (cf. 
swh-loader-tar) object_id bigserial, target_type object_type not null, date_neg_utc_offset boolean ); -- Tools create table tool ( id serial not null, name text not null, version text not null, configuration jsonb ); comment on table tool is 'Tool information'; comment on column tool.id is 'Tool identifier'; comment on column tool.version is 'Tool name'; comment on column tool.version is 'Tool version'; comment on column tool.configuration is 'Tool configuration: command line, flags, etc...'; create table metadata_provider ( id serial not null, provider_name text not null, provider_type text not null, provider_url text, metadata jsonb ); comment on table metadata_provider is 'Metadata provider information'; comment on column metadata_provider.id is 'Provider''s identifier'; comment on column metadata_provider.provider_name is 'Provider''s name'; comment on column metadata_provider.provider_url is 'Provider''s url'; comment on column metadata_provider.metadata is 'Other metadata about provider'; -- Discovery of metadata during a listing, loading, deposit or external_catalog of an origin -- also provides a translation to a defined json schema using a translation tool (tool_id) create table origin_metadata( id bigserial not null, -- PK object identifier origin_id bigint not null, -- references origin(id) discovery_date timestamptz not null, -- when it was extracted provider_id bigint not null, -- ex: 'hal', 'lister-github', 'loader-github' tool_id bigint not null, metadata jsonb not null ); comment on table origin_metadata is 'keeps all metadata found concerning an origin'; comment on column origin_metadata.id is 'the origin_metadata object''s id'; comment on column origin_metadata.origin_id is 'the origin id for which the metadata was found'; comment on column origin_metadata.discovery_date is 'the date of retrieval'; comment on column origin_metadata.provider_id is 'the metadata provider: github, openhub, deposit, etc.'; comment on column origin_metadata.tool_id is 'the tool used for extracting metadata: lister-github, etc.'; comment on column origin_metadata.metadata is 'metadata in json format but with original terms'; -- Keep a cache of object counts create table object_counts ( - object_type text, - value bigint, - last_update timestamptz + object_type text, -- table for which we're counting objects (PK) + value bigint, -- count of objects in the table + last_update timestamptz, -- last update for the object count in this table + single_update boolean -- whether we update this table standalone (true) or through bucketed counts (false) +); + +CREATE TABLE object_counts_bucketed ( + line serial NOT NULL, -- PK + object_type text NOT NULL, -- table for which we're counting objects + identifier text NOT NULL, -- identifier across which we're bucketing objects + bucket_start bytea, -- lower bound (inclusive) for the bucket + bucket_end bytea, -- upper bound (exclusive) for the bucket + value bigint, -- count of objects in the bucket + last_update timestamptz -- last update for the object count in this bucket ); diff --git a/sql/upgrades/118.sql b/sql/upgrades/118.sql new file mode 100644 index 000000000..658f81483 --- /dev/null +++ b/sql/upgrades/118.sql @@ -0,0 +1,107 @@ +-- SWH DB schema upgrade +-- from_version: 117 +-- to_version: 118 +-- description: implement bucketed object counts + +insert into dbversion(version, release, description) + values(118, now(), 'Work In Progress'); + +CREATE SEQUENCE object_counts_bucketed_line_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MAXVALUE 
+ NO MINVALUE + CACHE 1; + +CREATE TABLE object_counts_bucketed ( + line integer DEFAULT nextval('object_counts_bucketed_line_seq'::regclass) NOT NULL, + object_type text NOT NULL, + identifier text NOT NULL, + bucket_start bytea, + bucket_end bytea, + "value" bigint, + last_update timestamp with time zone +); + +ALTER TABLE object_counts + ADD COLUMN single_update boolean; + +ALTER SEQUENCE object_counts_bucketed_line_seq + OWNED BY object_counts_bucketed.line; + +CREATE OR REPLACE FUNCTION swh_update_counter_bucketed() RETURNS void + LANGUAGE plpgsql + AS $$ +declare + query text; + line_to_update int; + new_value bigint; +begin + select + object_counts_bucketed.line, + format( + 'select count(%I) from %I where %s', + coalesce(identifier, '*'), + object_type, + coalesce( + concat_ws( + ' and ', + case when bucket_start is not null then + format('%I >= %L', identifier, bucket_start) -- lower bound condition, inclusive + end, + case when bucket_end is not null then + format('%I < %L', identifier, bucket_end) -- upper bound condition, exclusive + end + ), + 'true' + ) + ) + from object_counts_bucketed + order by coalesce(last_update, now() - '1 month'::interval) asc + limit 1 + into line_to_update, query; + + execute query into new_value; + + update object_counts_bucketed + set value = new_value, + last_update = now() + where object_counts_bucketed.line = line_to_update; + +END +$$; + +CREATE OR REPLACE FUNCTION swh_update_counters_from_buckets() RETURNS trigger + LANGUAGE plpgsql + AS $$ +begin +with to_update as ( + select object_type, sum(value) as value, max(last_update) as last_update + from object_counts_bucketed ob1 + where not exists ( + select 1 from object_counts_bucketed ob2 + where ob1.object_type = ob2.object_type + and value is null + ) + group by object_type +) update object_counts + set + value = to_update.value, + last_update = to_update.last_update + from to_update + where + object_counts.object_type = to_update.object_type + and object_counts.value != to_update.value; +return null; +end +$$; + +ALTER TABLE object_counts_bucketed + ADD CONSTRAINT object_counts_bucketed_pkey PRIMARY KEY (line); + +CREATE TRIGGER update_counts_from_bucketed + AFTER INSERT OR UPDATE ON object_counts_bucketed + FOR EACH ROW + WHEN (((new.line % 256) = 0)) + EXECUTE PROCEDURE swh_update_counters_from_buckets(); diff --git a/swh.storage.egg-info/PKG-INFO b/swh.storage.egg-info/PKG-INFO index 4cf8d0a39..3333a7719 100644 --- a/swh.storage.egg-info/PKG-INFO +++ b/swh.storage.egg-info/PKG-INFO @@ -1,12 +1,12 @@ Metadata-Version: 2.1 Name: swh.storage -Version: 0.0.98 +Version: 0.0.99 Summary: Software Heritage storage manager Home-page: https://forge.softwareheritage.org/diffusion/DSTO/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN Provides-Extra: listener Provides-Extra: schemata diff --git a/swh.storage.egg-info/SOURCES.txt b/swh.storage.egg-info/SOURCES.txt index 4fb299653..ef6901974 100644 --- a/swh.storage.egg-info/SOURCES.txt +++ b/swh.storage.egg-info/SOURCES.txt @@ -1,194 +1,201 @@ .gitignore AUTHORS LICENSE MANIFEST.in Makefile Makefile.local README.db_testing README.dev requirements-swh.txt requirements.txt setup.py version.txt +.vscode/settings.json bin/swh-storage-add-dir debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format docs/.gitignore docs/Makefile docs/Makefile.local docs/archiver-blueprint.rst docs/conf.py docs/index.rst docs/sql-storage.rst 
docs/_static/.placeholder docs/_templates/.placeholder sql/.gitignore sql/Makefile sql/TODO sql/clusters.dot sql/swh-data.sql sql/swh-enums.sql sql/swh-func.sql sql/swh-indexes.sql sql/swh-init.sql sql/swh-schema.sql sql/swh-triggers.sql sql/bin/db-init sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/entity.lister_metadata.schema.json sql/doc/json/entity.metadata.schema.json sql/doc/json/entity_history.lister_metadata.schema.json sql/doc/json/entity_history.metadata.schema.json sql/doc/json/fetch_history.result.schema.json sql/doc/json/list_history.result.schema.json sql/doc/json/listable_entity.list_params.schema.json sql/doc/json/origin_visit.metadata.json sql/doc/json/tool.tool_configuration.schema.json sql/json/.gitignore sql/json/Makefile sql/json/entity.lister_metadata.schema.json sql/json/entity.metadata.schema.json sql/json/entity_history.lister_metadata.schema.json sql/json/entity_history.metadata.schema.json sql/json/fetch_history.result.schema.json sql/json/list_history.result.schema.json sql/json/listable_entity.list_params.schema.json sql/json/origin_visit.metadata.json sql/json/tool.tool_configuration.schema.json sql/upgrades/015.sql sql/upgrades/016.sql sql/upgrades/017.sql sql/upgrades/018.sql sql/upgrades/019.sql sql/upgrades/020.sql sql/upgrades/021.sql sql/upgrades/022.sql sql/upgrades/023.sql sql/upgrades/024.sql sql/upgrades/025.sql sql/upgrades/026.sql sql/upgrades/027.sql sql/upgrades/028.sql sql/upgrades/029.sql sql/upgrades/030.sql sql/upgrades/032.sql sql/upgrades/033.sql sql/upgrades/034.sql sql/upgrades/035.sql sql/upgrades/036.sql sql/upgrades/037.sql sql/upgrades/038.sql sql/upgrades/039.sql sql/upgrades/040.sql sql/upgrades/041.sql sql/upgrades/042.sql sql/upgrades/043.sql sql/upgrades/044.sql sql/upgrades/045.sql sql/upgrades/046.sql sql/upgrades/047.sql sql/upgrades/048.sql sql/upgrades/049.sql sql/upgrades/050.sql sql/upgrades/051.sql sql/upgrades/052.sql sql/upgrades/053.sql sql/upgrades/054.sql sql/upgrades/055.sql sql/upgrades/056.sql sql/upgrades/057.sql sql/upgrades/058.sql sql/upgrades/059.sql sql/upgrades/060.sql sql/upgrades/061.sql sql/upgrades/062.sql sql/upgrades/063.sql sql/upgrades/064.sql sql/upgrades/065.sql sql/upgrades/066.sql sql/upgrades/067.sql sql/upgrades/068.sql sql/upgrades/069.sql sql/upgrades/070.sql sql/upgrades/071.sql sql/upgrades/072.sql sql/upgrades/073.sql sql/upgrades/074.sql sql/upgrades/075.sql sql/upgrades/076.sql sql/upgrades/077.sql sql/upgrades/078.sql sql/upgrades/079.sql sql/upgrades/080.sql sql/upgrades/081.sql sql/upgrades/082.sql sql/upgrades/083.sql sql/upgrades/084.sql sql/upgrades/085.sql sql/upgrades/086.sql sql/upgrades/087.sql sql/upgrades/088.sql sql/upgrades/089.sql sql/upgrades/090.sql sql/upgrades/091.sql sql/upgrades/092.sql sql/upgrades/093.sql sql/upgrades/094.sql sql/upgrades/095.sql sql/upgrades/096.sql sql/upgrades/097.sql sql/upgrades/098.sql sql/upgrades/099.sql sql/upgrades/100.sql sql/upgrades/101.sql sql/upgrades/102.sql sql/upgrades/103.sql sql/upgrades/104.sql sql/upgrades/105.sql sql/upgrades/106.sql sql/upgrades/107.sql sql/upgrades/108.sql sql/upgrades/109.sql sql/upgrades/110.sql sql/upgrades/111.sql sql/upgrades/112.sql sql/upgrades/113.sql sql/upgrades/114.sql sql/upgrades/115.sql sql/upgrades/116.sql sql/upgrades/117.sql +sql/upgrades/118.sql swh/__init__.py swh.storage.egg-info/PKG-INFO swh.storage.egg-info/SOURCES.txt swh.storage.egg-info/dependency_links.txt swh.storage.egg-info/requires.txt 
swh.storage.egg-info/top_level.txt swh/storage/__init__.py swh/storage/common.py swh/storage/converters.py swh/storage/db.py swh/storage/exc.py swh/storage/listener.py swh/storage/storage.py +swh/storage/algos/__init__.py +swh/storage/algos/diff.py +swh/storage/algos/dir_iterators.py swh/storage/api/__init__.py swh/storage/api/client.py swh/storage/api/server.py swh/storage/schemata/__init__.py swh/storage/schemata/distribution.py swh/storage/tests/__init__.py swh/storage/tests/server_testing.py swh/storage/tests/storage_testing.py swh/storage/tests/test_api_client.py swh/storage/tests/test_converters.py swh/storage/tests/test_db.py swh/storage/tests/test_storage.py +swh/storage/tests/algos/__init__.py +swh/storage/tests/algos/test_diff.py utils/dump_revisions.py utils/fix_revisions_from_dump.py \ No newline at end of file diff --git a/swh/storage/algos/__init__.py b/swh/storage/algos/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/swh/storage/algos/diff.py b/swh/storage/algos/diff.py new file mode 100644 index 000000000..75c53f11e --- /dev/null +++ b/swh/storage/algos/diff.py @@ -0,0 +1,402 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +# Utility module to efficiently compute the list of changed files +# between two directory trees. +# The implementation is inspired from the work of Alberto Cortés +# for the go-git project. For more details, you can refer to: +# - this blog post: https://blog.sourced.tech/post/difftree/ +# - the reference implementation in go: +# https://github.com/src-d/go-git/tree/master/utils/merkletrie + + +import collections + +from swh.model.identifiers import directory_identifier + +from .dir_iterators import ( + DirectoryIterator, DoubleDirectoryIterator, Remaining +) + +# get the hash identifier for an empty directory +_empty_dir_hash = directory_identifier({'entries': []}) + + +def _get_rev(storage, rev_id): + """ + Return revision data from swh storage. + """ + return list(storage.revision_get([rev_id]))[0] + + +class _RevisionChangesList(object): + """ + Helper class to track the changes between two + revision directories. + """ + + def __init__(self, storage, track_renaming): + """ + Args: + storage: instance of swh storage + track_renaming (bool): whether to track or not files renaming + """ + self.storage = storage + self.track_renaming = track_renaming + self.result = [] + # dicts used to track file renaming based on hash value + # we use a list instead of a single entry to handle the corner + # case when a repository contains multiple instance of + # the same file in different directories and a commit + # renames all of them + self.inserted_hash_idx = collections.defaultdict(list) + self.deleted_hash_idx = collections.defaultdict(list) + + def add_insert(self, it_to): + """ + Add a file insertion in the to directory. 
+ + Args: + it_to (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on the to directory + """ + to_hash = it_to.current_hash() + # if the current file hash has been previously marked as deleted, + # the file has been renamed + if self.track_renaming and self.deleted_hash_idx[to_hash]: + # pop the delete change index in the same order it was inserted + change = self.result[self.deleted_hash_idx[to_hash].pop(0)] + # turn the delete change into a rename + change['type'] = 'rename' + change['to'] = it_to.current() + change['to_path'] = it_to.current_path() + else: + # add the insert change in the list + self.result.append({'type': 'insert', + 'from': None, + 'from_path': None, + 'to': it_to.current(), + 'to_path': it_to.current_path()}) + # if rename tracking is activated, add the change index in + # the inserted_hash_idx dict + if self.track_renaming: + self.inserted_hash_idx[to_hash].append(len(self.result) - 1) + + def add_delete(self, it_from): + """ + Add a file deletion in the from directory. + + Args: + it_from (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on the from directory + """ + from_hash = it_from.current_hash() + # if the current file has been previously marked as inserted, + # the file has been renamed + if self.track_renaming and self.inserted_hash_idx[from_hash]: + # pop the insert change index in the same order it was inserted + change = self.result[self.inserted_hash_idx[from_hash].pop(0)] + # turn the insert change into a rename + change['type'] = 'rename' + change['from'] = it_from.current() + change['from_path'] = it_from.current_path() + else: + # add the delete change in the list + self.result.append({'type': 'delete', + 'from': it_from.current(), + 'from_path': it_from.current_path(), + 'to': None, + 'to_path': None}) + # if rename tracking is activated, add the change index in + # the deleted_hash_idx dict + if self.track_renaming: + self.deleted_hash_idx[from_hash].append(len(self.result) - 1) + + def add_modify(self, it_from, it_to): + """ + Add a file modification in the to directory. + + Args: + it_from (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on the from directory + it_to (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on the to directory + """ + self.result.append({'type': 'modify', + 'from': it_from.current(), + 'from_path': it_from.current_path(), + 'to': it_to.current(), + 'to_path': it_to.current_path()}) + + def add_recursive(self, it, insert): + """ + Recursively add changes from a directory. 
+ + Args: + it (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on a directory + insert (bool): the type of changes to add (insertion + or deletion) + """ + # current iterated element is a regular file, + # simply add adequate change in the list + if not it.current_is_dir(): + if insert: + self.add_insert(it) + else: + self.add_delete(it) + return + # current iterated element is a directory, + dir_id = it.current_hash() + # handle empty dir insertion/deletion as the swh model allow + # to have such object compared to git + if dir_id == _empty_dir_hash: + if insert: + self.add_insert(it) + else: + self.add_delete(it) + # iterate on files reachable from it and add + # adequate changes in the list + else: + sub_it = DirectoryIterator(self.storage, dir_id, + it.current_path() + b'/') + sub_it_current = sub_it.step() + while sub_it_current: + if not sub_it.current_is_dir(): + if insert: + self.add_insert(sub_it) + else: + self.add_delete(sub_it) + sub_it_current = sub_it.step() + + def add_recursive_insert(self, it_to): + """ + Recursively add files insertion from a to directory. + + Args: + it_to (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on a to directory + """ + self.add_recursive(it_to, True) + + def add_recursive_delete(self, it_from): + """ + Recursively add files deletion from a from directory. + + Args: + it_from (swh.storage.algos.dir_iterators.DirectoryIterator): + iterator on a from directory + """ + self.add_recursive(it_from, False) + + +def _diff_elts_same_name(changes, it): + """" + Compare two directory entries with the same name and add adequate + changes if any. + + Args: + changes (_RevisionChangesList): the list of changes between + two revisions + it (swh.storage.algos.dir_iterators.DoubleDirectoryIterator): + the iterator traversing two revision directories at the same time + """ + # compare the two current directory elements of the iterator + status = it.compare() + # elements have same hash and same permissions: + # no changes to add and call next on the two iterators + if status['same_hash'] and status['same_perms']: + it.next_both() + # elements are regular files and have been modified: + # insert the modification change in the list and + # call next on the two iterators + elif status['both_are_files']: + changes.add_modify(it.it_from, it.it_to) + it.next_both() + # one element is a regular file, the other a directory: + # recursively add delete/insert changes and call next + # on the two iterators + elif status['file_and_dir']: + changes.add_recursive_delete(it.it_from) + changes.add_recursive_insert(it.it_to) + it.next_both() + # both elements are directories: + elif status['both_are_dirs']: + # from directory is empty: + # recursively add insert changes in the to directory + # and call next on the two iterators + if status['from_is_empty_dir']: + changes.add_recursive_insert(it.it_to) + it.next_both() + # to directory is empty: + # recursively add delete changes in the from directory + # and call next on the two iterators + elif status['to_is_empty_dir']: + changes.add_recursive_delete(it.it_from) + it.next_both() + # both directories are not empty: + # call step on the two iterators to descend further in + # the directory trees. + elif not status['from_is_empty_dir'] and not status['to_is_empty_dir']: + it.step_both() + + +def _compare_paths(path1, path2): + """ + Compare paths in lexicographic depth-first order. 
+ For instance, it returns: + - "a" < "b" + - "b/c/d" < "b" + - "c/foo.txt" < "c.txt" + """ + path1_parts = path1.split(b'/') + path2_parts = path2.split(b'/') + i = 0 + while True: + if len(path1_parts) == len(path2_parts) and i == len(path1_parts): + return 0 + elif len(path2_parts) == i: + return 1 + elif len(path1_parts) == i: + return -1 + else: + if path2_parts[i] > path1_parts[i]: + return -1 + elif path2_parts[i] < path1_parts[i]: + return 1 + i = i + 1 + + +def _diff_elts(changes, it): + """ + Compare two directory entries. + + Args: + changes (_RevisionChangesList): the list of changes between + two revisions + it (swh.storage.algos.dir_iterators.DoubleDirectoryIterator): + the iterator traversing two revision directories at the same time + """ + # compare current to and from path in depth-first lexicographic order + c = _compare_paths(it.it_from.current_path(), it.it_to.current_path()) + # current from path is lower than the current to path: + # the from path has been deleted + if c < 0: + changes.add_recursive_delete(it.it_from) + it.next_from() + # current from path is greater than the current to path: + # the to path has been inserted + elif c > 0: + changes.add_recursive_insert(it.it_to) + it.next_to() + # paths are the same and need more processing + else: + _diff_elts_same_name(changes, it) + + +def diff_directories(storage, from_dir, to_dir, track_renaming=False): + """ + Compute the differential between two directories, i.e. the list of + file changes (insertion / deletion / modification / renaming) + between them. + + Args: + storage (swh.storage.storage.Storage): instance of a swh + storage (either local or remote, for optimal performance + the use of a local storage is recommended) + from_dir (bytes): the swh identifier of the directory to compare from + to_dir (bytes): the swh identifier of the directory to compare to + track_renaming (bool): whether or not to track files renaming + + Returns: + list: A list of dict representing the changes between the two + revisions. Each dict contains the following entries: + + - *type*: a string describing the type of change + ('insert' / 'delete' / 'modify' / 'rename') + + - *from*: a dict containing the directory entry metadata in the + from revision (None in case of an insertion) + + - *from_path*: bytes string corresponding to the absolute path + of the from revision entry (None in case of an insertion) + + - *to*: a dict containing the directory entry metadata in the + to revision (None in case of a deletion) + + - *to_path*: bytes string corresponding to the absolute path + of the to revision entry (None in case of a deletion) + + The returned list is sorted in lexicographic depth-first order + according to the value of the *to_path* field. + + """ + changes = _RevisionChangesList(storage, track_renaming) + it = DoubleDirectoryIterator(storage, from_dir, to_dir) + while True: + r = it.remaining() + if r == Remaining.NoMoreFiles: + break + elif r == Remaining.OnlyFromFilesRemain: + changes.add_recursive_delete(it.it_from) + it.next_from() + elif r == Remaining.OnlyToFilesRemain: + changes.add_recursive_insert(it.it_to) + it.next_to() + else: + _diff_elts(changes, it) + return changes.result + + +def diff_revisions(storage, from_rev, to_rev, track_renaming=False): + """ + Compute the differential between two revisions, + i.e. the list of file changes between the two associated directories. 
+ + Args: + storage (swh.storage.storage.Storage): instance of a swh + storage (either local or remote, for optimal performance + the use of a local storage is recommended) + from_rev (bytes): the identifier of the revision to compare from + to_rev (bytes): the identifier of the revision to compare to + track_renaming (bool): whether or not to track files renaming + + Returns: + list: A list of dict describing the introduced file changes + (see :func:`swh.storage.algos.diff.diff_directories`). + + """ + from_dir = None + if from_rev: + from_dir = _get_rev(storage, from_rev)['directory'] + to_dir = _get_rev(storage, to_rev)['directory'] + return diff_directories(storage, from_dir, to_dir, track_renaming) + + +def diff_revision(storage, revision, track_renaming=False): + """ + Computes the differential between a revision and its first parent. + If the revision has no parents, the directory to compare from + is considered as empty. + In other words, it computes the file changes introduced in a + specific revision. + + Args: + storage (swh.storage.storage.Storage): instance of a swh + storage (either local or remote, for optimal performance + the use of a local storage is recommended) + revision (bytes): the identifier of the revision from which to + compute the introduced changes. + track_renaming (bool): whether or not to track files renaming + + Returns: + list: A list of dict describing the introduced file changes + (see :func:`swh.storage.algos.diff.diff_directories`). + """ + rev_data = _get_rev(storage, revision) + parent = None + if rev_data['parents']: + parent = rev_data['parents'][0] + return diff_revisions(storage, parent, revision, track_renaming) diff --git a/swh/storage/algos/dir_iterators.py b/swh/storage/algos/dir_iterators.py new file mode 100644 index 000000000..798644d5f --- /dev/null +++ b/swh/storage/algos/dir_iterators.py @@ -0,0 +1,347 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +# Utility module to iterate on directory trees. +# The implementation is inspired from the work of Alberto Cortés +# for the go-git project. For more details, you can refer to: +# - this blog post: https://blog.sourced.tech/post/difftree/ +# - the reference implementation in go: +# https://github.com/src-d/go-git/tree/master/utils/merkletrie + + +from enum import Enum + +from swh.model.identifiers import directory_identifier + +# get the hash identifier for an empty directory +_empty_dir_hash = directory_identifier({'entries': []}) + + +def _get_dir(storage, dir_id): + """ + Return directory data from swh storage. 
+ """ + return storage.directory_ls(dir_id) if dir_id else [] + + +class DirectoryIterator(object): + """ + Helper class used to iterate over a directory tree in a depth-first + fashion, with some additional features: + - sibling nodes are iterated in lexicographic order by name + - it is possible to skip the visit of sub-directory nodes + for efficiency reasons when comparing two trees (no need to + go deeper if two directories have the same hash) + """ + + def __init__(self, storage, dir_id, base_path=b''): + """ + Args: + storage (swh.storage.storage.Storage): instance of swh storage + (either local or remote) + dir_id (bytes): identifier of a root directory + base_path (bytes): optional base path used when traversing + a sub-directory + """ + self.storage = storage + self.root_dir_id = dir_id + self.base_path = base_path + self.restart() + + def restart(self): + """ + Restart the iteration at the beginning. + """ + # stack of frames representing currently visited directories: + # the root directory is at the bottom while the current one + # is at the top + self.frames = [] + self._push_dir_frame(self.root_dir_id) + self.has_started = False + + def _push_dir_frame(self, dir_id): + """ + Visit a sub-directory by pushing a new frame to the stack. + Each frame is itself a stack of directory entries. + + Args: + dir_id (bytes): identifier of a root directory + """ + if dir_id: + if dir_id == _empty_dir_hash: + self.frames.append([]) + else: + # get directory entries + dir_data = _get_dir(self.storage, dir_id) + # sort them in lexicographical order + dir_data = sorted(dir_data, key=lambda e: e['name']) + # reverse the ordering in order to unstack the "smallest" + # entry each time the iterator advances + dir_data.reverse() + # push the directory frame to the main stack + self.frames.append(dir_data) + + def top(self): + """ + Returns: + list: The top frame of the main directories stack + """ + if not self.frames: + return None + return self.frames[-1] + + def current(self): + """ + Returns: + dict: The current visited directory entry, i.e. the + top element from the top frame + """ + top_frame = self.top() + if not top_frame: + return None + return top_frame[-1] + + def current_hash(self): + """ + Returns: + bytes: The hash value of the currently visited directory + entry + """ + return self.current()['target'] + + def current_perms(self): + """ + Returns: + int: The permissions value of the currently visited directory + entry + """ + return self.current()['perms'] + + def current_path(self): + """ + Returns: + str: The absolute path from the root directory of + the currently visited directory entry + """ + top_frame = self.top() + if not top_frame: + return None + path = [] + for frame in self.frames: + path.append(frame[-1]['name']) + return self.base_path + b'/'.join(path) + + def current_is_dir(self): + """ + Returns: + bool: If the currently visited directory entry is + a directory + """ + return self.current()['type'] == 'dir' + + def _advance(self, descend): + """ + Advance in the tree iteration. 
+ + Args: + descend (bool): whether or not to push a new frame + if the currently visited element is a sub-directory + + Returns: + dict: The description of the newly visited directory entry + """ + current = self.current() + if not self.has_started or not current: + self.has_started = True + return current + + if descend and self.current_is_dir(): + self._push_dir_frame(current['target']) + else: + self.drop() + + return self.current() + + def next(self): + """ + Advance the tree iteration by dropping the current visited + directory entry from the top frame. If the top frame ends up empty, + the operation is recursively applied to remove all empty frames + as the tree is climbed up towards its root. + + Returns: + dict: The description of the newly visited directory entry + """ + return self._advance(False) + + def step(self): + """ + Advance the tree iteration like the next operation with the + difference that if the current visited element is a sub-directory + a new frame representing its content is pushed to the main stack. + + Returns: + dict: The description of the newly visited directory entry + """ + return self._advance(True) + + def drop(self): + """ + Drop the current visited element from the top frame. + If the frame ends up empty, the operation is recursively + applied. + """ + frame = self.top() + if not frame: + return + frame.pop() + if not frame: + self.frames.pop() + self.drop() + + +class Remaining(Enum): + """ + Enum to represent the current state when iterating + on both directory trees at the same time. + """ + NoMoreFiles = 0 + OnlyToFilesRemain = 1 + OnlyFromFilesRemain = 2 + BothHaveFiles = 3 + + +class DoubleDirectoryIterator(object): + """ + Helper class to traverse two directory trees at the same + time and compare their contents to detect changes between them. + """ + + def __init__(self, storage, dir_from, dir_to): + """ + Args: + storage: instance of swh storage + dir_from (bytes): hash identifier of the from directory + dir_to (bytes): hash identifier of the to directory + """ + self.storage = storage + self.dir_from = dir_from + self.dir_to = dir_to + self.restart() + + def restart(self): + """ + Restart the double iteration at the beginning. + """ + # initialize custom dfs iterators for the two directories + self.it_from = DirectoryIterator(self.storage, self.dir_from) + self.it_to = DirectoryIterator(self.storage, self.dir_to) + # grab the first element of each iterator + self.it_from.next() + self.it_to.next() + + def next_from(self): + """ + Apply the next operation on the from iterator. + """ + self.it_from.next() + + def next_to(self): + """ + Apply the next operation on the to iterator. + """ + self.it_to.next() + + def next_both(self): + """ + Apply the next operation on both iterators. + """ + self.next_from() + self.next_to() + + def step_from(self): + """ + Apply the step operation on the from iterator. + """ + self.it_from.step() + + def step_to(self): + """ + Apply the step operation on the to iterator. + """ + self.it_to.step() + + def step_both(self): + """ + Apply the step operation on both iterators. 
+ """ + self.step_from() + self.step_to() + + def remaining(self): + """ + Returns: + Remaining: the current state of the double iteration + """ + from_current = self.it_from.current() + to_current = self.it_to.current() + # no more files to iterate in both iterators + if not from_current and not to_current: + return Remaining.NoMoreFiles + # still some files to iterate in the to iterator + elif not from_current and to_current: + return Remaining.OnlyToFilesRemain + # still some files to iterate in the from iterator + elif from_current and not to_current: + return Remaining.OnlyFromFilesRemain + # still files to iterate in both iterators + else: + return Remaining.BothHaveFiles + + def compare(self): + """ + Compare the current iterated directory entries in both iterators + and return the comparison status. + + Returns: + dict: The status of the comparison with the following bool values: + * *same_hash*: indicates if the two entries have the same hash + * *same_perms*: indicates if the two entries have the same + permissions + * *both_are_dirs*: indicates if the two entries are directories + * *both_are_files*: indicates if the two entries are regular + files + * *file_and_dir*: indicates if one of the entries is a directory + and the other a regular file + * *from_is_empty_dir*: indicates if the from entry is the + empty directory + * *to_is_empty_dir*: indicates if the to entry is the + empty directory + """ + from_current_hash = self.it_from.current_hash() + to_current_hash = self.it_to.current_hash() + from_current_perms = self.it_from.current_perms() + to_current_perms = self.it_to.current_perms() + from_is_dir = self.it_from.current_is_dir() + to_is_dir = self.it_to.current_is_dir() + status = {} + # compare hash + status['same_hash'] = from_current_hash == to_current_hash + # compare permissions + status['same_perms'] = from_current_perms == to_current_perms + # check if both elements are directories + status['both_are_dirs'] = from_is_dir and to_is_dir + # check if both elements are regular files + status['both_are_files'] = not from_is_dir and not to_is_dir + # check if one element is a directory, the other a regular file + status['file_and_dir'] = (not status['both_are_dirs'] and + not status['both_are_files']) + # check if the from element is the empty directory + status['from_is_empty_dir'] = (from_is_dir and + from_current_hash == _empty_dir_hash) + # check if the to element is the empty directory + status['to_is_empty_dir'] = (to_is_dir and + to_current_hash == _empty_dir_hash) + return status diff --git a/swh/storage/api/client.py b/swh/storage/api/client.py index 6f0048c10..e2157a616 100644 --- a/swh/storage/api/client.py +++ b/swh/storage/api/client.py @@ -1,220 +1,237 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import SWHRemoteAPI from ..exc import StorageAPIError class RemoteStorage(SWHRemoteAPI): """Proxy to a remote storage API""" def __init__(self, url): super().__init__(api_exception=StorageAPIError, url=url) def check_config(self, *, check_write): return self.post('check_config', {'check_write': check_write}) def content_add(self, content): return self.post('content/add', {'content': content}) def content_update(self, content, keys=[]): return self.post('content/update', {'content': content, 'keys': keys}) def content_missing(self, 
content, key_hash='sha1'): return self.post('content/missing', {'content': content, 'key_hash': key_hash}) def content_missing_per_sha1(self, contents): return self.post('content/missing/sha1', {'contents': contents}) def content_get(self, content): return self.post('content/data', {'content': content}) def content_get_metadata(self, content): return self.post('content/metadata', {'content': content}) def content_find(self, content): return self.post('content/present', {'content': content}) def directory_add(self, directories): return self.post('directory/add', {'directories': directories}) def directory_missing(self, directories): return self.post('directory/missing', {'directories': directories}) def directory_get(self, directories): return self.post('directory', dict(directories=directories)) def directory_ls(self, directory, recursive=False): return self.get('directory/ls', {'directory': directory, 'recursive': recursive}) def revision_get(self, revisions): return self.post('revision', {'revisions': revisions}) def revision_get_by(self, origin_id, branch_name, timestamp, limit=None): return self.post('revision/by', dict(origin_id=origin_id, branch_name=branch_name, timestamp=timestamp, limit=limit)) def revision_log(self, revisions, limit=None): return self.post('revision/log', {'revisions': revisions, 'limit': limit}) def revision_log_by(self, origin_id, branch_name, timestamp, limit=None): return self.post('revision/logby', {'origin_id': origin_id, 'branch_name': branch_name, 'timestamp': timestamp, 'limit': limit}) def revision_shortlog(self, revisions, limit=None): return self.post('revision/shortlog', {'revisions': revisions, 'limit': limit}) def revision_add(self, revisions): return self.post('revision/add', {'revisions': revisions}) def revision_missing(self, revisions): return self.post('revision/missing', {'revisions': revisions}) def release_add(self, releases): return self.post('release/add', {'releases': releases}) def release_get(self, releases): return self.post('release', {'releases': releases}) def release_get_by(self, origin_id, limit=None): return self.post('release/by', dict(origin_id=origin_id, limit=limit)) def release_missing(self, releases): return self.post('release/missing', {'releases': releases}) def object_find_by_sha1_git(self, ids): return self.post('object/find_by_sha1_git', {'ids': ids}) def occurrence_get(self, origin_id): return self.post('occurrence', {'origin_id': origin_id}) def occurrence_add(self, occurrences): return self.post('occurrence/add', {'occurrences': occurrences}) def snapshot_add(self, origin, visit, snapshot, back_compat=False): return self.post('snapshot/add', { 'origin': origin, 'visit': visit, 'snapshot': snapshot, 'back_compat': back_compat}) def snapshot_get(self, snapshot_id): return self.post('snapshot', {'snapshot_id': snapshot_id}) def snapshot_get_by_origin_visit(self, origin, visit): return self.post('snapshot/by_origin_visit', {'origin': origin, 'visit': visit}) def snapshot_get_latest(self, origin, allowed_statuses=None): return self.post('snapshot/latest', { 'origin': origin, 'allowed_statuses': allowed_statuses }) def origin_get(self, origin): return self.post('origin/get', {'origin': origin}) def origin_search(self, url_pattern, offset=0, limit=50, regexp=False): return self.post('origin/search', {'url_pattern': url_pattern, 'offset': offset, 'limit': limit, 'regexp': regexp}) def origin_add(self, origins): return self.post('origin/add_multi', {'origins': origins}) def origin_add_one(self, origin): return 
self.post('origin/add', {'origin': origin}) def origin_visit_add(self, origin, ts): return self.post('origin/visit/add', {'origin': origin, 'ts': ts}) def origin_visit_update(self, origin, visit_id, status, metadata=None): return self.post('origin/visit/update', {'origin': origin, 'visit_id': visit_id, 'status': status, 'metadata': metadata}) def origin_visit_get(self, origin, last_visit=None, limit=None): return self.post('origin/visit/get', { 'origin': origin, 'last_visit': last_visit, 'limit': limit}) def origin_visit_get_by(self, origin, visit): return self.post('origin/visit/getby', {'origin': origin, 'visit': visit}) def person_get(self, person): return self.post('person', {'person': person}) def fetch_history_start(self, origin_id): return self.post('fetch_history/start', {'origin_id': origin_id}) def fetch_history_end(self, fetch_history_id, data): return self.post('fetch_history/end', {'fetch_history_id': fetch_history_id, 'data': data}) def fetch_history_get(self, fetch_history_id): return self.get('fetch_history', {'id': fetch_history_id}) def entity_add(self, entities): return self.post('entity/add', {'entities': entities}) def entity_get(self, uuid): return self.post('entity/get', {'uuid': uuid}) def entity_get_one(self, uuid): return self.get('entity', {'uuid': uuid}) def entity_get_from_lister_metadata(self, entities): return self.post('entity/from_lister_metadata', {'entities': entities}) def stat_counters(self): return self.get('stat/counters') def directory_entry_get_by_path(self, directory, paths): return self.post('directory/path', dict(directory=directory, paths=paths)) def tool_add(self, tools): return self.post('tool/add', {'tools': tools}) def tool_get(self, tool): return self.post('tool/data', {'tool': tool}) def origin_metadata_add(self, origin_id, ts, provider, tool, metadata): return self.post('origin/metadata/add', {'origin_id': origin_id, 'ts': ts, 'provider': provider, 'tool': tool, 'metadata': metadata}) def origin_metadata_get_by(self, origin_id, provider_type=None): return self.post('origin/metadata/get', { 'origin_id': origin_id, 'provider_type': provider_type }) def metadata_provider_add(self, provider_name, provider_type, provider_url, metadata): return self.post('provider/add', {'provider_name': provider_name, 'provider_type': provider_type, 'provider_url': provider_url, 'metadata': metadata}) def metadata_provider_get(self, provider_id): return self.post('provider/get', {'provider_id': provider_id}) def metadata_provider_get_by(self, provider): return self.post('provider/getby', {'provider': provider}) + + def diff_directories(self, from_dir, to_dir, track_renaming=False): + return self.post('algos/diff_directories', + {'from_dir': from_dir, + 'to_dir': to_dir, + 'track_renaming': track_renaming}) + + def diff_revisions(self, from_rev, to_rev, track_renaming=False): + return self.post('algos/diff_revisions', + {'from_rev': from_rev, + 'to_rev': to_rev, + 'track_renaming': track_renaming}) + + def diff_revision(self, revision, track_renaming=False): + return self.post('algos/diff_revision', + {'revision': revision, + 'track_renaming': track_renaming}) diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py index 0db44adba..23c48cc43 100644 --- a/swh/storage/api/server.py +++ b/swh/storage/api/server.py @@ -1,374 +1,389 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE 
file for more information import json import logging import click from flask import g, request from swh.core import config from swh.storage import get_storage from swh.core.api import (SWHServerAPIApp, decode_request, error_handler, encode_data_server as encode_data) DEFAULT_CONFIG_PATH = 'storage/storage' DEFAULT_CONFIG = { 'storage': ('dict', { 'cls': 'local', 'args': { 'db': 'dbname=softwareheritage-dev', 'objstorage': { 'cls': 'pathslicing', 'args': { 'root': '/srv/softwareheritage/objects', 'slicing': '0:2/2:4/4:6', }, }, }, }) } app = SWHServerAPIApp(__name__) @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) @app.before_request def before_request(): g.storage = get_storage(**app.config['storage']) @app.route('/') def index(): return 'SWH Storage API server' @app.route('/check_config', methods=['POST']) def check_config(): return encode_data(g.storage.check_config(**decode_request(request))) @app.route('/content/missing', methods=['POST']) def content_missing(): return encode_data(g.storage.content_missing(**decode_request(request))) @app.route('/content/missing/sha1', methods=['POST']) def content_missing_per_sha1(): return encode_data(g.storage.content_missing_per_sha1( **decode_request(request))) @app.route('/content/present', methods=['POST']) def content_find(): return encode_data(g.storage.content_find(**decode_request(request))) @app.route('/content/add', methods=['POST']) def content_add(): return encode_data(g.storage.content_add(**decode_request(request))) @app.route('/content/update', methods=['POST']) def content_update(): return encode_data(g.storage.content_update(**decode_request(request))) @app.route('/content/data', methods=['POST']) def content_get(): return encode_data(g.storage.content_get(**decode_request(request))) @app.route('/content/metadata', methods=['POST']) def content_get_metadata(): return encode_data(g.storage.content_get_metadata( **decode_request(request))) @app.route('/directory', methods=['POST']) def directory_get(): return encode_data(g.storage.directory_get(**decode_request(request))) @app.route('/directory/missing', methods=['POST']) def directory_missing(): return encode_data(g.storage.directory_missing(**decode_request(request))) @app.route('/directory/add', methods=['POST']) def directory_add(): return encode_data(g.storage.directory_add(**decode_request(request))) @app.route('/directory/path', methods=['POST']) def directory_entry_get_by_path(): return encode_data(g.storage.directory_entry_get_by_path( **decode_request(request))) @app.route('/directory/ls', methods=['GET']) def directory_ls(): dir = request.args['directory'].encode('utf-8', 'surrogateescape') rec = json.loads(request.args.get('recursive', 'False').lower()) return encode_data(g.storage.directory_ls(dir, recursive=rec)) @app.route('/revision/add', methods=['POST']) def revision_add(): return encode_data(g.storage.revision_add(**decode_request(request))) @app.route('/revision', methods=['POST']) def revision_get(): return encode_data(g.storage.revision_get(**decode_request(request))) @app.route('/revision/by', methods=['POST']) def revision_get_by(): return encode_data(g.storage.revision_get_by(**decode_request(request))) @app.route('/revision/log', methods=['POST']) def revision_log(): return encode_data(g.storage.revision_log(**decode_request(request))) @app.route('/revision/logby', methods=['POST']) def revision_log_by(): return encode_data(g.storage.revision_log_by(**decode_request(request))) 
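# The new 'algos/diff_*' routes registered at the end of this file expose the
# swh.storage.algos.diff functions over the RPC API. Below is a minimal usage
# sketch of the matching RemoteStorage client methods added by this patch; the
# server URL and the revision identifier are placeholder assumptions, not part
# of the patch itself.
from swh.storage.api.client import RemoteStorage

storage = RemoteStorage('http://localhost:5002/')  # assumes a locally running storage API server
rev_id = bytes.fromhex('aa' * 20)  # placeholder 20-byte sha1_git revision identifier
# list the file changes introduced by rev_id relative to its first parent
for change in storage.diff_revision(rev_id, track_renaming=True):
    print(change['type'], change['to_path'] or change['from_path'])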
@app.route('/revision/shortlog', methods=['POST']) def revision_shortlog(): return encode_data(g.storage.revision_shortlog(**decode_request(request))) @app.route('/revision/missing', methods=['POST']) def revision_missing(): return encode_data(g.storage.revision_missing(**decode_request(request))) @app.route('/release/add', methods=['POST']) def release_add(): return encode_data(g.storage.release_add(**decode_request(request))) @app.route('/release', methods=['POST']) def release_get(): return encode_data(g.storage.release_get(**decode_request(request))) @app.route('/release/by', methods=['POST']) def release_get_by(): return encode_data(g.storage.release_get_by(**decode_request(request))) @app.route('/release/missing', methods=['POST']) def release_missing(): return encode_data(g.storage.release_missing(**decode_request(request))) @app.route('/object/find_by_sha1_git', methods=['POST']) def object_find_by_sha1_git(): return encode_data(g.storage.object_find_by_sha1_git( **decode_request(request))) @app.route('/occurrence', methods=['POST']) def occurrence_get(): return encode_data(g.storage.occurrence_get(**decode_request(request))) @app.route('/occurrence/add', methods=['POST']) def occurrence_add(): return encode_data(g.storage.occurrence_add(**decode_request(request))) @app.route('/snapshot/add', methods=['POST']) def snapshot_add(): return encode_data(g.storage.snapshot_add(**decode_request(request))) @app.route('/snapshot', methods=['POST']) def snapshot_get(): return encode_data(g.storage.snapshot_get(**decode_request(request))) @app.route('/snapshot/by_origin_visit', methods=['POST']) def snapshot_get_by_origin_visit(): return encode_data(g.storage.snapshot_get_by_origin_visit( **decode_request(request))) @app.route('/snapshot/latest', methods=['POST']) def snapshot_get_latest(): return encode_data(g.storage.snapshot_get_latest( **decode_request(request))) @app.route('/origin/get', methods=['POST']) def origin_get(): return encode_data(g.storage.origin_get(**decode_request(request))) @app.route('/origin/search', methods=['POST']) def origin_search(): return encode_data(g.storage.origin_search(**decode_request(request))) @app.route('/origin/add_multi', methods=['POST']) def origin_add(): return encode_data(g.storage.origin_add(**decode_request(request))) @app.route('/origin/add', methods=['POST']) def origin_add_one(): return encode_data(g.storage.origin_add_one(**decode_request(request))) @app.route('/origin/visit/get', methods=['POST']) def origin_visit_get(): return encode_data(g.storage.origin_visit_get(**decode_request(request))) @app.route('/origin/visit/getby', methods=['POST']) def origin_visit_get_by(): return encode_data( g.storage.origin_visit_get_by(**decode_request(request))) @app.route('/origin/visit/add', methods=['POST']) def origin_visit_add(): return encode_data(g.storage.origin_visit_add(**decode_request(request))) @app.route('/origin/visit/update', methods=['POST']) def origin_visit_update(): return encode_data(g.storage.origin_visit_update( **decode_request(request))) @app.route('/person', methods=['POST']) def person_get(): return encode_data(g.storage.person_get(**decode_request(request))) @app.route('/fetch_history', methods=['GET']) def fetch_history_get(): return encode_data(g.storage.fetch_history_get(request.args['id'])) @app.route('/fetch_history/start', methods=['POST']) def fetch_history_start(): return encode_data( g.storage.fetch_history_start(**decode_request(request))) @app.route('/fetch_history/end', methods=['POST']) def fetch_history_end(): 
return encode_data( g.storage.fetch_history_end(**decode_request(request))) @app.route('/entity/add', methods=['POST']) def entity_add(): return encode_data( g.storage.entity_add(**decode_request(request))) @app.route('/entity/get', methods=['POST']) def entity_get(): return encode_data( g.storage.entity_get(**decode_request(request))) @app.route('/entity', methods=['GET']) def entity_get_one(): return encode_data(g.storage.entity_get_one(request.args['uuid'])) @app.route('/entity/from_lister_metadata', methods=['POST']) def entity_from_lister_metadata(): return encode_data( g.storage.entity_get_from_lister_metadata(**decode_request(request))) @app.route('/tool/data', methods=['POST']) def tool_get(): return encode_data(g.storage.tool_get( **decode_request(request))) @app.route('/tool/add', methods=['POST']) def tool_add(): return encode_data(g.storage.tool_add( **decode_request(request))) @app.route('/origin/metadata/add', methods=['POST']) def origin_metadata_add(): return encode_data(g.storage.origin_metadata_add(**decode_request( request))) @app.route('/origin/metadata/get', methods=['POST']) def origin_metadata_get_by(): return encode_data(g.storage.origin_metadata_get_by(**decode_request( request))) @app.route('/provider/add', methods=['POST']) def metadata_provider_add(): return encode_data(g.storage.metadata_provider_add(**decode_request( request))) @app.route('/provider/get', methods=['POST']) def metadata_provider_get(): return encode_data(g.storage.metadata_provider_get(**decode_request( request))) @app.route('/provider/getby', methods=['POST']) def metadata_provider_get_by(): return encode_data(g.storage.metadata_provider_get_by(**decode_request( request))) @app.route('/stat/counters', methods=['GET']) def stat_counters(): return encode_data(g.storage.stat_counters()) +@app.route('/algos/diff_directories', methods=['POST']) +def diff_directories(): + return encode_data(g.storage.diff_directories(**decode_request(request))) + + +@app.route('/algos/diff_revisions', methods=['POST']) +def diff_revisions(): + return encode_data(g.storage.diff_revisions(**decode_request(request))) + + +@app.route('/algos/diff_revision', methods=['POST']) +def diff_revision(): + return encode_data(g.storage.diff_revision(**decode_request(request))) + + def run_from_webserver(environ, start_response, config_path=DEFAULT_CONFIG_PATH): """Run the WSGI app from the webserver, loading the configuration.""" cfg = config.load_named_config(config_path, DEFAULT_CONFIG) app.config.update(cfg) handler = logging.StreamHandler() app.logger.addHandler(handler) return app(environ, start_response) @click.command() @click.argument('config-path', required=1) @click.option('--host', default='0.0.0.0', help="Host to run the server") @click.option('--port', default=5002, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=True, help="Indicates if the server should run in debug mode") def launch(config_path, host, port, debug): app.config.update(config.read(config_path, DEFAULT_CONFIG)) app.run(host, port=int(port), debug=bool(debug)) if __name__ == '__main__': launch() diff --git a/swh/storage/storage.py b/swh/storage/storage.py index 3d6c9ccf4..d58d1efb1 100644 --- a/swh/storage/storage.py +++ b/swh/storage/storage.py @@ -1,1583 +1,1638 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more 
information from collections import defaultdict import datetime import itertools import json import dateutil.parser import psycopg2 from . import converters from .common import db_transaction_generator, db_transaction from .db import Db from .exc import StorageDBError +from .algos import diff from swh.model.hashutil import ALGORITHMS from swh.objstorage import get_objstorage from swh.objstorage.exc import ObjNotFoundError # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 CONTENT_HASH_KEYS = ['sha1', 'sha1_git', 'sha256', 'blake2s256'] class Storage(): """SWH storage proxy, encompassing DB and object storage """ def __init__(self, db, objstorage): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection obj_root: path to the root of the object storage """ try: if isinstance(db, psycopg2.extensions.connection): self.db = Db(db) else: self.db = Db.connect(db) except psycopg2.OperationalError as e: raise StorageDBError(e) self.objstorage = get_objstorage(**objstorage) def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" if not self.objstorage.check_config(check_write=check_write): return False # Check permissions on one of the tables with self.db.transaction() as cur: if check_write: check = 'INSERT' else: check = 'SELECT' cur.execute( "select has_table_privilege(current_user, 'content', %s)", (check,) ) return cur.fetchone()[0] return True def content_add(self, content): """Add content blobs to the storage Note: in case of DB errors, objects might have already been added to the object storage and will not be removed. Since addition to the object storage is idempotent, that should not be a problem. Args: content (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in """ db = self.db def _unique_key(hash, keys=CONTENT_HASH_KEYS): """Given a hash (tuple or dict), return a unique key from the aggregation of keys. 
""" if isinstance(hash, tuple): return hash return tuple([hash[k] for k in keys]) content_by_status = defaultdict(list) for d in content: if 'status' not in d: d['status'] = 'visible' if 'length' not in d: d['length'] = -1 content_by_status[d['status']].append(d) content_with_data = content_by_status['visible'] content_without_data = content_by_status['absent'] missing_content = set(self.content_missing(content_with_data)) missing_skipped = set(_unique_key(hashes) for hashes in self.skipped_content_missing( content_without_data)) with db.transaction() as cur: if missing_content: # create temporary table for metadata injection db.mktemp('content', cur) def add_to_objstorage(cont): self.objstorage.add(cont['data'], obj_id=cont['sha1']) content_filtered = (cont for cont in content_with_data if cont['sha1'] in missing_content) db.copy_to(content_filtered, 'tmp_content', db.content_get_metadata_keys, cur, item_cb=add_to_objstorage) # move metadata in place db.content_add_from_temp(cur) if missing_skipped: missing_filtered = (cont for cont in content_without_data if _unique_key(cont) in missing_skipped) db.mktemp('skipped_content', cur) db.copy_to(missing_filtered, 'tmp_skipped_content', db.skipped_content_keys, cur) # move metadata in place db.skipped_content_add_from_temp(cur) @db_transaction def content_update(self, content, keys=[], cur=None): """Update content blobs to the storage. Does nothing for unknown contents or skipped ones. Args: content (iterable): iterable of dictionaries representing individual pieces of content to update. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent keys (list): List of keys (str) whose values needs an update, e.g., new hash column """ db = self.db # TODO: Add a check on input keys. How to properly implement # this? We don't know yet the new columns. db.mktemp('content') select_keys = list(set(db.content_get_metadata_keys).union(set(keys))) db.copy_to(content, 'tmp_content', select_keys, cur) db.content_update_from_temp(keys_to_update=keys, cur=cur) def content_get(self, content): """Retrieve in bulk contents and their data. Args: content: iterables of sha1 Yields: dict: Generates streams of contents as dict with their raw data: - sha1: sha1's content - data: bytes data of the content Raises: ValueError in case of too much contents are required. cf. BULK_BLOCK_CONTENT_LEN_MAX """ # FIXME: Improve on server module to slice the result if len(content) > BULK_BLOCK_CONTENT_LEN_MAX: raise ValueError( "Send at maximum %s contents." 
% BULK_BLOCK_CONTENT_LEN_MAX) for obj_id in content: try: data = self.objstorage.get(obj_id) except ObjNotFoundError: yield None continue yield {'sha1': obj_id, 'data': data} @db_transaction_generator def content_get_metadata(self, content, cur=None): """Retrieve content metadata in bulk Args: content: iterable of content identifiers (sha1) Returns: an iterable with content metadata corresponding to the given ids """ db = self.db db.store_tmp_bytea(content, cur) for content_metadata in db.content_get_metadata_from_temp(cur): yield dict(zip(db.content_get_metadata_keys, content_metadata)) @db_transaction_generator def content_missing(self, content, key_hash='sha1', cur=None): """List content missing from storage Args: - content: iterable of dictionaries containing one key for each - checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, - mapped to the corresponding checksum, and a length key mapped - to the content length. - key_hash: the name of the hash used as key (default: 'sha1') + content ([dict]): iterable of dictionaries containing one + key for each checksum algorithm in + :data:`swh.model.hashutil.ALGORITHMS`, + mapped to the corresponding checksum, + and a length key mapped to the content + length. + + key_hash (str): name of the column to use as hash id + result (default: 'sha1') Returns: - iterable: missing ids + iterable ([bytes]): missing content ids (as per the + key_hash column) Raises: TODO: an exception when we get a hash collision. """ db = self.db keys = CONTENT_HASH_KEYS if key_hash not in CONTENT_HASH_KEYS: raise ValueError("key_hash should be one of %s" % keys) key_hash_idx = keys.index(key_hash) # Create temporary table for metadata injection db.mktemp('content', cur) db.copy_to(content, 'tmp_content', keys + ['length'], cur) for obj in db.content_missing_from_temp(cur): yield obj[key_hash_idx] @db_transaction_generator def content_missing_per_sha1(self, contents, cur=None): """List content missing from storage based only on sha1. Args: contents: Iterable of sha1 to check for absence. Returns: iterable: missing ids Raises: TODO: an exception when we get a hash collision. """ db = self.db db.store_tmp_bytea(contents, cur) for obj in db.content_missing_per_sha1_from_temp(cur): yield obj[0] @db_transaction_generator def skipped_content_missing(self, content, cur=None): """List skipped_content missing from storage Args: content: iterable of dictionaries containing the data for each checksum algorithm. Returns: iterable: missing signatures """ keys = CONTENT_HASH_KEYS db = self.db db.mktemp('skipped_content', cur) db.copy_to(content, 'tmp_skipped_content', keys + ['length', 'reason'], cur) yield from db.skipped_content_missing_from_temp(cur) @db_transaction def content_find(self, content, cur=None): """Find a content hash in db. Args: content: a dictionary representing one content hash, mapping checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to checksum values Returns: a triplet (sha1, sha1_git, sha256) if the content exist or None otherwise. Raises: ValueError: in case the key of the dictionary is not sha1, sha1_git nor sha256. 
""" db = self.db if not set(content).intersection(ALGORITHMS): raise ValueError('content keys must contain at least one of: ' 'sha1, sha1_git, sha256, blake2s256') c = db.content_find(sha1=content.get('sha1'), sha1_git=content.get('sha1_git'), sha256=content.get('sha256'), blake2s256=content.get('blake2s256'), cur=cur) if c: return dict(zip(db.content_find_cols, c)) return None def directory_add(self, directories): """Add directories to the storage Args: directories (iterable): iterable of dictionaries representing the individual directories to add. Each dict has the following keys: - id (sha1_git): the id of the directory to add - entries (list): list of dicts for each entry in the directory. Each dict has the following keys: - name (bytes) - type (one of 'file', 'dir', 'rev'): type of the directory entry (file, directory, revision) - target (sha1_git): id of the object pointed at by the directory entry - perms (int): entry permissions """ dirs = set() dir_entries = { 'file': defaultdict(list), 'dir': defaultdict(list), 'rev': defaultdict(list), } for cur_dir in directories: dir_id = cur_dir['id'] dirs.add(dir_id) for src_entry in cur_dir['entries']: entry = src_entry.copy() entry['dir_id'] = dir_id dir_entries[entry['type']][dir_id].append(entry) dirs_missing = set(self.directory_missing(dirs)) if not dirs_missing: return db = self.db with db.transaction() as cur: # Copy directory ids dirs_missing_dict = ({'id': dir} for dir in dirs_missing) db.mktemp('directory', cur) db.copy_to(dirs_missing_dict, 'tmp_directory', ['id'], cur) # Copy entries for entry_type, entry_list in dir_entries.items(): entries = itertools.chain.from_iterable( entries_for_dir for dir_id, entries_for_dir in entry_list.items() if dir_id in dirs_missing) db.mktemp_dir_entry(entry_type) db.copy_to( entries, 'tmp_directory_entry_%s' % entry_type, ['target', 'name', 'perms', 'dir_id'], cur, ) # Do the final copy db.directory_add_from_temp(cur) @db_transaction_generator def directory_missing(self, directories, cur): """List directories missing from storage Args: directories (iterable): an iterable of directory ids Yields: missing directory ids """ db = self.db # Create temporary table for metadata injection db.mktemp('directory', cur) directories_dicts = ({'id': dir} for dir in directories) db.copy_to(directories_dicts, 'tmp_directory', ['id'], cur) for obj in db.directory_missing_from_temp(cur): yield obj[0] @db_transaction_generator def directory_get(self, directories, cur=None): """Get information on directories. Args: - directories: an iterable of directory ids Returns: List of directories as dict with keys and associated values. """ db = self.db keys = ('id', 'dir_entries', 'file_entries', 'rev_entries') db.mktemp('directory', cur) db.copy_to(({'id': dir_id} for dir_id in directories), 'tmp_directory', ['id'], cur) dirs = db.directory_get_from_temp(cur) for line in dirs: yield dict(zip(keys, line)) @db_transaction_generator def directory_ls(self, directory, recursive=False, cur=None): """Get entries for one directory. Args: - directory: the directory to list entries from. - recursive: if flag on, this list recursively from this directory. Returns: List of entries for such directory. 
""" db = self.db if recursive: res_gen = db.directory_walk(directory, cur=cur) else: res_gen = db.directory_walk_one(directory, cur=cur) for line in res_gen: yield dict(zip(db.directory_ls_cols, line)) @db_transaction def directory_entry_get_by_path(self, directory, paths, cur=None): """Get the directory entry (either file or dir) from directory with path. Args: - directory: sha1 of the top level directory - paths: path to lookup from the top level directory. From left (top) to right (bottom). Returns: The corresponding directory entry if found, None otherwise. """ db = self.db res = db.directory_entry_get_by_path(directory, paths, cur) if res: return dict(zip(db.directory_ls_cols, res)) def revision_add(self, revisions): """Add revisions to the storage Args: revisions (iterable): iterable of dictionaries representing the individual revisions to add. Each dict has the following keys: - id (sha1_git): id of the revision to add - date (datetime.DateTime): date the revision was written - date_offset (int): offset from UTC in minutes the revision was written - date_neg_utc_offset (boolean): whether a null date_offset represents a negative UTC offset - committer_date (datetime.DateTime): date the revision got added to the origin - committer_date_offset (int): offset from UTC in minutes the revision was added to the origin - committer_date_neg_utc_offset (boolean): whether a null committer_date_offset represents a negative UTC offset - type (one of 'git', 'tar'): type of the revision added - directory (sha1_git): the directory the revision points at - message (bytes): the message associated with the revision - author_name (bytes): the name of the revision author - author_email (bytes): the email of the revision author - committer_name (bytes): the name of the revision committer - committer_email (bytes): the email of the revision committer - metadata (jsonb): extra information as dictionary - synthetic (bool): revision's nature (tarball, directory creates synthetic revision) - parents (list of sha1_git): the parents of this revision """ db = self.db revisions_missing = set(self.revision_missing( set(revision['id'] for revision in revisions))) if not revisions_missing: return with db.transaction() as cur: db.mktemp_revision(cur) revisions_filtered = ( converters.revision_to_db(revision) for revision in revisions if revision['id'] in revisions_missing) parents_filtered = [] db.copy_to( revisions_filtered, 'tmp_revision', db.revision_add_cols, cur, lambda rev: parents_filtered.extend(rev['parents'])) db.revision_add_from_temp(cur) db.copy_to(parents_filtered, 'revision_history', ['id', 'parent_id', 'parent_rank'], cur) @db_transaction_generator def revision_missing(self, revisions, cur=None): """List revisions missing from storage Args: revisions (iterable): revision ids Yields: missing revision ids """ db = self.db db.store_tmp_bytea(revisions, cur) for obj in db.revision_missing_from_temp(cur): yield obj[0] @db_transaction_generator def revision_get(self, revisions, cur): """Get all revisions from storage Args: revisions: an iterable of revision ids Returns: iterable: an iterable of revisions as dictionaries (or None if the revision doesn't exist) """ db = self.db db.store_tmp_bytea(revisions, cur) for line in self.db.revision_get_from_temp(cur): data = converters.db_to_revision( dict(zip(db.revision_get_cols, line)) ) if not data['type']: yield None continue yield data @db_transaction_generator def revision_log(self, revisions, limit=None, cur=None): """Fetch revision entry from the given root 
revisions. Args: revisions: array of root revision to lookup limit: limitation on the output result. Default to None. Yields: List of revision log from such revisions root. """ db = self.db for line in db.revision_log(revisions, limit, cur): data = converters.db_to_revision( dict(zip(db.revision_get_cols, line)) ) if not data['type']: yield None continue yield data @db_transaction_generator def revision_shortlog(self, revisions, limit=None, cur=None): """Fetch the shortlog for the given revisions Args: revisions: list of root revisions to lookup limit: depth limitation for the output Yields: a list of (id, parents) tuples. """ db = self.db yield from db.revision_shortlog(revisions, limit, cur) @db_transaction_generator def revision_log_by(self, origin_id, branch_name=None, timestamp=None, limit=None, cur=None): """Fetch revision entry from the actual origin_id's latest revision. Args: origin_id: the origin id from which deriving the revision branch_name: (optional) occurrence's branch name timestamp: (optional) occurrence's time limit: (optional) depth limitation for the output. Default to None. Yields: The revision log starting from the revision derived from the (origin, branch_name, timestamp) combination if any. Returns: None if no revision matching this combination is found. """ db = self.db # Retrieve the revision by criterion revisions = list(db.revision_get_by( origin_id, branch_name, timestamp, limit=1)) if not revisions: return None revision_id = revisions[0][0] # otherwise, retrieve the revision log from that revision yield from self.revision_log([revision_id], limit) def release_add(self, releases): """Add releases to the storage Args: releases (iterable): iterable of dictionaries representing the individual releases to add. Each dict has the following keys: - id (sha1_git): id of the release to add - revision (sha1_git): id of the revision the release points to - date (datetime.DateTime): the date the release was made - date_offset (int): offset from UTC in minutes the release was made - date_neg_utc_offset (boolean): whether a null date_offset represents a negative UTC offset - name (bytes): the name of the release - comment (bytes): the comment associated with the release - author_name (bytes): the name of the release author - author_email (bytes): the email of the release author """ db = self.db release_ids = set(release['id'] for release in releases) releases_missing = set(self.release_missing(release_ids)) if not releases_missing: return with db.transaction() as cur: db.mktemp_release(cur) releases_filtered = ( converters.release_to_db(release) for release in releases if release['id'] in releases_missing ) db.copy_to(releases_filtered, 'tmp_release', db.release_add_cols, cur) db.release_add_from_temp(cur) @db_transaction_generator def release_missing(self, releases, cur=None): """List releases missing from storage Args: releases: an iterable of release ids Returns: a list of missing release ids """ db = self.db # Create temporary table for metadata injection db.store_tmp_bytea(releases, cur) for obj in db.release_missing_from_temp(cur): yield obj[0] @db_transaction_generator def release_get(self, releases, cur=None): """Given a list of sha1, return the releases's information Args: releases: list of sha1s Yields: releases: list of releases as dicts with the following keys: - id: origin's id - revision: origin's type - url: origin's url - lister: lister's uuid - project: project's uuid (FIXME, retrieve this information) Raises: ValueError: if the keys does not match (url and 
type) nor id. """ db = self.db # Create temporary table for metadata injection db.store_tmp_bytea(releases, cur) for release in db.release_get_from_temp(cur): yield converters.db_to_release( dict(zip(db.release_get_cols, release)) ) @db_transaction def snapshot_add(self, origin, visit, snapshot, back_compat=False, cur=None): """Add a snapshot for the given origin/visit couple Args: origin (int): id of the origin visit (int): id of the visit snapshot (dict): the snapshot to add to the visit, containing the following keys: - **id** (:class:`bytes`): id of the snapshot - **branches** (:class:`dict`): branches the snapshot contains, mapping the branch name (:class:`bytes`) to the branch target, itself a :class:`dict` (or ``None`` if the branch points to an unknown object) - **target_type** (:class:`str`): one of ``content``, ``directory``, ``revision``, ``release``, ``snapshot``, ``alias`` - **target** (:class:`bytes`): identifier of the target (currently a ``sha1_git`` for all object kinds, or the name of the target branch for aliases) back_compat (bool): whether to add the occurrences for backwards-compatibility """ db = self.db if not db.snapshot_exists(snapshot['id'], cur): db.mktemp_snapshot_branch(cur) db.copy_to( ( { 'name': name, 'target': info['target'] if info else None, 'target_type': info['target_type'] if info else None, } for name, info in snapshot['branches'].items() ), 'tmp_snapshot_branch', ['name', 'target', 'target_type'], cur, ) db.snapshot_add(origin, visit, snapshot['id'], cur) if not back_compat: return # TODO: drop this compat feature occurrences = [] for name, info in snapshot['branches'].items(): if not info: target = b'\x00' * 20 target_type = 'revision' elif info['target_type'] == 'alias': continue else: target = info['target'] target_type = info['target_type'] occurrences.append({ 'origin': origin, 'visit': visit, 'branch': name, 'target': target, 'target_type': target_type, }) self.occurrence_add(occurrences) @db_transaction def snapshot_get(self, snapshot_id, cur=None): """Get the snapshot with the given id Args: snapshot_id (bytes): id of the snapshot Returns: dict: a snapshot with two keys: id:: identifier for the snapshot branches:: a list of branches contained by the snapshot """ db = self.db branches = {} for branch in db.snapshot_get_by_id(snapshot_id, cur): branch = dict(zip(db.snapshot_get_cols, branch)) del branch['snapshot_id'] name = branch.pop('name') if branch == {'target': None, 'target_type': None}: branch = None branches[name] = branch if branches: return {'id': snapshot_id, 'branches': branches} if db.snapshot_exists(snapshot_id, cur): # empty snapshot return {'id': snapshot_id, 'branches': {}} return None @db_transaction def snapshot_get_by_origin_visit(self, origin, visit, cur=None): """Get the snapshot for the given origin visit Args: origin (int): the origin identifier visit (int): the visit identifier Returns: dict: a snapshot with two keys: id:: identifier for the snapshot branches:: a dictionary containing the snapshot branch information """ db = self.db snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur) if snapshot_id: return self.snapshot_get(snapshot_id, cur=cur) else: # compatibility code during the snapshot migration origin_visit_info = self.origin_visit_get_by(origin, visit, cur=cur) if origin_visit_info is None: return None ret = {'id': None} ret['branches'] = origin_visit_info['occurrences'] return ret return None @db_transaction def snapshot_get_latest(self, origin, allowed_statuses=None, cur=None): """Get the 
latest snapshot for the given origin, optionally only from visits that have one of the given allowed_statuses. Args: origin (int): the origin identifier allowed_statuses (list of str): list of visit statuses considered to find the latest snapshot for the visit. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. Returns: dict: a snapshot with two keys: id:: identifier for the snapshot branches:: a dictionary containing the snapshot branch information """ db = self.db origin_visit = db.origin_visit_get_latest_snapshot( origin, allowed_statuses=allowed_statuses, cur=cur) if origin_visit: origin_visit = dict(zip(db.origin_visit_get_cols, origin_visit)) return self.snapshot_get(origin_visit['snapshot'], cur=cur) @db_transaction def occurrence_add(self, occurrences, cur=None): """Add occurrences to the storage Args: occurrences: iterable of dictionaries representing the individual occurrences to add. Each dict has the following keys: - origin (int): id of the origin corresponding to the occurrence - visit (int): id of the visit corresponding to the occurrence - branch (str): the reference name of the occurrence - target (sha1_git): the id of the object pointed to by the occurrence - target_type (str): the type of object pointed to by the occurrence """ db = self.db db.mktemp_occurrence_history(cur) db.copy_to(occurrences, 'tmp_occurrence_history', ['origin', 'branch', 'target', 'target_type', 'visit'], cur) db.occurrence_history_add_from_temp(cur) @db_transaction_generator def occurrence_get(self, origin_id, cur=None): """Retrieve occurrence information per origin_id. Args: origin_id: The occurrence's origin. Yields: List of occurrences matching criterion. """ db = self.db for line in db.occurrence_get(origin_id, cur): yield { 'origin': line[0], 'branch': line[1], 'target': line[2], 'target_type': line[3], } @db_transaction def origin_visit_add(self, origin, ts, cur=None): """Add an origin_visit for the origin at ts with status 'ongoing'. Args: origin: Visited Origin id ts: timestamp of such visit Returns: dict: dictionary with keys origin and visit where: - origin: origin identifier - visit: the visit identifier for the new visit occurrence - ts (datetime.DateTime): the visit date """ if isinstance(ts, str): ts = dateutil.parser.parse(ts) return { 'origin': origin, 'visit': self.db.origin_visit_add(origin, ts, cur) } @db_transaction def origin_visit_update(self, origin, visit_id, status, metadata=None, cur=None): """Update an origin_visit's status. Args: origin: Visited Origin id visit_id: Visit's id status: Visit's new status metadata: Data associated to the visit Returns: None """ return self.db.origin_visit_update(origin, visit_id, status, metadata, cur) @db_transaction_generator def origin_visit_get(self, origin, last_visit=None, limit=None, cur=None): """Retrieve all the origin's visit's information. Args: origin (int): The occurrence's origin (identifier). last_visit (int): Starting point from which listing the next visits Default to None limit (int): Number of results to return from the last visit. Default to None Yields: List of visits. """ db = self.db for line in db.origin_visit_get_all( origin, last_visit=last_visit, limit=limit, cur=cur): data = dict(zip(self.db.origin_visit_get_cols, line)) yield data @db_transaction def origin_visit_get_by(self, origin, visit, cur=None): """Retrieve origin visit's information. Args: origin: The occurrence's origin (identifier). 
Returns: The information on that particular (origin, visit) """ db = self.db ori_visit = db.origin_visit_get(origin, visit, cur) if not ori_visit: return None ori_visit = dict(zip(self.db.origin_visit_get_cols, ori_visit)) if ori_visit['snapshot']: ori_visit['occurrences'] = self.snapshot_get(ori_visit['snapshot'], cur=cur)['branches'] return ori_visit # TODO: remove Backwards compatibility after snapshot migration occs = {} for occ in db.occurrence_by_origin_visit(origin, visit): _, branch_name, target, target_type = occ occs[branch_name] = { 'target': target, 'target_type': target_type } ori_visit['occurrences'] = occs return ori_visit @db_transaction_generator def revision_get_by(self, origin_id, branch_name=None, timestamp=None, limit=None, cur=None): """Given an origin_id, retrieve occurrences' list per given criterions. Args: origin_id: The origin to filter on. branch_name: (optional) branch name. timestamp: (optional) time. limit: (optional) limit Yields: List of occurrences matching the criterions or None if nothing is found. """ for line in self.db.revision_get_by(origin_id, branch_name, timestamp, limit=limit, cur=cur): data = converters.db_to_revision( dict(zip(self.db.revision_get_cols, line)) ) if not data['type']: yield None continue yield data def release_get_by(self, origin_id, limit=None): """Given an origin id, return all the tag objects pointing to heads of origin_id. Args: origin_id: the origin to filter on. limit: None by default Yields: List of releases matching the criterions or None if nothing is found. """ for line in self.db.release_get_by(origin_id, limit=limit): data = converters.db_to_release( dict(zip(self.db.release_get_cols, line)) ) yield data @db_transaction def object_find_by_sha1_git(self, ids, cur=None): """Return the objects found with the given ids. Args: ids: a generator of sha1_gits Returns: dict: a mapping from id to the list of objects found. Each object found is itself a dict with keys: - sha1_git: the input id - type: the type of object found - id: the id of the object found - object_id: the numeric id of the object found. """ db = self.db ret = {id: [] for id in ids} for retval in db.object_find_by_sha1_git(ids): if retval[1]: ret[retval[0]].append(dict(zip(db.object_find_by_sha1_git_cols, retval))) return ret origin_keys = ['id', 'type', 'url', 'lister', 'project'] @db_transaction def origin_get(self, origin, cur=None): """Return the origin either identified by its id or its tuple (type, url). Args: origin: dictionary representing the individual origin to find. This dict has either the keys type and url: - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to or the id: - id: the origin id Returns: dict: the origin dictionary with the keys: - id: origin's id - type: origin's type - url: origin's url - lister: lister's uuid - project: project's uuid (FIXME, retrieve this information) Raises: ValueError: if the keys does not match (url and type) nor id. 
""" db = self.db origin_id = origin.get('id') if origin_id: # check lookup per id first ori = db.origin_get(origin_id, cur) elif 'type' in origin and 'url' in origin: # or lookup per type, url ori = db.origin_get_with(origin['type'], origin['url'], cur) else: # unsupported lookup raise ValueError('Origin must have either id or (type and url).') if ori: return dict(zip(self.origin_keys, ori)) return None @db_transaction_generator def origin_search(self, url_pattern, offset=0, limit=50, regexp=False, cur=None): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern: the string pattern to search for in origin urls offset: number of found origins to skip before returning results limit: the maximum number of found origins to return regexp: if True, consider the provided pattern as a regular expression and return origins whose urls match it Returns: An iterable of dict containing origin information as returned by :meth:`swh.storage.storage.Storage.origin_get`. """ db = self.db for origin in db.origin_search(url_pattern, offset, limit, regexp, cur): yield dict(zip(self.origin_keys, origin)) @db_transaction def _person_add(self, person, cur=None): """Add a person in storage. Note: Internal function for now, do not use outside of this module. Do not do anything fancy in case a person already exists. Please adapt code if more checks are needed. Args: person: dictionary with keys name and email. Returns: Id of the new person. """ db = self.db return db.person_add(person) @db_transaction_generator def person_get(self, person, cur=None): """Return the persons identified by their ids. Args: person: array of ids. Returns: The array of persons corresponding of the ids. """ db = self.db for person in db.person_get(person): yield dict(zip(db.person_get_cols, person)) @db_transaction def origin_add(self, origins, cur=None): """Add origins to the storage Args: origins: list of dictionaries representing the individual origins, with the following keys: - type: the origin type ('git', 'svn', 'deb', ...) - url (bytes): the url the origin points to Returns: list: ids corresponding to the given origins """ ret = [] for origin in origins: ret.append(self.origin_add_one(origin, cur=cur)) return ret @db_transaction def origin_add_one(self, origin, cur=None): """Add origin to the storage Args: origin: dictionary representing the individual origin to add. This dict has the following keys: - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to Returns: the id of the added origin, or of the identical one that already exists. """ db = self.db data = db.origin_get_with(origin['type'], origin['url'], cur) if data: return data[0] return db.origin_add(origin['type'], origin['url'], cur) @db_transaction def fetch_history_start(self, origin_id, cur=None): """Add an entry for origin origin_id in fetch_history. Returns the id of the added fetch_history entry """ fetch_history = { 'origin': origin_id, 'date': datetime.datetime.now(tz=datetime.timezone.utc), } return self.db.create_fetch_history(fetch_history, cur) @db_transaction def fetch_history_end(self, fetch_history_id, data, cur=None): """Close the fetch_history entry with id `fetch_history_id`, replacing its data with `data`. 
""" now = datetime.datetime.now(tz=datetime.timezone.utc) fetch_history = self.db.get_fetch_history(fetch_history_id, cur) if not fetch_history: raise ValueError('No fetch_history with id %d' % fetch_history_id) fetch_history['duration'] = now - fetch_history['date'] fetch_history.update(data) self.db.update_fetch_history(fetch_history, cur) @db_transaction def fetch_history_get(self, fetch_history_id, cur=None): """Get the fetch_history entry with id `fetch_history_id`. """ return self.db.get_fetch_history(fetch_history_id, cur) @db_transaction def entity_add(self, entities, cur=None): """Add the given entitites to the database (in entity_history). Args: entities (iterable): iterable of dictionaries with the following keys: - uuid (uuid): id of the entity - parent (uuid): id of the parent entity - name (str): name of the entity - type (str): type of entity (one of 'organization', 'group_of_entities', 'hosting', 'group_of_persons', 'person', 'project') - description (str, optional): description of the entity - homepage (str): url of the entity's homepage - active (bool): whether the entity is active - generated (bool): whether the entity was generated - lister_metadata (dict): lister-specific entity metadata - metadata (dict): other metadata for the entity - validity (datetime.DateTime array): timestamps at which we listed the entity. """ db = self.db cols = list(db.entity_history_cols) cols.remove('id') db.mktemp_entity_history() db.copy_to(entities, 'tmp_entity_history', cols, cur) db.entity_history_add_from_temp() @db_transaction_generator def entity_get_from_lister_metadata(self, entities, cur=None): """Fetch entities from the database, matching with the lister and associated metadata. Args: entities (iterable): dictionaries containing the lister metadata to look for. Useful keys are 'lister', 'type', 'id', ... Yields: fetched entities with all their attributes. If no match was found, the returned entity is None. """ db = self.db db.mktemp_entity_lister(cur) mapped_entities = [] for i, entity in enumerate(entities): mapped_entity = { 'id': i, 'lister_metadata': entity, } mapped_entities.append(mapped_entity) db.copy_to(mapped_entities, 'tmp_entity_lister', ['id', 'lister_metadata'], cur) cur.execute('''select id, %s from swh_entity_from_tmp_entity_lister() order by id''' % ','.join(db.entity_cols)) for id, *entity_vals in cur: fetched_entity = dict(zip(db.entity_cols, entity_vals)) if fetched_entity['uuid']: yield fetched_entity else: yield { 'uuid': None, 'lister_metadata': entities[i], } @db_transaction_generator def entity_get(self, uuid, cur=None): """Returns the list of entity per its uuid identifier and also its parent hierarchy. Args: uuid: entity's identifier Returns: List of entities starting with entity with uuid and the parent hierarchy from such entity. """ db = self.db for entity in db.entity_get(uuid, cur): yield dict(zip(db.entity_cols, entity)) @db_transaction def entity_get_one(self, uuid, cur=None): """Returns one entity using its uuid identifier. 
Args: uuid: entity's identifier Returns: the object corresponding to the given entity """ db = self.db entity = db.entity_get_one(uuid, cur) if entity: return dict(zip(db.entity_cols, entity)) else: return None @db_transaction def stat_counters(self, cur=None): """compute statistics about the number of tuples in various tables Returns: dict: a dictionary mapping textual labels (e.g., content) to integer values (e.g., the number of tuples in table content) """ return {k: v for (k, v) in self.db.stat_counters()} @db_transaction def origin_metadata_add(self, origin_id, ts, provider, tool, metadata, cur=None): """ Add an origin_metadata for the origin at ts with provenance and metadata. Args: origin_id (int): the origin's id for which the metadata is added ts (datetime): timestamp of the found metadata provider (int): the provider of metadata (ex:'hal') tool (int): tool used to extract metadata metadata (jsonb): the metadata retrieved at the time and location Returns: id (int): the origin_metadata unique id """ if isinstance(ts, str): ts = dateutil.parser.parse(ts) return self.db.origin_metadata_add(origin_id, ts, provider, tool, metadata, cur) @db_transaction_generator def origin_metadata_get_by(self, origin_id, provider_type=None, cur=None): """Retrieve list of all origin_metadata entries for the origin_id Args: origin_id (int): the unique origin identifier provider_type (str): (optional) type of provider Returns: list of dicts: the origin_metadata dictionary with the keys: - id (int): origin_metadata's id - origin_id (int): origin's id - discovery_date (datetime): timestamp of discovery - tool_id (int): metadata's extracting tool - metadata (jsonb) - provider_id (int): metadata's provider - provider_name (str) - provider_type (str) - provider_url (str) """ db = self.db for line in db.origin_metadata_get_by(origin_id, provider_type, cur): yield dict(zip(db.origin_metadata_get_cols, line)) @db_transaction_generator def tool_add(self, tools, cur=None): """Add new tools to the storage. Args: tools (iterable of :class:`dict`): Tool information to add to storage. Each tool is a :class:`dict` with the following keys: - name (:class:`str`): name of the tool - version (:class:`str`): version of the tool - configuration (:class:`dict`): configuration of the tool, must be json-encodable Returns: `iterable` of :class:`dict`: All the tools inserted in storage (including the internal ``id``). The order of the list is not guaranteed to match the order of the initial list. """ db = self.db db.mktemp_tool(cur) db.copy_to(tools, 'tmp_tool', ['name', 'version', 'configuration'], cur) tools = db.tool_add_from_temp(cur) for line in tools: yield dict(zip(db.tool_cols, line)) @db_transaction def tool_get(self, tool, cur=None): """Retrieve tool information. Args: tool (dict): Tool information we want to retrieve from storage. The dicts have the same keys as those used in :func:`tool_add`. Returns: dict: The full tool information if it exists (``id`` included), None otherwise. 
""" db = self.db tool_conf = tool['configuration'] if isinstance(tool_conf, dict): tool_conf = json.dumps(tool_conf) idx = db.tool_get(tool['name'], tool['version'], tool_conf) if not idx: return None return dict(zip(self.db.tool_cols, idx)) @db_transaction def metadata_provider_add(self, provider_name, provider_type, provider_url, metadata, cur=None): db = self.db return db.metadata_provider_add(provider_name, provider_type, provider_url, metadata, cur) @db_transaction def metadata_provider_get(self, provider_id, cur=None): db = self.db result = db.metadata_provider_get(provider_id) if not result: return None return dict(zip(self.db.metadata_provider_cols, result)) @db_transaction def metadata_provider_get_by(self, provider, cur=None): db = self.db result = db.metadata_provider_get_by(provider['provider_name'], provider['provider_url']) if not result: return None return dict(zip(self.db.metadata_provider_cols, result)) + + def diff_directories(self, from_dir, to_dir, track_renaming=False): + """Compute the list of file changes introduced between two arbitrary + directories (insertion / deletion / modification / renaming of files). + + Args: + from_dir (bytes): identifier of the directory to compare from + to_dir (bytes): identifier of the directory to compare to + track_renaming (bool): whether or not to track files renaming + + Returns: + A list of dict describing the introduced file changes + (see :func:`swh.storage.algos.diff.diff_directories` + for more details). + """ + return diff.diff_directories(self, from_dir, to_dir, track_renaming) + + def diff_revisions(self, from_rev, to_rev, track_renaming=False): + """Compute the list of file changes introduced between two arbitrary + revisions (insertion / deletion / modification / renaming of files). + + Args: + from_rev (bytes): identifier of the revision to compare from + to_rev (bytes): identifier of the revision to compare to + track_renaming (bool): whether or not to track files renaming + + Returns: + A list of dict describing the introduced file changes + (see :func:`swh.storage.algos.diff.diff_directories` + for more details). + """ + return diff.diff_revisions(self, from_rev, to_rev, track_renaming) + + def diff_revision(self, revision, track_renaming=False): + """Compute the list of file changes introduced by a specific revision + (insertion / deletion / modification / renaming of files) by comparing + it against its first parent. + + Args: + revision (bytes): identifier of the revision from which to + compute the list of files changes + track_renaming (bool): whether or not to track files renaming + + Returns: + A list of dict describing the introduced file changes + (see :func:`swh.storage.algos.diff.diff_directories` + for more details). 
+ """ + return diff.diff_revision(self, revision, track_renaming) diff --git a/swh/storage/tests/algos/__init__.py b/swh/storage/tests/algos/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/swh/storage/tests/algos/test_diff.py b/swh/storage/tests/algos/test_diff.py new file mode 100644 index 000000000..0a2f6d660 --- /dev/null +++ b/swh/storage/tests/algos/test_diff.py @@ -0,0 +1,368 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +# flake8: noqa + +import unittest + +from nose.tools import istest, nottest +from unittest.mock import patch + +from swh.model.identifiers import directory_identifier +from swh.storage.algos import diff + + +class DirectoryModel(object): + """ + Quick and dirty directory model to ease the writing + of revision trees differential tests. + """ + def __init__(self, name=''): + self.data = {} + self.data['name'] = name + self.data['perms'] = 16384 + self.data['type'] = 'dir' + self.data['entries'] = [] + self.data['entry_idx'] = {} + + def __getitem__(self, item): + if item == 'target': + return directory_identifier(self) + else: + return self.data[item] + + def add_file(self, path, sha1=None): + path_parts = path.split(b'/') + if len(path_parts) == 1: + self['entry_idx'][path] = len(self['entries']) + self['entries'].append({ + 'target': sha1, + 'name': path, + 'perms': 33188, + 'type': 'file' + }) + else: + if not path_parts[0] in self['entry_idx']: + self['entry_idx'][path_parts[0]] = len(self['entries']) + self['entries'].append(DirectoryModel(path_parts[0])) + if path_parts[1]: + dir_idx = self['entry_idx'][path_parts[0]] + self['entries'][dir_idx].add_file(b'/'.join(path_parts[1:]), sha1) + + def get_hash_data(self, entry_hash): + if self['target'] == entry_hash: + ret = [] + for e in self['entries']: + ret.append({ + 'target': e['target'], + 'name': e['name'], + 'perms': e['perms'], + 'type': e['type'] + }) + return ret + else: + for e in self['entries']: + if e['type'] == 'file' and e['target'] == entry_hash: + return e + elif e['type'] == 'dir': + data = e.get_hash_data(entry_hash) + if data: + return data + return None + + def get_path_data(self, path): + path_parts = path.split(b'/') + entry_idx = self['entry_idx'][path_parts[0]] + entry = self['entries'][entry_idx] + if len(path_parts) == 1: + return { + 'target': entry['target'], + 'name': entry['name'], + 'perms': entry['perms'], + 'type': entry['type'] + } + else: + return entry.get_path_data(b'/'.join(path_parts[1:])) + + +@patch('swh.storage.algos.diff._get_rev') +@patch('swh.storage.algos.dir_iterators._get_dir') +class TestDiffRevisions(unittest.TestCase): + + @nottest + def diff_revisions(self, rev_from, rev_to, from_dir_model, to_dir_model, + expected_changes, mock_get_dir, mock_get_rev): + + def _get_rev(*args, **kwargs): + if args[1] == rev_from: + return {'directory': from_dir_model['target']} + else: + return {'directory': to_dir_model['target']} + + def _get_dir(*args, **kwargs): + return from_dir_model.get_hash_data(args[1]) or \ + to_dir_model.get_hash_data(args[1]) + + mock_get_rev.side_effect = _get_rev + mock_get_dir.side_effect = _get_dir + + changes = diff.diff_revisions(None, rev_from, rev_to, track_renaming=True) + + self.assertEqual(changes, expected_changes) + + @istest + def test_insert_delete(self, mock_get_dir, mock_get_rev): + rev_from = 
'898ff03e1e7925ecde3da66327d3cdc7e07625ba' + rev_to = '647c3d381e67490e82cdbbe6c96e46d5e1628ce2' + + from_dir_model = DirectoryModel() + + to_dir_model = DirectoryModel() + to_dir_model.add_file(b'file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + to_dir_model.add_file(b'file2', '3e5faecb3836ffcadf82cc160787e35d4e2bec6a') + to_dir_model.add_file(b'file3', '2ae33b2984974d35eababe4890d37fbf4bce6b2c') + + expected_changes = \ + [{ + 'type': 'insert', + 'from': None, + 'from_path': None, + 'to': to_dir_model.get_path_data(b'file1'), + 'to_path': b'file1' + }, + { + 'type': 'insert', + 'from': None, + 'from_path': None, + 'to': to_dir_model.get_path_data(b'file2'), + 'to_path': b'file2' + }, + { + 'type': 'insert', + 'from': None, + 'from_path': None, + 'to': to_dir_model.get_path_data(b'file3'), + 'to_path': b'file3' + }] + + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) + + from_dir_model = DirectoryModel() + from_dir_model.add_file(b'file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + from_dir_model.add_file(b'file2', '3e5faecb3836ffcadf82cc160787e35d4e2bec6a') + from_dir_model.add_file(b'file3', '2ae33b2984974d35eababe4890d37fbf4bce6b2c') + + to_dir_model = DirectoryModel() + + expected_changes = \ + [{ + 'type': 'delete', + 'from': from_dir_model.get_path_data(b'file1'), + 'from_path': b'file1', + 'to': None, + 'to_path': None + }, + { + 'type': 'delete', + 'from': from_dir_model.get_path_data(b'file2'), + 'from_path': b'file2', + 'to': None, + 'to_path': None + }, + { + 'type': 'delete', + 'from': from_dir_model.get_path_data(b'file3'), + 'from_path': b'file3', + 'to': None, + 'to_path': None + }] + + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) + + @istest + def test_onelevel_diff(self, mock_get_dir, mock_get_rev): + rev_from = '898ff03e1e7925ecde3da66327d3cdc7e07625ba' + rev_to = '647c3d381e67490e82cdbbe6c96e46d5e1628ce2' + + from_dir_model = DirectoryModel() + from_dir_model.add_file(b'file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + from_dir_model.add_file(b'file2', 'f4a96b2000be83b61254d107046fa9777b17eb34') + from_dir_model.add_file(b'file3', 'd3c00f9396c6d0277727cec522ff6ad1ea0bc2da') + + to_dir_model = DirectoryModel() + to_dir_model.add_file(b'file2', '3ee0f38ee0ea23cc2c8c0b9d66b27be4596b002b') + to_dir_model.add_file(b'file3', 'd3c00f9396c6d0277727cec522ff6ad1ea0bc2da') + to_dir_model.add_file(b'file4', '40460b9653b1dc507e1b6eb333bd4500634bdffc') + + expected_changes = \ + [{ + 'type': 'delete', + 'from': from_dir_model.get_path_data(b'file1'), + 'from_path': b'file1', + 'to': None, + 'to_path': None}, + { + 'type': 'modify', + 'from': from_dir_model.get_path_data(b'file2'), + 'from_path': b'file2', + 'to': to_dir_model.get_path_data(b'file2'), + 'to_path': b'file2'}, + { + 'type': 'insert', + 'from': None, + 'from_path': None, + 'to': to_dir_model.get_path_data(b'file4'), + 'to_path': b'file4' + }] + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) + + @istest + def test_twolevels_diff(self, mock_get_dir, mock_get_rev): + rev_from = '898ff03e1e7925ecde3da66327d3cdc7e07625ba' + rev_to = '647c3d381e67490e82cdbbe6c96e46d5e1628ce2' + + from_dir_model = DirectoryModel() + from_dir_model.add_file(b'file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + from_dir_model.add_file(b'dir1/file1', '8335fca266811bac7ae5c8e1621476b4cf4156b6') + 
from_dir_model.add_file(b'dir1/file2', 'a6127d909e79f1fcb28bbf220faf86e7be7831e5') + from_dir_model.add_file(b'dir1/file3', '18049b8d067ce1194a7e1cce26cfa3ae4242a43d') + from_dir_model.add_file(b'file2', 'd3c00f9396c6d0277727cec522ff6ad1ea0bc2da') + + to_dir_model = DirectoryModel() + to_dir_model.add_file(b'file1', '3ee0f38ee0ea23cc2c8c0b9d66b27be4596b002b') + to_dir_model.add_file(b'dir1/file2', 'de3548b32a8669801daa02143a66dae21fe852fd') + to_dir_model.add_file(b'dir1/file3', '18049b8d067ce1194a7e1cce26cfa3ae4242a43d') + to_dir_model.add_file(b'dir1/file4', 'f5c3f42aec5fe7b92276196c350cbadaf4c51f87') + to_dir_model.add_file(b'file2', 'd3c00f9396c6d0277727cec522ff6ad1ea0bc2da') + + expected_changes = \ + [{ + 'type': 'delete', + 'from': from_dir_model.get_path_data(b'dir1/file1'), + 'from_path': b'dir1/file1', + 'to': None, + 'to_path': None + }, + { + 'type': 'modify', + 'from': from_dir_model.get_path_data(b'dir1/file2'), + 'from_path': b'dir1/file2', + 'to': to_dir_model.get_path_data(b'dir1/file2'), + 'to_path': b'dir1/file2' + }, + { + 'type': 'insert', + 'from': None, + 'from_path': None, + 'to': to_dir_model.get_path_data(b'dir1/file4'), + 'to_path': b'dir1/file4' + }, + { + 'type': 'modify', + 'from': from_dir_model.get_path_data(b'file1'), + 'from_path': b'file1', + 'to': to_dir_model.get_path_data(b'file1'), + 'to_path': b'file1' + }] + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) + + @istest + def test_insert_delete_empty_dirs(self, mock_get_dir, mock_get_rev): + rev_from = '898ff03e1e7925ecde3da66327d3cdc7e07625ba' + rev_to = '647c3d381e67490e82cdbbe6c96e46d5e1628ce2' + + from_dir_model = DirectoryModel() + from_dir_model.add_file(b'dir3/file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + + to_dir_model = DirectoryModel() + to_dir_model.add_file(b'dir3/file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + to_dir_model.add_file(b'dir3/dir1/') + + expected_changes = \ + [{ + 'type': 'insert', + 'from': None, + 'from_path': None, + 'to': to_dir_model.get_path_data(b'dir3/dir1'), + 'to_path': b'dir3/dir1' + }] + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) + + from_dir_model = DirectoryModel() + from_dir_model.add_file(b'dir1/dir2/') + from_dir_model.add_file(b'dir1/file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + + to_dir_model = DirectoryModel() + to_dir_model.add_file(b'dir1/file1', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + + expected_changes = \ + [{ + 'type': 'delete', + 'from': from_dir_model.get_path_data(b'dir1/dir2'), + 'from_path': b'dir1/dir2', + 'to': None, + 'to_path': None + }] + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) + + @istest + def test_track_renaming(self, mock_get_dir, mock_get_rev): + rev_from = '898ff03e1e7925ecde3da66327d3cdc7e07625ba' + rev_to = '647c3d381e67490e82cdbbe6c96e46d5e1628ce2' + + from_dir_model = DirectoryModel() + from_dir_model.add_file(b'file1_oldname', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + from_dir_model.add_file(b'dir1/file1_oldname', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + from_dir_model.add_file(b'file2_oldname', 'd3c00f9396c6d0277727cec522ff6ad1ea0bc2da') + + to_dir_model = DirectoryModel() + to_dir_model.add_file(b'dir1/file1_newname', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + to_dir_model.add_file(b'dir2/file1_newname', 'ea15f54ca215e7920c60f564315ebb7f911a5204') + 
to_dir_model.add_file(b'file2_newname', 'd3c00f9396c6d0277727cec522ff6ad1ea0bc2da') + + expected_changes = \ + [{ + 'type': 'rename', + 'from': from_dir_model.get_path_data(b'dir1/file1_oldname'), + 'from_path': b'dir1/file1_oldname', + 'to': to_dir_model.get_path_data(b'dir1/file1_newname'), + 'to_path': b'dir1/file1_newname' + }, + { + 'type': 'rename', + 'from': from_dir_model.get_path_data(b'file1_oldname'), + 'from_path': b'file1_oldname', + 'to': to_dir_model.get_path_data(b'dir2/file1_newname'), + 'to_path': b'dir2/file1_newname' + }, + { + 'type': 'rename', + 'from': from_dir_model.get_path_data(b'file2_oldname'), + 'from_path': b'file2_oldname', + 'to': to_dir_model.get_path_data(b'file2_newname'), + 'to_path': b'file2_newname' + }] + + self.diff_revisions(rev_from, rev_to, from_dir_model, + to_dir_model, expected_changes, + mock_get_dir, mock_get_rev) diff --git a/version.txt b/version.txt index a00288df8..c97df7a17 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.98-0-g9082524 \ No newline at end of file +v0.0.99-0-gd8ad992 \ No newline at end of file
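The three new diff methods are wired symmetrically across the stack: the RemoteStorage client posts to the /algos/* endpoints, the server forwards each call to the matching Storage method, and Storage in turn delegates to swh.storage.algos.diff. A minimal usage sketch, assuming a reachable storage API server; the URL and revision id below are placeholders, and the 'remote' get_storage configuration is an assumption rather than something this diff defines:

from swh.storage import get_storage

# Assumed configuration: 'remote' selects the HTTP client class and 'url'
# must point at a running storage API server; both values are placeholders.
storage = get_storage(cls='remote', args={'url': 'http://localhost:5002/'})

# Placeholder revision identifier (sha1_git, as raw bytes).
revision_id = bytes.fromhex('898ff03e1e7925ecde3da66327d3cdc7e07625ba')

# List the file changes this revision introduces against its first parent,
# with rename detection enabled; each change is a dict carrying 'type',
# 'from'/'from_path' and 'to'/'to_path' keys, as exercised in test_diff.py.
for change in storage.diff_revision(revision_id, track_renaming=True):
    print(change['type'], change.get('from_path'), change.get('to_path'))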