diff --git a/sql/swh-func.sql b/sql/swh-func.sql
--- a/sql/swh-func.sql
+++ b/sql/swh-func.sql
@@ -91,20 +91,6 @@
     alter table tmp_release drop column object_id;
 $$;

--- create a temporary table for occurrence_history
-create or replace function swh_mktemp_occurrence_history()
-    returns void
-    language sql
-as $$
-    create temporary table tmp_occurrence_history(
-        like occurrence_history including defaults,
-        visit bigint not null
-    ) on commit drop;
-    alter table tmp_occurrence_history
-        drop column visits,
-        drop column object_id;
-$$;
-
 -- create a temporary table for entity_history, sans id
 create or replace function swh_mktemp_entity_history()
     returns void
@@ -725,37 +711,6 @@
 end
 $$;

-create or replace function swh_occurrence_update_for_origin(origin_id bigint)
-    returns void
-    language sql
-as $$
-    delete from occurrence where origin = origin_id;
-    insert into occurrence (origin, branch, target, target_type)
-        select origin, branch, target, target_type
-        from occurrence_history
-        where origin = origin_id and
-              (select visit from origin_visit
-               where origin = origin_id
-               order by date desc
-               limit 1) = any(visits);
-$$;
-
-create or replace function swh_occurrence_update_all()
-    returns void
-    language plpgsql
-as $$
-declare
-    origin_id origin.id%type;
-begin
-    for origin_id in
-        select distinct id from origin
-    loop
-        perform swh_occurrence_update_for_origin(origin_id);
-    end loop;
-    return;
-end;
-$$;
-
 -- add a new origin_visit for origin origin_id at date.
 --
 -- Returns the new visit id.
@@ -773,49 +728,6 @@
     returning visit;
 $$;

--- add tmp_occurrence_history entries to occurrence_history
---
--- operates in bulk: 0. swh_mktemp(occurrence_history), 1. COPY to tmp_occurrence_history,
--- 2. call this function
-create or replace function swh_occurrence_history_add()
-    returns void
-    language plpgsql
-as $$
-declare
-    origin_id origin.id%type;
-begin
-    -- Create or update occurrence_history
-    with occurrence_history_id_visit as (
-        select tmp_occurrence_history.*, object_id, visits from tmp_occurrence_history
-        left join occurrence_history using(origin, branch, target, target_type)
-    ),
-    occurrences_to_update as (
-        select object_id, visit from occurrence_history_id_visit where object_id is not null
-    ),
-    update_occurrences as (
-        update occurrence_history
-        set visits = array(select unnest(occurrence_history.visits) as e
-                           union
-                           select occurrences_to_update.visit as e
-                           order by e)
-        from occurrences_to_update
-        where occurrence_history.object_id = occurrences_to_update.object_id
-    )
-    insert into occurrence_history (origin, branch, target, target_type, visits)
-        select origin, branch, target, target_type, ARRAY[visit]
-        from occurrence_history_id_visit
-        where object_id is null;
-
-    -- update occurrence
-    for origin_id in
-        select distinct origin from tmp_occurrence_history
-    loop
-        perform swh_occurrence_update_for_origin(origin_id);
-    end loop;
-    return;
-end
-$$;
-
 create or replace function swh_snapshot_add(origin bigint, visit bigint, snapshot_id snapshot.id%type)
     returns void
     language plpgsql
@@ -932,23 +844,6 @@
 $$;

--- Walk the revision history starting from a given revision, until a matching
--- occurrence is found. Return all occurrence information if one is found, NULL
--- otherwise.
-create or replace function swh_revision_find_occurrence(revision_id sha1_git)
-    returns occurrence
-    language sql
-    stable
-as $$
-    select origin, branch, target, target_type
-    from swh_revision_list_children(ARRAY[revision_id] :: bytea[]) as rev_list
-    left join occurrence_history occ_hist
-    on rev_list.id = occ_hist.target
-    where occ_hist.origin is not null and
-          occ_hist.target_type = 'revision'
-    limit 1;
-$$;
-
 -- Find the visit of origin id closest to date visit_date
 create or replace function swh_visit_find_by_date(origin bigint, visit_date timestamptz default NOW())
     returns origin_visit
@@ -985,84 +880,6 @@
 $$;

--- Retrieve occurrence by filtering on origin_id and optionally on
--- branch_name and/or validity range
-create or replace function swh_occurrence_get_by(
-    origin_id bigint,
-    branch_name bytea default NULL,
-    date timestamptz default NULL)
-    returns setof occurrence_history
-    language plpgsql
-as $$
-declare
-    filters text[] := array[] :: text[];  -- AND-clauses used to filter content
-    visit_id bigint;
-    q text;
-begin
-    if origin_id is null then
-        raise exception 'Needs an origin_id to get an occurrence.';
-    end if;
-    filters := filters || format('origin = %L', origin_id);
-    if branch_name is not null then
-        filters := filters || format('branch = %L', branch_name);
-    end if;
-    if date is not null then
-        select visit from swh_visit_find_by_date(origin_id, date) into visit_id;
-    else
-        select visit from origin_visit where origin = origin_id order by origin_visit.date desc limit 1 into visit_id;
-    end if;
-    if visit_id is null then
-        return;
-    end if;
-    filters := filters || format('%L = any(visits)', visit_id);
-
-    q = format('select * from occurrence_history where %s',
-               array_to_string(filters, ' and '));
-    return query execute q;
-end
-$$;
-
-
--- Retrieve revisions by occurrence criterion filtering
-create or replace function swh_revision_get_by(
-    origin_id bigint,
-    branch_name bytea default NULL,
-    date timestamptz default NULL)
-    returns setof revision_entry
-    language sql
-    stable
-as $$
-    select r.id, r.date, r.date_offset, r.date_neg_utc_offset,
-           r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
-           r.type, r.directory, r.message,
-           a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic,
-           array(select rh.parent_id::bytea
-                 from revision_history rh
-                 where rh.id = r.id
-                 order by rh.parent_rank
-           ) as parents, r.object_id
-    from swh_occurrence_get_by(origin_id, branch_name, date) as occ
-    inner join revision r on occ.target = r.id
-    left join person a on a.id = r.author
-    left join person c on c.id = r.committer;
-$$;
-
--- Retrieve a release by occurrence criterion
-create or replace function swh_release_get_by(
-    origin_id bigint)
-    returns setof release_entry
-    language sql
-    stable
-as $$
-    select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset,
-           r.name, r.comment, r.synthetic, a.id as author_id, a.fullname as author_fullname,
-           a.name as author_name, a.email as author_email, r.object_id
-    from release r
-    inner join occurrence_history occ on occ.target = r.target
-    left join person a on a.id = r.author
-    where occ.origin = origin_id and occ.target_type = 'revision' and r.target_type = 'revision';
-$$;
-
 -- Create entries in entity_history from tmp_entity_history
 --
 -- TODO: do something smarter to compress the entries if the data
@@ -1225,15 +1042,6 @@
 $$;

-create or replace function swh_occurrence_by_origin_visit(origin_id bigint, visit_id bigint)
-    returns setof occurrence
-    language sql
-    stable
-as $$
-    select origin, branch, target, target_type from occurrence_history
-    where origin = origin_id and visit_id = ANY(visits);
-$$;
-
 -- end revision_metadata functions
 -- origin_metadata functions
 create type origin_metadata_signature as (
@@ -1325,8 +1133,6 @@
     'directory_entry_dir',
     'directory_entry_file',
     'directory_entry_rev',
-    'occurrence',
-    'occurrence_history',
     'origin',
     'origin_visit',
     'person',
diff --git a/sql/swh-indexes.sql b/sql/swh-indexes.sql
--- a/sql/swh-indexes.sql
+++ b/sql/swh-indexes.sql
@@ -18,6 +18,7 @@
 alter table entity_history add primary key using index entity_history_pkey;

+
 -- entity
 create unique index concurrently entity_pkey on entity(uuid);
@@ -31,6 +32,7 @@
 alter table entity add constraint entity_last_id_fkey foreign key (last_id) references entity_history(id) not valid;
 alter table entity validate constraint entity_last_id_fkey;

+
 -- entity_equivalence
 create unique index concurrently entity_equivalence_pkey on entity_equivalence(entity1, entity2);
@@ -44,6 +46,7 @@
 alter table entity_equivalence add constraint "order_entities" check (entity1 < entity2) not valid;
 alter table entity_equivalence validate constraint order_entities;

+
 -- listable_entity
 create unique index concurrently listable_entity_pkey on listable_entity(uuid);
@@ -52,6 +55,7 @@
 alter table listable_entity add constraint listable_entity_uuid_fkey foreign key (uuid) references entity(uuid) not valid;
 alter table listable_entity validate constraint listable_entity_uuid_fkey;

+
 -- list_history
 create unique index concurrently list_history_pkey on list_history(id);
@@ -60,7 +64,9 @@
 alter table list_history add constraint list_history_entity_fkey foreign key (entity) references listable_entity(uuid) not valid;
 alter table list_history validate constraint list_history_entity_fkey;

+
 -- origin
+
 create unique index concurrently origin_pkey on origin(id);
 alter table origin add primary key using index origin_pkey;
@@ -72,6 +78,7 @@
 alter table origin add constraint origin_project_fkey foreign key (project) references entity(uuid) not valid;
 alter table origin validate constraint origin_project_fkey;

+
 -- skipped_content
 alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256);
@@ -85,6 +92,7 @@
 alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table skipped_content validate constraint skipped_content_origin_fkey;

+
 -- fetch_history
 create unique index concurrently fetch_history_pkey on fetch_history(id);
@@ -93,6 +101,7 @@
 alter table fetch_history add constraint fetch_history_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table fetch_history validate constraint fetch_history_origin_fkey;

+
 -- directory
 create unique index concurrently directory_pkey on directory(id);
@@ -103,6 +112,7 @@
 create index concurrently on directory using gin (rev_entries);
 create unique index concurrently on directory(object_id);

+
 -- directory_entry_dir
 create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id);
@@ -110,6 +120,7 @@

 create unique index concurrently on directory_entry_dir(target, name, perms);

+
 -- directory_entry_file
 create unique index concurrently directory_entry_file_pkey on directory_entry_file(id);
@@ -117,6 +128,7 @@

 create unique index concurrently on directory_entry_file(target, name, perms);

+
 -- directory_entry_rev
 create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id);
@@ -124,7 +136,9 @@

 create unique index concurrently on directory_entry_rev(target, name, perms);

+
 -- person
+
 create unique index concurrently person_pkey on person(id);
 alter table person add primary key using index person_pkey;
@@ -132,7 +146,9 @@
 create index concurrently on person(name);
 create index concurrently on person(email);

+
 -- revision
+
 create unique index concurrently revision_pkey on revision(id);
 alter table revision add primary key using index revision_pkey;
@@ -144,7 +160,9 @@
 create index concurrently on revision(directory);
 create unique index concurrently on revision(object_id);

+
 -- revision_history
+
 create unique index concurrently revision_history_pkey on revision_history(id, parent_rank);
 alter table revision_history add primary key using index revision_history_pkey;
@@ -153,13 +171,17 @@
 alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid;
 alter table revision_history validate constraint revision_history_id_fkey;

+
 -- snapshot
+
 create unique index concurrently snapshot_pkey on snapshot(object_id);
 alter table snapshot add primary key using index snapshot_pkey;

 create unique index concurrently on snapshot(id);

+
 -- snapshot_branch
+
 create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id);
 alter table snapshot_branch add primary key using index snapshot_branch_pkey;
@@ -171,7 +193,9 @@

 create unique index concurrently on snapshot_branch (name) where target_type is null and target is null;

+
 -- snapshot_branches
+
 create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id);
 alter table snapshot_branches add primary key using index snapshot_branches_pkey;
@@ -181,7 +205,9 @@
 alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid;
 alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey;

+
 -- origin_visit
+
 create unique index concurrently origin_visit_pkey on origin_visit(origin, visit);
 alter table origin_visit add primary key using index origin_visit_pkey;
@@ -193,25 +219,9 @@
 alter table origin_visit add constraint origin_visit_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid;
 alter table origin_visit validate constraint origin_visit_snapshot_id_fkey;

--- occurrence_history
-create unique index concurrently occurrence_history_pkey on occurrence_history(object_id);
-alter table occurrence_history add primary key using index occurrence_history_pkey;
-
-create index concurrently on occurrence_history(target, target_type);
-create index concurrently on occurrence_history(origin, branch);
-create unique index concurrently on occurrence_history(origin, branch, target, target_type);
-
-alter table occurrence_history add constraint occurrence_history_origin_fkey foreign key (origin) references origin(id) not valid;
-alter table occurrence_history validate constraint occurrence_history_origin_fkey;
-
--- occurrence
-create unique index concurrently occurrence_pkey on occurrence(origin, branch);
-alter table occurrence add primary key using index occurrence_pkey;
-
-alter table occurrence add constraint occurrence_origin_fkey foreign key (origin) references origin(id) not valid;
-alter table occurrence validate constraint occurrence_origin_fkey;

 -- release
+
 create unique index concurrently release_pkey on release(id);
 alter table release add primary key using index release_pkey;
@@ -221,19 +231,25 @@
 alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid;
 alter table release validate constraint release_author_fkey;

+
 -- tool
+
 create unique index tool_pkey on tool(id);
 alter table tool add primary key using index tool_pkey;

 create unique index on tool(name, version, configuration);

+
 -- metadata_provider
+
 create unique index concurrently metadata_provider_pkey on metadata_provider(id);
 alter table metadata_provider add primary key using index metadata_provider_pkey;

 create index concurrently on metadata_provider(provider_name, provider_url);

+
 -- origin_metadata
+
 create unique index concurrently origin_metadata_pkey on origin_metadata(id);
 alter table origin_metadata add primary key using index origin_metadata_pkey;
@@ -248,10 +264,14 @@
 alter table origin_metadata add constraint origin_metadata_tool_fkey foreign key (tool_id) references tool(id) not valid;
 alter table origin_metadata validate constraint origin_metadata_tool_fkey;

+
 -- object_counts
+
 create unique index concurrently object_counts_pkey on object_counts(object_type);
 alter table object_counts add primary key using index object_counts_pkey;

+
 -- object_counts_bucketed
+
 create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line);
 alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey;
diff --git a/sql/swh-schema.sql b/sql/swh-schema.sql
--- a/sql/swh-schema.sql
+++ b/sql/swh-schema.sql
@@ -1,11 +1,8 @@
 ---
---- Software Heritage Data Model
+--- SQL implementation of the Software Heritage data model
 ---

--- drop schema if exists swh cascade;
--- create schema swh;
--- set search_path to swh;
-
+-- schema versions
 create table dbversion
 (
   version int primary key,
@@ -13,13 +10,15 @@
   description text
 );

+-- latest schema version
 insert into dbversion(version, release, description)
-      values(120, now(), 'Work In Progress');
+      values (121, now(), 'Work In Progress');

+
--- a SHA1 checksum (not necessarily originating from Git)
+-- a SHA1 checksum
 create domain sha1 as bytea check (length(value) = 20);

--- a Git object ID, i.e., a SHA1 checksum
+-- a Git object ID, i.e., a Git-style salted SHA1 checksum
 create domain sha1_git as bytea check (length(value) = 20);

 -- a SHA256 checksum
@@ -34,6 +33,7 @@
 -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod
 create domain file_perms as int;

+
 -- Checksums about actual file content. Note that the content itself is not
 -- stored in the DB, but on external (key-value) storage. A single checksum is
 -- used as key there, but the other can be used to verify that we do not inject
@@ -145,12 +145,13 @@
 );

--- An origin is a place, identified by an URL, where software can be found. We
--- support different kinds of origins, e.g., git and other VCS repositories,
--- web pages that list tarballs URLs (e.g., http://www.kernel.org), indirect
--- tarball URLs (e.g., http://www.example.org/latest.tar.gz), etc. The key
--- feature of an origin is that it can be *fetched* (wget, git clone, svn
--- checkout, etc.) to retrieve all the contained software.
+-- An origin is a place, identified by a URL, where software source code
+-- artifacts can be found. We support different kinds of origins, e.g., git and
+-- other VCS repositories, web pages that list tarball URLs (e.g.,
+-- http://www.kernel.org), indirect tarball URLs (e.g.,
+-- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is
+-- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to
+-- retrieve all the contained software.
 create table origin
 (
   id bigserial not null,
@@ -160,13 +161,14 @@
   project uuid
 );

--- Content we have seen but skipped for some reason. This table is
--- separate from the content table as we might not have the sha1
--- checksum of that data (for instance when we inject git
--- repositories, objects that are too big will be skipped here, and we
--- will only know their sha1_git). 'reason' contains the reason the
--- content was skipped. origin is a nullable column allowing to find
--- out which origin contains that skipped content.
+
+-- Content blobs observed somewhere, but not ingested into the archive for
+-- whatever reason. This table is separate from the content table as we might
+-- not have the sha1 checksum of skipped contents (for instance when we inject
+-- git repositories, objects that are too big will be skipped here, and we will
+-- only know their sha1_git). 'reason' contains the reason the content was
+-- skipped. origin is a nullable column allowing us to find out which origin
+-- contains that skipped content.
 create table skipped_content
 (
   sha1 sha1,
@@ -181,6 +183,7 @@
   object_id bigserial
 );

+
 -- Log of all origin fetches (i.e., origin crawling) that have been done in the
 -- past, or are still ongoing. Similar to list_history, but for origins.
 create table fetch_history
@@ -216,7 +219,7 @@
   object_id bigserial  -- short object identifier
 );

--- A directory entry pointing to a sub-directory.
+-- A directory entry pointing to a (sub-)directory.
 create table directory_entry_dir
 (
   id bigserial,
@@ -225,7 +228,7 @@
   perms file_perms  -- unix-like permissions
 );

--- A directory entry pointing to a file.
+-- A directory entry pointing to a file content.
 create table directory_entry_file
 (
   id bigserial,
@@ -243,6 +246,9 @@
   perms file_perms  -- unix-like permissions
 );

+
+-- A person referenced by some source code artifacts, e.g., a VCS revision or
+-- release metadata.
 create table person
 (
   id bigserial,
@@ -252,7 +258,8 @@
             -- will usually be of the form 'name <email>'
 );

--- A snapshot of a software project at a specific point in time.
+
+-- The state of a source code tree at a specific point in time.
 --
 -- Synonyms/mappings:
 --   * git / subversion / etc: commit
@@ -269,18 +276,17 @@
   committer_date timestamptz,
   committer_date_offset smallint,
   type revision_type not null,
-  directory sha1_git,  -- file-system tree
+  directory sha1_git,  -- source code "root" directory
   message bytea,
   author bigint,
   committer bigint,
-  synthetic boolean not null default false,  -- true if synthetic (cf. swh-loader-tar)
-  metadata jsonb,  -- extra metadata (tarball checksums, extra commit information, etc...)
+  synthetic boolean not null default false,  -- true iff revision has been created by Software Heritage
+  metadata jsonb,  -- extra metadata (tarball checksums, extra commit information, etc...)
   object_id bigserial,
   date_neg_utc_offset boolean,
   committer_date_neg_utc_offset boolean
 );

-
 -- either this table or the sha1_git[] column on the revision table
 create table revision_history
 (
@@ -290,7 +296,10 @@
   -- parent position in merge commits, 0-based
 );

--- The timestamps at which Software Heritage has made a visit of the given origin.
+
+-- Crawling history of software origins visited by Software Heritage. Each
+-- visit is a 3-way mapping between a software origin, a timestamp, and a
+-- snapshot object capturing the full state of the origin at visit time.
 create table origin_visit
 (
   origin bigint not null,
@@ -302,61 +311,43 @@
 );

 comment on column origin_visit.origin is 'Visited origin';
-comment on column origin_visit.visit is 'Visit number the visit occurred for that origin';
-comment on column origin_visit.date is 'Visit date for that origin';
-comment on column origin_visit.status is 'Visit status for that origin';
-comment on column origin_visit.metadata is 'Metadata associated with the visit';
-comment on column origin_visit.snapshot_id is 'id of the snapshot associated with the visit';
+comment on column origin_visit.visit is 'Sequential visit number for the origin';
+comment on column origin_visit.date is 'Visit timestamp';
+comment on column origin_visit.status is 'Visit result';
+comment on column origin_visit.metadata is 'Origin metadata at visit time';
+comment on column origin_visit.snapshot_id is 'Origin snapshot at visit time';

--- The content of software origins is indexed starting from top-level pointers
--- called "branches". Every time we fetch some origin we store in this table
--- where the branches pointed to at fetch time.
---
--- Synonyms/mappings:
---   * git: ref (in the "git update-ref" sense)
-create table occurrence_history
+-- A snapshot represents the entire state of a software origin as crawled by
+-- Software Heritage. This table is a simple mapping between (public) intrinsic
+-- snapshot identifiers and (private) numeric sequential identifiers.
+create table snapshot
 (
-  origin bigint not null,
-  branch bytea not null,  -- e.g., b"master" (for VCS), or b"sid" (for Debian)
-  target sha1_git not null,  -- ref target, e.g., commit id
-  target_type object_type not null,  -- ref target type
-  visits bigint[] not null,  -- the visits where that occurrence was valid. References
-                             -- origin_visit(visit), where o_h.origin = origin_visit.origin.
-  object_id bigserial not null,  -- short object identifier
-  snapshot_branch_id bigint
+  object_id bigserial not null,  -- PK internal object identifier
+  id sha1_git  -- snapshot intrinsic identifier
 );

--- Materialized view of occurrence_history, storing the *current* value of each
--- branch, as last seen by SWH.
-create table occurrence
+-- Each snapshot associates "branch" names to other objects in the Software
+-- Heritage Merkle DAG. This table describes branches as mappings between names
+-- and typed target objects.
+create table snapshot_branch
 (
-  origin bigint,
-  branch bytea not null,
-  target sha1_git not null,
-  target_type object_type not null
+  object_id bigserial not null,  -- PK internal object identifier
+  name bytea not null,  -- branch name, e.g., "master" or "feature/drag-n-drop"
+  target bytea,  -- target object identifier, e.g., a revision identifier
+  target_type snapshot_target  -- target object type, e.g., "revision"
 );
-
-create table snapshot (
-  object_id bigserial not null,
-  id sha1_git
-);
-
-create table snapshot_branch (
-  object_id bigserial not null,
-  name bytea not null,
-  target bytea,
-  target_type snapshot_target
-);
-
-create table snapshot_branches (
-  snapshot_id bigint not null,
-  branch_id bigint not null
+-- Mapping between snapshots and their branches.
+create table snapshot_branches
+(
+  snapshot_id bigint not null,  -- snapshot identifier, ref. snapshot.object_id
+  branch_id bigint not null  -- branch identifier, ref. snapshot_branch.object_id
 );

--- A "memorable" point in the development history of a project.
+-- A "memorable" point in time in the development history of a software
+-- project.
 --
 -- Synonyms/mappings:
 --   * git: tag (of the annotated kind, otherwise they are just references)
@@ -370,15 +361,16 @@
   name bytea,
   comment bytea,
   author bigint,
-  synthetic boolean not null default false,  -- true if synthetic (cf. swh-loader-tar)
+  synthetic boolean not null default false,  -- true iff release has been created by Software Heritage
   object_id bigserial,
   target_type object_type not null,
   date_neg_utc_offset boolean
 );


--- Tools
-create table tool (
+-- Tools
+create table tool
+(
   id serial not null,
   name text not null,
   version text not null,
@@ -392,7 +384,8 @@
 comment on column tool.configuration is 'Tool configuration: command line, flags, etc...';


-create table metadata_provider (
+create table metadata_provider
+(
   id serial not null,
   provider_name text not null,
   provider_type text not null,
@@ -409,11 +402,12 @@
 -- Discovery of metadata during a listing, loading, deposit or external_catalog of an origin
 -- also provides a translation to a defined json schema using a translation tool (tool_id)

-create table origin_metadata(
-  id bigserial not null,  -- PK object identifier
-  origin_id bigint not null,  -- references origin(id)
-  discovery_date timestamptz not null,  -- when it was extracted
-  provider_id bigint not null,  -- ex: 'hal', 'lister-github', 'loader-github'
+create table origin_metadata
+(
+  id bigserial not null,  -- PK internal object identifier
+  origin_id bigint not null,  -- references origin(id)
+  discovery_date timestamptz not null,  -- when it was extracted
+  provider_id bigint not null,  -- ex: 'hal', 'lister-github', 'loader-github'
   tool_id bigint not null,
   metadata jsonb not null
 );
@@ -426,18 +420,21 @@
 comment on column origin_metadata.tool_id is 'the tool used for extracting metadata: lister-github, etc.';
 comment on column origin_metadata.metadata is 'metadata in json format but with original terms';

+
 -- Keep a cache of object counts
-create table object_counts (
+create table object_counts
+(
   object_type text,  -- table for which we're counting objects (PK)
   value bigint,  -- count of objects in the table
   last_update timestamptz,  -- last update for the object count in this table
   single_update boolean  -- whether we update this table standalone (true) or through bucketed counts (false)
 );

-CREATE TABLE object_counts_bucketed (
-  line serial NOT NULL,  -- PK
-  object_type text NOT NULL,  -- table for which we're counting objects
-  identifier text NOT NULL,  -- identifier across which we're bucketing objects
+create table object_counts_bucketed
+(
+  line serial not null,  -- PK
+  object_type text not null,  -- table for which we're counting objects
+  identifier text not null,  -- identifier across which we're bucketing objects
   bucket_start bytea,  -- lower bound (inclusive) for the bucket
   bucket_end bytea,  -- upper bound (exclusive) for the bucket
   value bigint,  -- count of objects in the bucket
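
Reviewer note, not part of the patch: as a minimal sketch of the replacement data model, a lookup formerly served by swh_occurrence_get_by(origin_id, branch_name) can be expressed against the snapshot tables defined above by going through the origin's latest visit. Table and column names are the ones introduced in this diff; the origin id and branch name literals are placeholders.

-- Illustration only: resolve a named branch of an origin's most recent snapshot.
select ov.origin, sb.name, sb.target, sb.target_type
from origin_visit ov
join snapshot s on s.object_id = ov.snapshot_id
join snapshot_branches sbs on sbs.snapshot_id = s.object_id
join snapshot_branch sb on sb.object_id = sbs.branch_id
where ov.origin = 42                       -- placeholder origin id
  and sb.name = '\x6d6173746572'::bytea    -- placeholder branch name ("master")
order by ov.date desc
limit 1;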