diff --git a/sql/swh-enums.sql b/sql/swh-enums.sql
index 46c323e62..7e0073e0f 100644
--- a/sql/swh-enums.sql
+++ b/sql/swh-enums.sql
@@ -1,51 +1,54 @@
 ---
 --- Software Heritage Data Types
 ---
 
 create type content_status as enum ('absent', 'visible', 'hidden');
 comment on type content_status is 'Content visibility';
 
 -- Types of entities.
 --
 -- - organization: a root entity, usually backed by a non-profit, a
 -- company, or another kind of "association". (examples: Software
 -- Heritage, Debian, GNU, GitHub)
 --
 -- - group_of_entities: used for hierarchies, doesn't need to have a
 -- concrete existence. (examples: GNU hosting facilities, Debian
 -- hosting facilities, GitHub users, ...)
 --
 -- - hosting: a hosting facility, can usually be listed to generate
 -- other data. (examples: GitHub git hosting, alioth.debian.org,
 -- snapshot.debian.org)
 --
 -- - group_of_persons: an entity representing a group of
 -- persons. (examples: a GitHub organization, a Debian team)
 --
 -- - person: an entity representing a person. (examples:
 -- a GitHub user, a Debian developer)
 --
 -- - project: an entity representing a software project. (examples: a
 -- GitHub project, Apache httpd, a Debian source package, ...)
 create type entity_type as enum (
   'organization',
   'group_of_entities',
   'hosting',
   'group_of_persons',
   'person',
   'project'
 );
 comment on type entity_type is 'Entity types';
 
 create type revision_type as enum ('git', 'tar', 'dsc', 'svn');
 comment on type revision_type is 'Possible revision types';
 
-create type object_type as enum ('content', 'directory', 'revision', 'release');
+create type object_type as enum ('content', 'directory', 'revision', 'release', 'snapshot');
 comment on type object_type is 'Data object types stored in data model';
 
+create type snapshot_target as enum ('content', 'directory', 'revision', 'release', 'snapshot', 'alias');
+comment on type snapshot_target is 'Types of targets for snapshot branches';
+
 create type origin_visit_status as enum (
   'ongoing',
   'full',
   'partial'
 );
 comment on type origin_visit_status IS 'Possible visit status';
diff --git a/sql/swh-func.sql b/sql/swh-func.sql
index 0a13d8b0c..4138278a6 100644
--- a/sql/swh-func.sql
+++ b/sql/swh-func.sql
@@ -1,1442 +1,1517 @@
 -- create a temporary table called tmp_TBLNAME, mimicking existing table
 -- TBLNAME
 --
 -- Args:
 --     tblname: name of the table to mimic
 create or replace function swh_mktemp(tblname regclass)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
 	create temporary table tmp_%1$I
 	    (like %1$I including defaults)
 	    on commit drop;
       alter table tmp_%1$I drop column if exists object_id;
 	', tblname);
     return;
 end
 $$;
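 
 -- For instance, swh_mktemp('content') creates a temporary table named
 -- tmp_content with the same columns and defaults as content, minus
 -- object_id:
 --
 --   select swh_mktemp('content');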
 
 -- create a temporary table for directory entries called tmp_TBLNAME,
 -- mimicking existing table TBLNAME with an extra dir_id (sha1_git)
 -- column, and dropping the id column.
 --
 -- This is used to create the tmp_directory_entry_<foo> tables.
 --
 -- Args:
 --     tblname: name of the table to mimic
 create or replace function swh_mktemp_dir_entry(tblname regclass)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
 	create temporary table tmp_%1$I
 	    (like %1$I including defaults, dir_id sha1_git)
 	    on commit drop;
         alter table tmp_%1$I drop column id;
 	', tblname);
     return;
 end
 $$;
 
 
 -- create a temporary table for revisions called tmp_revision,
 -- mimicking existing table revision, replacing the foreign keys to
 -- people with an email and name field
 --
 create or replace function swh_mktemp_revision()
     returns void
     language sql
 as $$
     create temporary table tmp_revision (
         like revision including defaults,
         author_fullname bytea,
         author_name bytea,
         author_email bytea,
         committer_fullname bytea,
         committer_name bytea,
         committer_email bytea
     ) on commit drop;
     alter table tmp_revision drop column author;
     alter table tmp_revision drop column committer;
     alter table tmp_revision drop column object_id;
 $$;
 
 
 -- create a temporary table for releases called tmp_release,
 -- mimicking existing table release, replacing the foreign keys to
 -- people with an email and name field
 --
 create or replace function swh_mktemp_release()
     returns void
     language sql
 as $$
     create temporary table tmp_release (
         like release including defaults,
         author_fullname bytea,
         author_name bytea,
         author_email bytea
     ) on commit drop;
     alter table tmp_release drop column author;
     alter table tmp_release drop column object_id;
 $$;
 
 -- create a temporary table with a single "bytea" column for fast object lookup.
 create or replace function swh_mktemp_bytea()
     returns void
     language sql
 as $$
     create temporary table tmp_bytea (
       id bytea
     ) on commit drop;
 $$;
 
 -- create a temporary table for occurrence_history
 create or replace function swh_mktemp_occurrence_history()
     returns void
     language sql
 as $$
     create temporary table tmp_occurrence_history(
         like occurrence_history including defaults,
         visit bigint not null
     ) on commit drop;
     alter table tmp_occurrence_history
       drop column visits,
       drop column object_id;
 $$;
 
 -- create a temporary table for entity_history, sans id
 create or replace function swh_mktemp_entity_history()
     returns void
     language sql
 as $$
     create temporary table tmp_entity_history (
         like entity_history including defaults) on commit drop;
     alter table tmp_entity_history drop column id;
 $$;
 
 -- create a temporary table for entities called tmp_entity_lister,
 -- with only the columns necessary for retrieving the uuid of a listed
 -- entity.
 create or replace function swh_mktemp_entity_lister()
     returns void
     language sql
 as $$
   create temporary table tmp_entity_lister (
     id              bigint,
     lister_metadata jsonb
   ) on commit drop;
 $$;
 
+-- create a temporary table for the branches of a snapshot
+create or replace function swh_mktemp_snapshot_branch()
+    returns void
+    language sql
+as $$
+  create temporary table tmp_snapshot_branch (
+      name bytea not null,
+      target bytea,
+      target_type snapshot_target
+  ) on commit drop;
+$$;
 
 create or replace function swh_mktemp_tool()
     returns void
     language sql
 as $$
     create temporary table tmp_tool (
       like tool including defaults
     ) on commit drop;
     alter table tmp_tool drop column id;
 $$;
 
 
 -- a content signature is a set of cryptographic checksums that we use to
 -- uniquely identify content, in order to check whether we already have a
 -- given content during injection
 create type content_signature as (
     sha1       sha1,
     sha1_git   sha1_git,
     sha256     sha256,
     blake2s256 blake2s256
 );
 
 
 -- check which entries of tmp_content are missing from content
 --
 -- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content,
 -- 2. call this function
 create or replace function swh_content_missing()
     returns setof content_signature
     language plpgsql
 as $$
 begin
     return query (
       select sha1, sha1_git, sha256, blake2s256 from tmp_content as tmp
       where not exists (
         select 1 from content as c
         where c.sha1 = tmp.sha1 and
               c.sha1_git = tmp.sha1_git and
               c.sha256 = tmp.sha256
       )
     );
     return;
 end
 $$;
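 
 -- Illustrative bulk-lookup sequence for the function above (steps 0-2):
 --
 --   select swh_mktemp('content');          -- 0. create tmp_content
 --   copy tmp_content from stdin;           -- 1. stage candidate rows
 --   select * from swh_content_missing();   -- 2. rows absent from content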
 
 -- check which entries of tmp_content_sha1 are missing from content
 --
 -- operates in bulk: 0. swh_mktemp_content_sha1(), 1. COPY to tmp_content_sha1,
 -- 2. call this function
 create or replace function swh_content_missing_per_sha1()
     returns setof sha1
     language plpgsql
 as $$
 begin
     return query
            (select id::sha1
             from tmp_bytea as tmp
             where not exists
             (select 1 from content as c where c.sha1=tmp.id));
 end
 $$;
 
 
 -- check which entries of tmp_skipped_content are missing from skipped_content
 --
 -- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content,
 -- 2. call this function
 create or replace function swh_skipped_content_missing()
     returns setof content_signature
     language plpgsql
 as $$
 begin
     return query
 	select sha1, sha1_git, sha256, blake2s256 from tmp_skipped_content t
 	where not exists
 	(select 1 from skipped_content s where
 	    s.sha1 is not distinct from t.sha1 and
 	    s.sha1_git is not distinct from t.sha1_git and
 	    s.sha256 is not distinct from t.sha256);
     return;
 end
 $$;
 
 
 -- Look up content based on one or several different checksums. Return all
 -- content information if the content is found; a NULL row otherwise.
 --
 -- At least one checksum must be non-NULL. If several are non-NULL, they will
 -- be AND-ed together in the lookup query.
 --
 -- Note: this function is meant to look up individual contents (e.g., for the
 -- web app); for batch lookup of missing contents (e.g., to be added), see
 -- swh_content_missing
 create or replace function swh_content_find(
     sha1       sha1       default NULL,
     sha1_git   sha1_git   default NULL,
     sha256     sha256     default NULL,
     blake2s256 blake2s256 default NULL
 )
     returns content
     language plpgsql
 as $$
 declare
     con content;
     filters text[] := array[] :: text[];  -- AND-clauses used to filter content
     q text;
 begin
     if sha1 is not null then
         filters := filters || format('sha1 = %L', sha1);
     end if;
     if sha1_git is not null then
         filters := filters || format('sha1_git = %L', sha1_git);
     end if;
     if sha256 is not null then
         filters := filters || format('sha256 = %L', sha256);
     end if;
     if blake2s256 is not null then
         filters := filters || format('blake2s256 = %L', blake2s256);
     end if;
 
     if cardinality(filters) = 0 then
         return null;
     else
         q = format('select * from content where %s',
                    array_to_string(filters, ' and '));
         execute q into con;
 	return con;
     end if;
 end
 $$;
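 
 -- Example (hypothetical hash): look up a content by sha1_git alone; any
 -- additional non-NULL checksum would be AND-ed into the query:
 --
 --   select * from swh_content_find(
 --       sha1_git := '\x95d09f2b10159347eece71399a7e2e907ea3df4f');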
 
 
 -- add tmp_content entries to content, skipping duplicates
 --
 -- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content,
 -- 2. call this function
 create or replace function swh_content_add()
     returns void
     language plpgsql
 as $$
 begin
     insert into content (sha1, sha1_git, sha256, blake2s256, length, status)
         select distinct sha1, sha1_git, sha256, blake2s256, length, status
 	from tmp_content
 	where (sha1, sha1_git, sha256) in (
             select sha1, sha1_git, sha256
             from swh_content_missing()
         );
         -- TODO XXX use postgres 9.5 "UPSERT" support here, when available.
         -- Specifically, using "INSERT .. ON CONFLICT DO NOTHING" we can avoid
         -- the extra swh_content_missing() query here.
     return;
 end
 $$;
 
 
 -- add tmp_skipped_content entries to skipped_content, skipping duplicates
 --
 -- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content,
 -- 2. call this function
 create or replace function swh_skipped_content_add()
     returns void
     language plpgsql
 as $$
 begin
     insert into skipped_content (sha1, sha1_git, sha256, blake2s256, length, status, reason, origin)
         select distinct sha1, sha1_git, sha256, blake2s256, length, status, reason, origin
 	from tmp_skipped_content
 	where (coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')) in (
             select coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')
             from swh_skipped_content_missing()
         );
         -- TODO XXX use postgres 9.5 "UPSERT" support here, when available.
         -- Specifically, using "INSERT .. ON CONFLICT DO NOTHING" we can avoid
         -- the extra swh_skipped_content_missing() query here.
     return;
 end
 $$;
 
 -- Update content entries from temporary table.
 -- (columns_update holds the column names to update, e.g. new columns added
 -- to the schema; it must not be empty)
 --
 create or replace function swh_content_update(columns_update text[])
     returns void
     language plpgsql
 as $$
 declare
    query text;
    tmp_array text[];
 begin
     if coalesce(array_length(columns_update, 1), 0) = 0 then  -- array_length is null for empty arrays
         raise exception 'Please, provide the list of column names to update.';
     end if;
 
     tmp_array := array(select format('%1$s=t.%1$s', unnest) from unnest(columns_update));
 
     query = format('update content set %s
                     from tmp_content t where t.sha1 = content.sha1',
                     array_to_string(tmp_array, ', '));
 
     execute query;
 
     return;
 end
 $$;
 
 comment on function swh_content_update(text[]) IS 'Update existing content''s columns';
 
 -- check which entries of tmp_directory are missing from directory
 --
 -- operates in bulk: 0. swh_mktemp(directory), 1. COPY to tmp_directory,
 -- 2. call this function
 create or replace function swh_directory_missing()
     returns setof sha1_git
     language plpgsql
 as $$
 begin
     return query
 	select id from tmp_directory t
 	where not exists (
 	    select 1 from directory d
 	    where d.id = t.id);
     return;
 end
 $$;
 
 
 -- Retrieve information on directory from temporary table
 create or replace function swh_directory_get()
     returns setof directory
     language plpgsql
 as $$
 begin
     return query
 	select d.*
         from tmp_directory t
         inner join directory d on t.id = d.id;
     return;
 end
 $$;
 
 
 create type directory_entry_type as enum('file', 'dir', 'rev');
 
 
 -- Add tmp_directory_entry_* entries to directory_entry_* and directory,
 -- skipping duplicates in directory_entry_*.  This is a generic function that
 -- works on all kinds of directory entries.
 --
 -- operates in bulk: 0. swh_mktemp_dir_entry('directory_entry_*'), 1 COPY to
 -- tmp_directory_entry_*, 2. call this function
 --
 -- Assumption: this function is used in the same transaction that inserts the
 -- context directory in table "directory".
 create or replace function swh_directory_entry_add(typ directory_entry_type)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
     insert into directory_entry_%1$s (target, name, perms)
     select distinct t.target, t.name, t.perms
     from tmp_directory_entry_%1$s t
     where not exists (
     select 1
     from directory_entry_%1$s i
     where t.target = i.target and t.name = i.name and t.perms = i.perms)
    ', typ);
 
     execute format('
     with new_entries as (
 	select t.dir_id, array_agg(i.id) as entries
 	from tmp_directory_entry_%1$s t
 	inner join directory_entry_%1$s i
 	using (target, name, perms)
 	group by t.dir_id
     )
     update tmp_directory as d
     set %1$s_entries = new_entries.entries
     from new_entries
     where d.id = new_entries.dir_id
     ', typ);
 
     return;
 end
 $$;
 
 -- Insert the data from tmp_directory, tmp_directory_entry_file,
 -- tmp_directory_entry_dir, tmp_directory_entry_rev into their final
 -- tables.
 --
 -- Prerequisites:
 --  directory ids in tmp_directory
 --  entries in tmp_directory_entry_{file,dir,rev}
 --
 create or replace function swh_directory_add()
     returns void
     language plpgsql
 as $$
 begin
     perform swh_directory_entry_add('file');
     perform swh_directory_entry_add('dir');
     perform swh_directory_entry_add('rev');
 
     insert into directory
     select * from tmp_directory t
     where not exists (
         select 1 from directory d
 	where d.id = t.id);
 
     return;
 end
 $$;
 
 -- a directory listing entry with all the metadata
 --
 -- can be used to list a directory, and retrieve all the data in one go.
 create type directory_entry as
 (
   dir_id   sha1_git,     -- id of the parent directory
   type     directory_entry_type,  -- type of entry
   target   sha1_git,     -- id of target
   name     unix_path,    -- path name, relative to containing dir
   perms    file_perms,   -- unix-like permissions
   status   content_status,  -- visible or absent
   sha1     sha1,            -- content's sha1 if type is not dir
   sha1_git sha1_git,        -- content's sha1 git if type is not dir
   sha256   sha256,          -- content's sha256 if type is not dir
   length   bigint           -- content length if type is not dir
 );
 
 
 -- List a single level of directory walked_dir_id
 -- FIXME: order by name is not correct. For git, we need lexicographic
 -- order, as if a trailing / were appended to directory names
 create or replace function swh_directory_walk_one(walked_dir_id sha1_git)
     returns setof directory_entry
     language sql
     stable
 as $$
     with dir as (
 	select id as dir_id, dir_entries, file_entries, rev_entries
 	from directory
 	where id = walked_dir_id),
     ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir),
     ls_f as (select dir_id, unnest(file_entries) as entry_id from dir),
     ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir)
     (select dir_id, 'dir'::directory_entry_type as type,
             e.target, e.name, e.perms, NULL::content_status,
             NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
      from ls_d
      left join directory_entry_dir e on ls_d.entry_id = e.id)
     union
     (select dir_id, 'file'::directory_entry_type as type,
             e.target, e.name, e.perms, c.status,
             c.sha1, c.sha1_git, c.sha256, c.length
      from ls_f
      left join directory_entry_file e on ls_f.entry_id = e.id
      left join content c on e.target = c.sha1_git)
     union
     (select dir_id, 'rev'::directory_entry_type as type,
             e.target, e.name, e.perms, NULL::content_status,
             NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
      from ls_r
      left join directory_entry_rev e on ls_r.entry_id = e.id)
     order by name;
 $$;
 
 -- Recursively list the directory tree rooted at walked_dir_id
 create or replace function swh_directory_walk(walked_dir_id sha1_git)
     returns setof directory_entry
     language sql
     stable
 as $$
     with recursive entries as (
         select dir_id, type, target, name, perms, status, sha1, sha1_git,
                sha256, length
         from swh_directory_walk_one(walked_dir_id)
         union all
         select dir_id, type, target, (dirname || '/' || name)::unix_path as name,
                perms, status, sha1, sha1_git, sha256, length
         from (select (swh_directory_walk_one(dirs.target)).*, dirs.name as dirname
               from (select target, name from entries where type = 'dir') as dirs) as with_parent
     )
     select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length
     from entries
 $$;
 
 create or replace function swh_revision_walk(revision_id sha1_git)
   returns setof directory_entry
   language sql
   stable
 as $$
   select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length
   from swh_directory_walk((select directory from revision where id=revision_id))
 $$;
 
 COMMENT ON FUNCTION swh_revision_walk(sha1_git) IS 'Recursively list the directory tree targeted by the given revision';
 
 
 -- Find a directory entry by its path
 create or replace function swh_find_directory_entry_by_path(
     walked_dir_id sha1_git,
     dir_or_content_path bytea[])
     returns directory_entry
     language plpgsql
 as $$
 declare
     end_index integer;
     paths bytea default '';
     path bytea;
     res bytea[];
     r record;
 begin
     end_index := array_upper(dir_or_content_path, 1);
     res[1] := walked_dir_id;
 
     for i in 1..end_index
     loop
         path := dir_or_content_path[i];
         -- concatenate path for patching the name in the result record (if we found it)
         if i = 1 then
             paths = path;
         else
             paths := paths || '/' || path;  -- concatenate paths
         end if;
 
         if i <> end_index then
             select *
             from swh_directory_walk_one(res[i] :: sha1_git)
             where name=path
             and type = 'dir'
             limit 1 into r;
         else
             select *
             from swh_directory_walk_one(res[i] :: sha1_git)
             where name=path
             limit 1 into r;
         end if;
 
         -- find the path
         if r is null then
            return null;
         else
             -- store the next dir to lookup the next local path from
             res[i+1] := r.target;
         end if;
     end loop;
 
     -- at this moment, r is the result. Patch its 'name' with the full path before returning it.
     r.name := paths;
     return r;
 end
 $$;
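 
 -- Example (hypothetical ids): resolve the entry at src/main.c below a given
 -- root directory, passing the path split into bytea components:
 --
 --   select * from swh_find_directory_entry_by_path(
 --       '\x0000000000000000000000000000000000000000',
 --       array['src', 'main.c']::bytea[]);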
 
 -- List all revision IDs starting from a given revision, going back in time
 --
 -- TODO ordering: should be breadth-first right now (what do we want?)
 -- TODO ordering: ORDER BY parent_rank somewhere?
 create or replace function swh_revision_list(root_revisions bytea[], num_revs bigint default NULL)
     returns table (id sha1_git, parents bytea[])
     language sql
     stable
 as $$
     with recursive full_rev_list(id) as (
         (select id from revision where id = ANY(root_revisions))
         union
         (select h.parent_id
          from revision_history as h
          join full_rev_list on h.id = full_rev_list.id)
     ),
     rev_list as (select id from full_rev_list limit num_revs)
     select rev_list.id as id,
            array(select rh.parent_id::bytea
                  from revision_history rh
                  where rh.id = rev_list.id
                  order by rh.parent_rank
                 ) as parent
     from rev_list;
 $$;
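 
 -- Example (hypothetical root id): list at most 10 revisions reachable from
 -- a single root, along with their parent ids:
 --
 --   select * from swh_revision_list(
 --       array['\x0000000000000000000000000000000000000000'::bytea], 10);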
 
 -- List all the children of a given revision
 create or replace function swh_revision_list_children(root_revisions bytea[], num_revs bigint default NULL)
     returns table (id sha1_git, parents bytea[])
     language sql
     stable
 as $$
     with recursive full_rev_list(id) as (
         (select id from revision where id = ANY(root_revisions))
         union
         (select h.id
          from revision_history as h
          join full_rev_list on h.parent_id = full_rev_list.id)
     ),
     rev_list as (select id from full_rev_list limit num_revs)
     select rev_list.id as id,
            array(select rh.parent_id::bytea
                  from revision_history rh
                  where rh.id = rev_list.id
                  order by rh.parent_rank
                 ) as parent
     from rev_list;
 $$;
 
 
 -- Detailed entry for a revision
 create type revision_entry as
 (
   id                             sha1_git,
   date                           timestamptz,
   date_offset                    smallint,
   date_neg_utc_offset            boolean,
   committer_date                 timestamptz,
   committer_date_offset          smallint,
   committer_date_neg_utc_offset  boolean,
   type                           revision_type,
   directory                      sha1_git,
   message                        bytea,
   author_id                      bigint,
   author_fullname                bytea,
   author_name                    bytea,
   author_email                   bytea,
   committer_id                   bigint,
   committer_fullname             bytea,
   committer_name                 bytea,
   committer_email                bytea,
   metadata                       jsonb,
   synthetic                      boolean,
   parents                        bytea[],
   object_id                      bigint
 );
 
 
 -- "git style" revision log. Similar to swh_revision_list(), but returning all
 -- information associated with each revision, and expanding authors/committers
 create or replace function swh_revision_log(root_revisions bytea[], num_revs bigint default NULL)
     returns setof revision_entry
     language sql
     stable
 as $$
     select t.id, r.date, r.date_offset, r.date_neg_utc_offset,
            r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
            r.type, r.directory, r.message,
            a.id, a.fullname, a.name, a.email,
            c.id, c.fullname, c.name, c.email,
            r.metadata, r.synthetic, t.parents, r.object_id
     from swh_revision_list(root_revisions, num_revs) as t
     left join revision r on t.id = r.id
     left join person a on a.id = r.author
     left join person c on c.id = r.committer;
 $$;
 
 
 -- Retrieve revisions from tmp_bytea in bulk
 create or replace function swh_revision_get()
     returns setof revision_entry
     language plpgsql
 as $$
 begin
     return query
         select r.id, r.date, r.date_offset, r.date_neg_utc_offset,
                r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
                r.type, r.directory, r.message,
                a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic,
          array(select rh.parent_id::bytea from revision_history rh where rh.id = t.id order by rh.parent_rank)
                    as parents, r.object_id
         from tmp_bytea t
         left join revision r on t.id = r.id
         left join person a on a.id = r.author
         left join person c on c.id = r.committer;
     return;
 end
 $$;
 
 -- List missing revisions from tmp_bytea
 create or replace function swh_revision_missing()
     returns setof sha1_git
     language plpgsql
 as $$
 begin
     return query
         select id::sha1_git from tmp_bytea t
 	where not exists (
 	    select 1 from revision r
 	    where r.id = t.id);
     return;
 end
 $$;
 
 -- Detailed entry for a release
 create type release_entry as
 (
   id                   sha1_git,
   target               sha1_git,
   target_type          object_type,
   date                 timestamptz,
   date_offset          smallint,
   date_neg_utc_offset  boolean,
   name                 bytea,
   comment              bytea,
   synthetic            boolean,
   author_id            bigint,
   author_fullname      bytea,
   author_name          bytea,
   author_email         bytea,
   object_id            bigint
 );
 
 -- Retrieve detailed release entries from tmp_bytea in bulk
 create or replace function swh_release_get()
     returns setof release_entry
     language plpgsql
 as $$
 begin
     return query
         select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment,
                r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id
         from tmp_bytea t
         inner join release r on t.id = r.id
         inner join person p on p.id = r.author;
     return;
 end
 $$;
 
 -- Create entries in person from tmp_revision
 create or replace function swh_person_add_from_revision()
     returns void
     language plpgsql
 as $$
 begin
     with t as (
         select author_fullname as fullname, author_name as name, author_email as email from tmp_revision
     union
         select committer_fullname as fullname, committer_name as name, committer_email as email from tmp_revision
     ) insert into person (fullname, name, email)
     select distinct fullname, name, email from t
     where not exists (
         select 1
         from person p
         where t.fullname = p.fullname
     );
     return;
 end
 $$;
 
 
 -- Create entries in revision from tmp_revision
 create or replace function swh_revision_add()
     returns void
     language plpgsql
 as $$
 begin
     perform swh_person_add_from_revision();
 
     insert into revision (id, date, date_offset, date_neg_utc_offset, committer_date, committer_date_offset, committer_date_neg_utc_offset, type, directory, message, author, committer, metadata, synthetic)
     select t.id, t.date, t.date_offset, t.date_neg_utc_offset, t.committer_date, t.committer_date_offset, t.committer_date_neg_utc_offset, t.type, t.directory, t.message, a.id, c.id, t.metadata, t.synthetic
     from tmp_revision t
     left join person a on a.fullname = t.author_fullname
     left join person c on c.fullname = t.committer_fullname;
     return;
 end
 $$;
 
 
 -- List missing releases from tmp_bytea
 create or replace function swh_release_missing()
     returns setof sha1_git
     language plpgsql
 as $$
 begin
   return query
     select id::sha1_git from tmp_bytea t
     where not exists (
       select 1 from release r
       where r.id = t.id);
 end
 $$;
 
 
 -- Create entries in person from tmp_release
 create or replace function swh_person_add_from_release()
     returns void
     language plpgsql
 as $$
 begin
     with t as (
         select distinct author_fullname as fullname, author_name as name, author_email as email from tmp_release
     ) insert into person (fullname, name, email)
     select fullname, name, email from t
     where not exists (
         select 1
         from person p
         where t.fullname = p.fullname
     );
     return;
 end
 $$;
 
 
 -- Create entries in release from tmp_release
 create or replace function swh_release_add()
     returns void
     language plpgsql
 as $$
 begin
     perform swh_person_add_from_release();
 
     insert into release (id, target, target_type, date, date_offset, date_neg_utc_offset, name, comment, author, synthetic)
     select t.id, t.target, t.target_type, t.date, t.date_offset, t.date_neg_utc_offset, t.name, t.comment, a.id, t.synthetic
     from tmp_release t
     left join person a on a.fullname = t.author_fullname;
     return;
 end
 $$;
 
 create or replace function swh_occurrence_update_for_origin(origin_id bigint)
   returns void
   language sql
 as $$
   delete from occurrence where origin = origin_id;
   insert into occurrence (origin, branch, target, target_type)
     select origin, branch, target, target_type
     from occurrence_history
     where origin = origin_id and
           (select visit from origin_visit
            where origin = origin_id
            order by date desc
            limit 1) = any(visits);
 $$;
 
 create or replace function swh_occurrence_update_all()
   returns void
   language plpgsql
 as $$
 declare
   origin_id origin.id%type;
 begin
   for origin_id in
     select distinct id from origin
   loop
     perform swh_occurrence_update_for_origin(origin_id);
   end loop;
   return;
 end;
 $$;
 
 -- add a new origin_visit for origin origin_id at date.
 --
 -- Returns the new visit id.
 create or replace function swh_origin_visit_add(origin_id bigint, date timestamptz)
     returns bigint
     language sql
 as $$
   with last_known_visit as (
     select coalesce(max(visit), 0) as visit
     from origin_visit
     where origin = origin_id
   )
   insert into origin_visit (origin, date, visit, status)
   values (origin_id, date, (select visit from last_known_visit) + 1, 'ongoing')
   returning visit;
 $$;
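 
 -- Example: record a new visit of origin 42 starting now; the returned value
 -- is the per-origin visit counter (1 for the first visit, 2 for the next):
 --
 --   select swh_origin_visit_add(42, now());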
 
 -- add tmp_occurrence_history entries to occurrence_history
 --
 -- operates in bulk: 0. swh_mktemp(occurrence_history), 1. COPY to tmp_occurrence_history,
 -- 2. call this function
 create or replace function swh_occurrence_history_add()
     returns void
     language plpgsql
 as $$
 declare
   origin_id origin.id%type;
 begin
   -- Create or update occurrence_history
   with occurrence_history_id_visit as (
     select tmp_occurrence_history.*, object_id, visits from tmp_occurrence_history
     left join occurrence_history using(origin, branch, target, target_type)
   ),
   occurrences_to_update as (
     select object_id, visit from occurrence_history_id_visit where object_id is not null
   ),
   update_occurrences as (
     update occurrence_history
     set visits = array(select unnest(occurrence_history.visits) as e
                         union
                        select occurrences_to_update.visit as e
                        order by e)
     from occurrences_to_update
     where occurrence_history.object_id = occurrences_to_update.object_id
   )
   insert into occurrence_history (origin, branch, target, target_type, visits)
     select origin, branch, target, target_type, ARRAY[visit]
       from occurrence_history_id_visit
       where object_id is null;
 
   -- update occurrence
   for origin_id in
     select distinct origin from tmp_occurrence_history
   loop
     perform swh_occurrence_update_for_origin(origin_id);
   end loop;
   return;
 end
 $$;
 
+create or replace function swh_snapshot_add(origin bigint, visit bigint, snapshot_id snapshot.id%type)
+  returns void
+  language plpgsql
+as $$
+declare
+  snapshot_object_id snapshot.object_id%type;
+begin
+  select object_id from snapshot where id = snapshot_id into snapshot_object_id;
+  if snapshot_object_id is null then
+     insert into snapshot (id) values (snapshot_id) returning object_id into snapshot_object_id;
+     with all_branches(name, target_type, target) as (
+       select name, target_type, target from tmp_snapshot_branch
+     ), inserted as (
+       insert into snapshot_branch (name, target_type, target)
+       select name, target_type, target from all_branches
+       on conflict do nothing
+       returning object_id
+     )
+     insert into snapshot_branches (snapshot_id, branch_id)
+     select snapshot_object_id, object_id as branch_id from inserted
+     union all
+     select snapshot_object_id, object_id as branch_id
+       from all_branches ab
+       join snapshot_branch sb
+         on sb.name = ab.name
+           and sb.target_type is not distinct from ab.target_type
+           and sb.target is not distinct from ab.target;
+  end if;
+  update origin_visit ov
+    set snapshot_id = snapshot_object_id
+    where ov.origin=swh_snapshot_add.origin and ov.visit=swh_snapshot_add.visit;
+end;
+$$;
+
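+-- Illustrative loading sequence for swh_snapshot_add (origin, visit, branch
+-- name and hashes below are hypothetical):
+--
+--   select swh_mktemp_snapshot_branch();
+--   insert into tmp_snapshot_branch (name, target, target_type) values
+--     ('refs/heads/master', '\x2222222222222222222222222222222222222222',
+--      'revision');
+--   select swh_snapshot_add(42, 1, '\x1111111111111111111111111111111111111111');
+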
+create type snapshot_result as (
+  snapshot_id  sha1_git,
+  name         bytea,
+  target       bytea,
+  target_type  snapshot_target
+);
+
+create or replace function swh_snapshot_get_by_id(id snapshot.id%type)
+  returns setof snapshot_result
+  language sql
+  stable
+as $$
+  select
+    swh_snapshot_get_by_id.id as snapshot_id, name, target, target_type
+  from snapshot_branches
+  inner join snapshot_branch on snapshot_branches.branch_id = snapshot_branch.object_id
+  where snapshot_id = (select object_id from snapshot where snapshot.id = swh_snapshot_get_by_id.id)
+$$;
+
+create or replace function swh_snapshot_get_by_origin_visit(origin_id bigint, visit_id bigint)
+  returns snapshot.id%type
+  language sql
+  stable
+as $$
+  select snapshot.id
+  from origin_visit
+  left join snapshot
+  on snapshot.object_id = origin_visit.snapshot_id
+  where origin_visit.origin=origin_id and origin_visit.visit=visit_id;
+$$;
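+
+-- Example (hypothetical ids): list the branches of a snapshot, or find the
+-- snapshot recorded for a given visit:
+--
+--   select * from swh_snapshot_get_by_id('\x1111111111111111111111111111111111111111');
+--   select swh_snapshot_get_by_origin_visit(42, 1);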
 
 -- Absolute path: directory reference + complete path relative to it
 create type content_dir as (
     directory  sha1_git,
     path       unix_path
 );
 
 
 -- Find the containing directory of a given content, specified by sha1
 -- (note: *not* sha1_git).
 --
 -- Return a pair (dir_id, path) where path is a UNIX path that, from the
 -- directory root, reaches down to a file with the desired content. Return
 -- NULL if no match is found.
 --
 -- In case of multiple paths (i.e., pretty much always), an arbitrary one is
 -- chosen.
 create or replace function swh_content_find_directory(content_id sha1)
     returns content_dir
     language sql
     stable
 as $$
     with recursive path as (
 	-- Recursively build a path from the requested content to a root
 	-- directory. Each iteration returns a pair (dir_id, filename) where
 	-- filename is relative to dir_id. Stops when no parent directory can
 	-- be found.
 	(select dir.id as dir_id, dir_entry_f.name as name, 0 as depth
 	 from directory_entry_file as dir_entry_f
 	 join content on content.sha1_git = dir_entry_f.target
 	 join directory as dir on dir.file_entries @> array[dir_entry_f.id]
 	 where content.sha1 = content_id
 	 limit 1)
 	union all
 	(select dir.id as dir_id,
 		(dir_entry_d.name || '/' || path.name)::unix_path as name,
 		path.depth + 1
 	 from path
 	 join directory_entry_dir as dir_entry_d on dir_entry_d.target = path.dir_id
 	 join directory as dir on dir.dir_entries @> array[dir_entry_d.id]
 	 limit 1)
     )
     select dir_id, name from path order by depth desc limit 1;
 $$;
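 
 -- Example (hypothetical sha1): pick one (directory, path) pair under which
 -- this content appears as a file entry:
 --
 --   select * from swh_content_find_directory(
 --       '\x0000000000000000000000000000000000000000');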
 
 
 -- Walk the children of a given revision until one that is the target of an
 -- occurrence is found. Return all occurrence information if one is found,
 -- NULL otherwise.
 create or replace function swh_revision_find_occurrence(revision_id sha1_git)
     returns occurrence
     language sql
     stable
 as $$
 	select origin, branch, target, target_type
   from swh_revision_list_children(ARRAY[revision_id] :: bytea[]) as rev_list
 	left join occurrence_history occ_hist
   on rev_list.id = occ_hist.target
 	where occ_hist.origin is not null and
         occ_hist.target_type = 'revision'
 	limit 1;
 $$;
 
 -- Find the visit of origin id closest to date visit_date
 create or replace function swh_visit_find_by_date(origin bigint, visit_date timestamptz default NOW())
     returns origin_visit
     language sql
     stable
 as $$
   with closest_two_visits as ((
     select ov, (date - visit_date) as interval
     from origin_visit ov
     where ov.origin = swh_visit_find_by_date.origin
           and ov.date >= visit_date
     order by ov.date asc
     limit 1
   ) union (
     select ov, (visit_date - date) as interval
     from origin_visit ov
     where ov.origin = swh_visit_find_by_date.origin
           and ov.date < visit_date
     order by ov.date desc
     limit 1
   )) select (ov).* from closest_two_visits order by interval limit 1
 $$;
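 
 -- Example: with visits of origin 42 on 2017-01-01 and 2017-02-01, looking up
 -- 2017-01-20 keeps the two surrounding visits and returns the 2017-02-01 one
 -- (12 days away, against 19 days for 2017-01-01):
 --
 --   select * from swh_visit_find_by_date(42, '2017-01-20');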
 
 -- Retrieve the most recent visit of a given origin
 create or replace function swh_visit_get(origin bigint)
     returns origin_visit
     language sql
     stable
 as $$
     select *
     from origin_visit
     where origin_visit.origin=swh_visit_get.origin
     order by date desc
 $$;
 
 
 -- Retrieve occurrences by filtering on origin_id and optionally on
 -- branch_name and/or date
 create or replace function swh_occurrence_get_by(
        origin_id bigint,
        branch_name bytea default NULL,
        date timestamptz default NULL)
     returns setof occurrence_history
     language plpgsql
 as $$
 declare
     filters text[] := array[] :: text[];  -- AND-clauses used to filter occurrences
     visit_id bigint;
     q text;
 begin
     if origin_id is null then
         raise exception 'Needs an origin_id to get an occurrence.';
     end if;
     filters := filters || format('origin = %L', origin_id);
     if branch_name is not null then
         filters := filters || format('branch = %L', branch_name);
     end if;
     if date is not null then
         select visit from swh_visit_find_by_date(origin_id, date) into visit_id;
     else
         select visit from origin_visit where origin = origin_id order by origin_visit.date desc limit 1 into visit_id;
     end if;
     if visit_id is null then
         return;
     end if;
     filters := filters || format('%L = any(visits)', visit_id);
 
     q = format('select * from occurrence_history where %s',
                array_to_string(filters, ' and '));
     return query execute q;
 end
 $$;
 
 
 -- Retrieve revisions matching the given occurrence criteria
 create or replace function swh_revision_get_by(
        origin_id bigint,
        branch_name bytea default NULL,
        date timestamptz default NULL)
     returns setof revision_entry
     language sql
     stable
 as $$
     select r.id, r.date, r.date_offset, r.date_neg_utc_offset,
         r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
         r.type, r.directory, r.message,
         a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic,
         array(select rh.parent_id::bytea
             from revision_history rh
             where rh.id = r.id
             order by rh.parent_rank
         ) as parents, r.object_id
     from swh_occurrence_get_by(origin_id, branch_name, date) as occ
     inner join revision r on occ.target = r.id
     left join person a on a.id = r.author
     left join person c on c.id = r.committer;
 $$;
 
 -- Retrieve the releases of an origin, through its revision occurrences
 create or replace function swh_release_get_by(
        origin_id bigint)
     returns setof release_entry
     language sql
     stable
 as $$
    select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset,
         r.name, r.comment, r.synthetic, a.id as author_id, a.fullname as author_fullname,
         a.name as author_name, a.email as author_email, r.object_id
     from release r
     inner join occurrence_history occ on occ.target = r.target
     left join person a on a.id = r.author
     where occ.origin = origin_id and occ.target_type = 'revision' and r.target_type = 'revision';
 $$;
 
 
 create type object_found as (
     sha1_git   sha1_git,
     type       object_type,
     id         bytea,       -- sha1 or sha1_git depending on object_type
     object_id  bigint
 );
 
 -- Find objects by sha1_git, return their type and their main identifier
 create or replace function swh_object_find_by_sha1_git()
     returns setof object_found
     language plpgsql
 as $$
 begin
     return query
     with known_objects as ((
         select id as sha1_git, 'release'::object_type as type, id, object_id from release r
         where exists (select 1 from tmp_bytea t where t.id = r.id)
     ) union all (
         select id as sha1_git, 'revision'::object_type as type, id, object_id from revision r
         where exists (select 1 from tmp_bytea t where t.id = r.id)
     ) union all (
         select id as sha1_git, 'directory'::object_type as type, id, object_id from directory d
         where exists (select 1 from tmp_bytea t where t.id = d.id)
     ) union all (
         select sha1_git as sha1_git, 'content'::object_type as type, sha1 as id, object_id from content c
         where exists (select 1 from tmp_bytea t where t.id = c.sha1_git)
     ))
     select t.id::sha1_git as sha1_git, k.type, k.id, k.object_id from tmp_bytea t
       left join known_objects k on t.id = k.sha1_git;
 end
 $$;
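 
 -- Example: stage candidate sha1_git values in tmp_bytea, then resolve each
 -- one to its object type (hash hypothetical):
 --
 --   select swh_mktemp_bytea();
 --   insert into tmp_bytea values ('\x0000000000000000000000000000000000000000');
 --   select * from swh_object_find_by_sha1_git();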
 
 -- Create entries in entity_history from tmp_entity_history
 --
 -- TODO: do something smarter to compress the entries if the data
 -- didn't change.
 create or replace function swh_entity_history_add()
     returns void
     language plpgsql
 as $$
 begin
     insert into entity_history (
         uuid, parent, name, type, description, homepage, active, generated, lister_metadata, metadata, validity
     ) select * from tmp_entity_history;
     return;
 end
 $$;
 
 
 create or replace function swh_update_entity_from_entity_history()
     returns trigger
     language plpgsql
 as $$
 begin
     insert into entity (uuid, parent, name, type, description, homepage, active, generated,
       lister_metadata, metadata, last_seen, last_id)
       select uuid, parent, name, type, description, homepage, active, generated,
              lister_metadata, metadata, unnest(validity), id
       from entity_history
       where uuid = NEW.uuid
       order by unnest(validity) desc limit 1
     on conflict (uuid) do update set
       parent = EXCLUDED.parent,
       name = EXCLUDED.name,
       type = EXCLUDED.type,
       description = EXCLUDED.description,
       homepage = EXCLUDED.homepage,
       active = EXCLUDED.active,
       generated = EXCLUDED.generated,
       lister_metadata = EXCLUDED.lister_metadata,
       metadata = EXCLUDED.metadata,
       last_seen = EXCLUDED.last_seen,
       last_id = EXCLUDED.last_id;
 
     return null;
 end
 $$;
 
 create trigger update_entity
   after insert or update
   on entity_history
   for each row
   execute procedure swh_update_entity_from_entity_history();
 
 -- map an id of tmp_entity_lister to a full entity
 create type entity_id as (
     id               bigint,
     uuid             uuid,
     parent           uuid,
     name             text,
     type             entity_type,
     description      text,
     homepage         text,
     active           boolean,
     generated        boolean,
     lister_metadata  jsonb,
     metadata         jsonb,
     last_seen        timestamptz,
     last_id          bigint
 );
 
 -- find the uuid of the entity rows whose lister_metadata matches the
 -- metadata contained in tmp_entity_lister
 create or replace function swh_entity_from_tmp_entity_lister()
     returns setof entity_id
     language plpgsql
 as $$
 begin
   return query
     select t.id, e.*
     from tmp_entity_lister t
     left join entity e
     on e.lister_metadata @> t.lister_metadata;
   return;
 end
 $$;
 
 create or replace function swh_entity_get(entity_uuid uuid)
     returns setof entity
     language sql
     stable
 as $$
   with recursive entity_hierarchy as (
   select e.*
     from entity e where uuid = entity_uuid
     union
     select p.*
     from entity_hierarchy e
     join entity p on e.parent = p.uuid
   )
   select *
   from entity_hierarchy;
 $$;
 
 
 -- Object listing by object_id
 
 create or replace function swh_content_list_by_object_id(
     min_excl bigint,
     max_incl bigint
 )
     returns setof content
     language sql
     stable
 as $$
     select * from content
     where object_id > min_excl and object_id <= max_incl
     order by object_id;
 $$;
 
 create or replace function swh_revision_list_by_object_id(
     min_excl bigint,
     max_incl bigint
 )
     returns setof revision_entry
     language sql
     stable
 as $$
     with revs as (
         select * from revision
         where object_id > min_excl and object_id <= max_incl
     )
     select r.id, r.date, r.date_offset, r.date_neg_utc_offset,
            r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
            r.type, r.directory, r.message,
            a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic,
            array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank)
                as parents, r.object_id
     from revs r
     left join person a on a.id = r.author
     left join person c on c.id = r.committer
     order by r.object_id;
 $$;
 
 create or replace function swh_release_list_by_object_id(
     min_excl bigint,
     max_incl bigint
 )
     returns setof release_entry
     language sql
     stable
 as $$
     with rels as (
         select * from release
         where object_id > min_excl and object_id <= max_incl
     )
     select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment,
            r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id
     from rels r
     left join person p on p.id = r.author
     order by r.object_id;
 $$;
 
 
 create or replace function swh_occurrence_by_origin_visit(origin_id bigint, visit_id bigint)
     returns setof occurrence
     language sql
     stable
 as $$
   select origin, branch, target, target_type from occurrence_history
   where origin = origin_id and visit_id = ANY(visits);
 $$;
 
 -- origin_metadata functions
 create type origin_metadata_signature as (
     id bigint,
     origin_id bigint,
     discovery_date timestamptz,
     tool_id bigint,
     metadata jsonb,
     provider_id integer,
     provider_name text,
     provider_type text,
     provider_url  text
 );
 create or replace function swh_origin_metadata_get_by_origin(
        origin integer)
     returns setof origin_metadata_signature
     language sql
     stable
 as $$
     select om.id as id, origin_id, discovery_date, tool_id, om.metadata,
            mp.id as provider_id, provider_name, provider_type, provider_url
     from origin_metadata as om
     inner join metadata_provider mp on om.provider_id = mp.id
     where om.origin_id = origin
     order by discovery_date desc;
 $$;
 
 create or replace function swh_origin_metadata_get_by_provider_type(
        origin integer,
        type text)
     returns setof origin_metadata_signature
     language sql
     stable
 as $$
     select om.id as id, origin_id, discovery_date, tool_id, om.metadata,
            mp.id as provider_id, provider_name, provider_type, provider_url
     from origin_metadata as om
     inner join metadata_provider mp on om.provider_id = mp.id
     where om.origin_id = origin
     and mp.provider_type = type
     order by discovery_date desc;
 $$;
 -- end origin_metadata functions
 
 -- add tmp_tool entries to tool,
 -- skipping duplicates if any.
 --
 -- operates in bulk: 0. create temporary tmp_tool, 1. COPY to
 -- it, 2. call this function to insert, filtering out duplicates
 create or replace function swh_tool_add()
     returns setof tool
     language plpgsql
 as $$
 begin
       insert into tool(name, version, configuration)
       select name, version, configuration from tmp_tool tmp
       on conflict(name, version, configuration) do nothing;
 
       return query
           select id, name, version, configuration
           from tmp_tool join tool
               using(name, version, configuration);
 
       return;
 end
 $$;
 
 
 -- simple counter mapping a textual label to an integer value
 create type counter as (
     label  text,
     value  bigint
 );
 
 -- return statistics about the number of tuples in various SWH tables
 --
 -- Note: the returned values come from the object_counts cache table, which
 -- is only refreshed when swh_update_counter is called
 create or replace function swh_stat_counters()
     returns setof counter
     language sql
     stable
 as $$
     select object_type as label, value as value
     from object_counts
     where object_type in (
         'content',
         'directory',
         'directory_entry_dir',
         'directory_entry_file',
         'directory_entry_rev',
         'occurrence',
         'occurrence_history',
         'origin',
         'origin_visit',
         'person',
         'entity',
         'entity_history',
         'release',
         'revision',
         'revision_history',
         'skipped_content'
     );
 $$;
 
 create or replace function swh_update_counter(object_type text)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
 	insert into object_counts
     (value, last_update, object_type)
   values
     ((select count(*) from %1$I), NOW(), %1$L)
   on conflict (object_type) do update set
     value = excluded.value,
     last_update = excluded.last_update',
   object_type);
     return;
 end;
 $$;
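 
 -- Example: refresh the cached counter for the content table, then read all
 -- counters back:
 --
 --   select swh_update_counter('content');
 --   select * from swh_stat_counters();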
diff --git a/sql/swh-indexes.sql b/sql/swh-indexes.sql
index 1f5c239fe..5987088a5 100644
--- a/sql/swh-indexes.sql
+++ b/sql/swh-indexes.sql
@@ -1,223 +1,253 @@
 -- content
 
 create unique index concurrently content_pkey on content(sha1);
 create unique index concurrently on content(sha1_git);
 create index concurrently on content(sha256);
 create index concurrently on content(blake2s256);
 create index concurrently on content(ctime);  -- TODO use a BRIN index here (postgres >= 9.5)
 create unique index concurrently on content(object_id);
 
 alter table content add primary key using index content_pkey;
 
 
 -- entity_history
 
 create unique index concurrently entity_history_pkey on entity_history(id);
 create index concurrently on entity_history(uuid);
 create index concurrently on entity_history(name);
 
 alter table entity_history add primary key using index entity_history_pkey;
 
 -- entity
 
 create unique index concurrently entity_pkey on entity(uuid);
 
 create index concurrently on entity(name);
 create index concurrently on entity using gin(lister_metadata jsonb_path_ops);
 
 alter table entity add primary key using index entity_pkey;
 alter table entity add constraint entity_parent_fkey foreign key (parent) references entity(uuid) deferrable initially deferred not valid;
 alter table entity validate constraint entity_parent_fkey;
 alter table entity add constraint entity_last_id_fkey foreign key (last_id) references entity_history(id) not valid;
 alter table entity validate constraint entity_last_id_fkey;
 
 -- entity_equivalence
 
 create unique index concurrently entity_equivalence_pkey on entity_equivalence(entity1, entity2);
 alter table entity_equivalence add primary key using index entity_equivalence_pkey;
 
 
 alter table entity_equivalence add constraint "entity_equivalence_entity1_fkey" foreign key (entity1) references entity(uuid) not valid;
 alter table entity_equivalence validate constraint entity_equivalence_entity1_fkey;
 alter table entity_equivalence add constraint "entity_equivalence_entity2_fkey" foreign key (entity2) references entity(uuid) not valid;
 alter table entity_equivalence validate constraint entity_equivalence_entity2_fkey;
 alter table entity_equivalence add constraint "order_entities" check (entity1 < entity2) not valid;
 alter table entity_equivalence validate constraint order_entities;
 
 -- listable_entity
 
 create unique index concurrently listable_entity_pkey on listable_entity(uuid);
 alter table listable_entity add primary key using index listable_entity_pkey;
 
 alter table listable_entity add constraint listable_entity_uuid_fkey foreign key (uuid) references entity(uuid) not valid;
 alter table listable_entity validate constraint listable_entity_uuid_fkey;
 
 -- list_history
 
 create unique index concurrently list_history_pkey on list_history(id);
 alter table list_history add primary key using index list_history_pkey;
 
 alter table list_history add constraint list_history_entity_fkey foreign key (entity) references listable_entity(uuid) not valid;
 alter table list_history validate constraint list_history_entity_fkey;
 
 -- origin
 create unique index concurrently origin_pkey on origin(id);
 alter table origin add primary key using index origin_pkey;
 
 create index concurrently on origin(type, url);
 
 alter table origin add constraint origin_lister_fkey foreign key (lister) references listable_entity(uuid) not valid;
 alter table origin validate constraint origin_lister_fkey;
 
 alter table origin add constraint origin_project_fkey foreign key (project) references entity(uuid) not valid;
 alter table origin validate constraint origin_project_fkey;
 
 -- skipped_content
 
 alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256);
 
 create index concurrently on skipped_content(sha1);
 create index concurrently on skipped_content(sha1_git);
 create index concurrently on skipped_content(sha256);
 create index concurrently on skipped_content(blake2s256);
 create unique index concurrently on skipped_content(object_id);
 
 alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table skipped_content validate constraint skipped_content_origin_fkey;
 
 -- fetch_history
 
 create unique index concurrently fetch_history_pkey on fetch_history(id);
 alter table fetch_history add primary key using index fetch_history_pkey;
 
 alter table fetch_history add constraint fetch_history_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table fetch_history validate constraint fetch_history_origin_fkey;
 
 -- directory
 
 create unique index concurrently directory_pkey on directory(id);
 alter table directory add primary key using index directory_pkey;
 
 create index concurrently on directory using gin (dir_entries);
 create index concurrently on directory using gin (file_entries);
 create index concurrently on directory using gin (rev_entries);
 create unique index concurrently on directory(object_id);
 
 -- directory_entry_dir
 
 create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id);
 alter table directory_entry_dir add primary key using index directory_entry_dir_pkey;
 
 create unique index concurrently on directory_entry_dir(target, name, perms);
 
 -- directory_entry_file
 
 create unique index concurrently directory_entry_file_pkey on directory_entry_file(id);
 alter table directory_entry_file add primary key using index directory_entry_file_pkey;
 
 create unique index concurrently on directory_entry_file(target, name, perms);
 
 -- directory_entry_rev
 
 create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id);
 alter table directory_entry_rev add primary key using index directory_entry_rev_pkey;
 
 create unique index concurrently on directory_entry_rev(target, name, perms);
 
 -- person
 create unique index concurrently person_pkey on person(id);
 alter table person add primary key using index person_pkey;
 
 create unique index concurrently on person(fullname);
 create index concurrently on person(name);
 create index concurrently on person(email);
 
 -- revision
 create unique index concurrently revision_pkey on revision(id);
 alter table revision add primary key using index revision_pkey;
 
 alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid;
 alter table revision validate constraint revision_author_fkey;
 alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid;
 alter table revision validate constraint revision_committer_fkey;
 
 create index concurrently on revision(directory);
 create unique index concurrently on revision(object_id);
 
 -- revision_history
 create unique index concurrently revision_history_pkey on revision_history(id, parent_rank);
 alter table revision_history add primary key using index revision_history_pkey;
 
 create index concurrently on revision_history(parent_id);
 
 alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid;
 alter table revision_history validate constraint revision_history_id_fkey;
 
+-- snapshot
+create unique index concurrently snapshot_pkey on snapshot(object_id);
+alter table snapshot add primary key using index snapshot_pkey;
+
+create unique index concurrently on snapshot(id);
+
+-- snapshot_branch
+create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id);
+alter table snapshot_branch add primary key using index snapshot_branch_pkey;
+
+create unique index concurrently on snapshot_branch (target_type, target, name);
+alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid;
+alter table snapshot_branch validate constraint snapshot_branch_target_check;
+alter table snapshot_branch add constraint snapshot_target_check check (target_type not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid;
+alter table snapshot_branch validate constraint snapshot_target_check;
+
+create unique index concurrently on snapshot_branch (name) where target_type is null and target is null;
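+
+-- A sketch of the semantics these constraints encode (hypothetical
+-- values): a branch row either has both a target and a target_type or
+-- neither (a dangling branch); targets of intrinsic types must be 20-byte
+-- identifiers, while 'alias' targets hold the name of another branch.
+--
+--   insert into snapshot_branch (name, target, target_type)
+--     values ('HEAD', 'refs/heads/master', 'alias');        -- accepted
+--   insert into snapshot_branch (name, target, target_type)
+--     values ('refs/heads/gone', null, null);               -- accepted
+--   insert into snapshot_branch (name, target, target_type)
+--     values ('bad', '\x0042', 'revision');                 -- rejected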
+
+-- snapshot_branches
+create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id);
+alter table snapshot_branches add primary key using index snapshot_branches_pkey;
+
+alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid;
+alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey;
+
+alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid;
+alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey;
+
 -- origin_visit
 create unique index concurrently origin_visit_pkey on origin_visit(origin, visit);
 alter table origin_visit add primary key using index origin_visit_pkey;
 
 create index concurrently on origin_visit(date);
 
 alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table origin_visit validate constraint origin_visit_origin_fkey;
 
+alter table origin_visit add constraint origin_visit_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid;
+alter table origin_visit validate constraint origin_visit_snapshot_id_fkey;
+
 -- occurrence_history
 create unique index concurrently occurrence_history_pkey on occurrence_history(object_id);
 alter table occurrence_history add primary key using index occurrence_history_pkey;
 
 create index concurrently on occurrence_history(target, target_type);
 create index concurrently on occurrence_history(origin, branch);
 create unique index concurrently on occurrence_history(origin, branch, target, target_type);
 
 alter table occurrence_history add constraint occurrence_history_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table occurrence_history validate constraint occurrence_history_origin_fkey;
 
 -- occurrence
 create unique index concurrently occurrence_pkey on occurrence(origin, branch);
 alter table occurrence add primary key using index occurrence_pkey;
 
 alter table occurrence add constraint occurrence_origin_fkey foreign key (origin) references origin(id) not valid;
 alter table occurrence validate constraint occurrence_origin_fkey;
 
-
 -- release
 create unique index concurrently release_pkey on release(id);
 alter table release add primary key using index release_pkey;
 
 create index concurrently on release(target, target_type);
 create unique index concurrently on release(object_id);
 
 alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid;
 alter table release validate constraint release_author_fkey;
 
 -- tool
 create unique index tool_pkey on tool(id);
 alter table tool add primary key using index tool_pkey;
 
 create unique index on tool(name, version, configuration);
 
 -- metadata_provider
 create unique index concurrently metadata_provider_pkey on metadata_provider(id);
 alter table metadata_provider add primary key using index metadata_provider_pkey;
 
 create index concurrently on metadata_provider(provider_name, provider_url);
 
 -- origin_metadata
 create unique index concurrently origin_metadata_pkey on origin_metadata(id);
 alter table origin_metadata add primary key using index origin_metadata_pkey;
 
 create index concurrently on origin_metadata(origin_id, provider_id, tool_id);
 
 alter table origin_metadata add constraint origin_metadata_origin_fkey foreign key (origin_id) references origin(id) not valid;
 alter table origin_metadata validate constraint origin_metadata_origin_fkey;
 
 alter table origin_metadata add constraint origin_metadata_provider_fkey foreign key (provider_id) references metadata_provider(id) not valid;
 alter table origin_metadata validate constraint origin_metadata_provider_fkey;
 
 alter table origin_metadata add constraint origin_metadata_tool_fkey foreign key (tool_id) references tool(id) not valid;
 alter table origin_metadata validate constraint origin_metadata_tool_fkey;
 
 -- object_counts
 create unique index concurrently object_counts_pkey on object_counts(object_type);
 alter table object_counts add primary key using index object_counts_pkey;
diff --git a/sql/swh-schema.sql b/sql/swh-schema.sql
index 2faf7c2a9..26f42bc5f 100644
--- a/sql/swh-schema.sql
+++ b/sql/swh-schema.sql
@@ -1,412 +1,434 @@
 ---
 --- Software Heritage Data Model
 ---
 
 -- drop schema if exists swh cascade;
 -- create schema swh;
 -- set search_path to swh;
 
 create table dbversion
 (
   version     int primary key,
   release     timestamptz,
   description text
 );
 
 insert into dbversion(version, release, description)
-      values(114, now(), 'Work In Progress');
+      values(115, now(), 'Work In Progress');
 
 -- a SHA1 checksum (not necessarily originating from Git)
 create domain sha1 as bytea check (length(value) = 20);
 
 -- a Git object ID, i.e., a SHA1 checksum
 create domain sha1_git as bytea check (length(value) = 20);
 
 -- a SHA256 checksum
 create domain sha256 as bytea check (length(value) = 32);
 
 -- a blake2 checksum
 create domain blake2s256 as bytea check (length(value) = 32);
 
 -- UNIX path (absolute, relative, individual path component, etc.)
 create domain unix_path as bytea;
 
 -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod
 create domain file_perms as int;
 
 -- Checksums about actual file content. Note that the content itself is not
 -- stored in the DB, but on external (key-value) storage. A single checksum is
 -- used as key there, but the others can be used to verify that we do not
 -- unknowingly inject content collisions.
 create table content
 (
   sha1       sha1 not null,
   sha1_git   sha1_git not null,
   sha256     sha256 not null,
   blake2s256 blake2s256,
   length     bigint not null,
   ctime      timestamptz not null default now(),
              -- creation time, i.e. time of (first) injection into the storage
   status     content_status not null default 'visible',
   object_id  bigserial
 );
 
 
 -- Entities constitute a typed hierarchy of organization, hosting
 -- facilities, groups, people and software projects.
 --
 -- Examples of entities: Software Heritage, Debian, GNU, GitHub,
 -- Apache, The Linux Foundation, the Debian Python Modules Team, the
 -- torvalds GitHub user, the torvalds/linux GitHub project.
 --
 -- The data model is hierarchical (via the parent attribute) and might
 -- store sub-branches of existing entities. The key feature of an
 -- entity is that it might be *listed* (if it is available in
 -- listable_entity) to retrieve information about its content, i.e.,
 -- sub-entities, projects, origins.
 
 -- The history of entities. Allows us to keep historical metadata
 -- about entities.  The temporal invariant is the uuid. Root
 -- organization uuids are manually generated (and available in
 -- swh-data.sql).
 --
 -- For generated entities (generated = true), we can provide
 -- generation_metadata to allow listers to retrieve the uuids of previous
 -- iterations of the entity.
 --
 -- Inactive entities that have been active in the past (active =
 -- false) should register the timestamp at which we saw them
 -- deactivate, in a new entry of entity_history.
 create table entity_history
 (
   id               bigserial not null,
   uuid             uuid,
   parent           uuid,             -- should reference entity_history(uuid)
   name             text not null,
   type             entity_type not null,
   description      text,
   homepage         text,
   active           boolean not null, -- whether the entity was seen on the last listing
   generated        boolean not null, -- whether this entity has been generated by a lister
   lister_metadata  jsonb,            -- lister-specific metadata, used for queries
   metadata         jsonb,
   validity         timestamptz[]     -- timestamps at which we have seen this entity
 );
 
 -- The entity table provides a view of the latest information on a
 -- given entity. It is updated via a trigger on entity_history.
 create table entity
 (
   uuid             uuid not null,
   parent           uuid,
   name             text not null,
   type             entity_type not null,
   description      text,
   homepage         text,
   active           boolean not null, -- whether the entity was seen on the last listing
   generated        boolean not null, -- whether this entity has been generated by a lister
   lister_metadata  jsonb,            -- lister-specific metadata, used for queries
   metadata         jsonb,
   last_seen        timestamptz,      -- last listing time or disappearance time for active=false
   last_id          bigint            -- last listing id
 );
 
 -- Register the equivalence between two entities. Allows sideways
 -- navigation in the entity table
 create table entity_equivalence
 (
   entity1 uuid,
   entity2 uuid
 );
 
 -- Register a lister for a specific entity.
 create table listable_entity
 (
   uuid         uuid,
   enabled      boolean not null default true, -- do we list this entity automatically?
   list_engine  text,  -- crawler to be used to list entity's content
   list_url     text,  -- root URL to start the listing
   list_params  jsonb,  -- org-specific listing parameter
   latest_list  timestamptz  -- last time the entity's content has been listed
 );
 
 -- Log of all entity listings (i.e., entity crawling) that have been
 -- done in the past, or are still ongoing.
 create table list_history
 (
   id        bigserial not null,
   date      timestamptz not null,
   status    boolean,   -- true if and only if the listing has been successful
   result    jsonb,     -- more detailed return value, depending on status
   stdout    text,
   stderr    text,
   duration  interval,  -- fetch duration, or NULL if still ongoing
   entity    uuid
 );
 
 
 -- An origin is a place, identified by a URL, where software can be found. We
 -- support different kinds of origins, e.g., git and other VCS repositories,
 -- web pages that list tarball URLs (e.g., http://www.kernel.org), indirect
 -- tarball URLs (e.g., http://www.example.org/latest.tar.gz), etc. The key
 -- feature of an origin is that it can be *fetched* (wget, git clone, svn
 -- checkout, etc.) to retrieve all the contained software.
 create table origin
 (
   id       bigserial not null,
   type     text, -- TODO use an enum here (?)
   url      text not null,
   lister   uuid,
   project  uuid
 );
 
 -- Content we have seen but skipped for some reason. This table is
 -- separate from the content table as we might not have the sha1
 -- checksum of that data (for instance when we inject git
 -- repositories, objects that are too big will be skipped here, and we
 -- will only know their sha1_git). 'reason' contains the reason the
 -- content was skipped. origin is a nullable column that allows finding
 -- out which origin contains that skipped content.
 create table skipped_content
 (
   sha1       sha1,
   sha1_git   sha1_git,
   sha256     sha256,
   blake2s256 blake2s256,
   length     bigint not null,
   ctime      timestamptz not null default now(),
   status     content_status not null default 'absent',
   reason     text not null,
   origin     bigint,
   object_id  bigserial
 );
 
 -- Log of all origin fetches (i.e., origin crawling) that have been done in the
 -- past, or are still ongoing. Similar to list_history, but for origins.
 create table fetch_history
 (
   id        bigserial,
   origin    bigint,
   date      timestamptz not null,
   status    boolean,  -- true if and only if the fetch has been successful
   result    jsonb,     -- more detailed returned values, times, etc...
   stdout    text,
   stderr    text,     -- null when status is true, filled otherwise
   duration  interval  -- fetch duration, or NULL if still ongoing
 );
 
 
 -- A file-system directory.  A directory is a list of directory entries (see
 -- tables: directory_entry_{dir,file}).
 --
 -- To list the contents of a directory:
 -- 1. list the contained directory_entry_dir using array dir_entries
 -- 2. list the contained directory_entry_file using array file_entries
 -- 3. list the contained directory_entry_rev using array rev_entries
 -- 4. UNION
 --
 -- Synonyms/mappings:
 -- * git: tree
 create table directory
 (
   id            sha1_git,
   dir_entries   bigint[],  -- sub-directories, reference directory_entry_dir
   file_entries  bigint[],  -- contained files, reference directory_entry_file
   rev_entries   bigint[],  -- mounted revisions, reference directory_entry_rev
   object_id     bigserial  -- short object identifier
 );
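 
 -- For example (a hypothetical query sketch; the swh_directory_walk*
 -- functions used in swh/storage/db.py wrap similar logic server-side),
 -- the sub-directories of a given directory can be listed with:
 --
 --   select e.name, e.target from directory d
 --   join directory_entry_dir e on e.id = any (d.dir_entries)
 --   where d.id = '\x...'::sha1_git;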
 
 -- A directory entry pointing to a sub-directory.
 create table directory_entry_dir
 (
   id      bigserial,
   target  sha1_git,   -- id of target directory
   name    unix_path,  -- path name, relative to containing dir
   perms   file_perms  -- unix-like permissions
 );
 
 -- A directory entry pointing to a file.
 create table directory_entry_file
 (
   id      bigserial,
   target  sha1_git,   -- id of target file
   name    unix_path,  -- path name, relative to containing dir
   perms   file_perms  -- unix-like permissions
 );
 
 -- A directory entry pointing to a revision.
 create table directory_entry_rev
 (
   id      bigserial,
   target  sha1_git,   -- id of target revision
   name    unix_path,  -- path name, relative to containing dir
   perms   file_perms  -- unix-like permissions
 );
 
 create table person
 (
   id        bigserial,
   name      bytea,          -- advisory: not null if we managed to parse a name
   email     bytea,          -- advisory: not null if we managed to parse an email
   fullname  bytea not null  -- freeform specification; what is actually used in the checksums
                             --     will usually be of the form 'name <email>'
 );
 
 -- A snapshot of a software project at a specific point in time.
 --
 -- Synonyms/mappings:
 -- * git / subversion / etc: commit
 -- * tarball: a specific tarball
 --
 -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in
 -- case of merges) parent revisions. Each revision points to a directory, i.e.,
 -- a file-system tree containing files and directories.
 create table revision
 (
   id                    sha1_git,
   date                  timestamptz,
   date_offset           smallint,
   committer_date        timestamptz,
   committer_date_offset smallint,
   type                  revision_type not null,
   directory             sha1_git,  -- file-system tree
   message               bytea,
   author                bigint,
   committer             bigint,
   synthetic             boolean not null default false,  -- true if synthetic (cf. swh-loader-tar)
   metadata              jsonb, -- extra metadata (tarball checksums, extra commit information, etc...)
   object_id             bigserial,
   date_neg_utc_offset   boolean,
   committer_date_neg_utc_offset boolean
 );
 
 
 -- either this table or the sha1_git[] column on the revision table
 create table revision_history
 (
   id           sha1_git,
   parent_id    sha1_git,
   parent_rank  int not null default 0
     -- parent position in merge commits, 0-based
 );
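 
 -- For example (a hypothetical query sketch), the parents of a merge
 -- commit can be listed in order with:
 --
 --   select parent_id from revision_history
 --   where id = '\x...'::sha1_git order by parent_rank;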
 
 -- The timestamps at which Software Heritage has visited the given origin.
 create table origin_visit
 (
-  origin    bigint not null,
-  visit     bigint not null,
-  date      timestamptz not null,
-  status    origin_visit_status not null,
-  metadata  jsonb
+  origin       bigint not null,
+  visit        bigint not null,
+  date         timestamptz not null,
+  status       origin_visit_status not null,
+  metadata     jsonb,
+  snapshot_id  bigint
 );
 
 comment on column origin_visit.origin is 'Visited origin';
 comment on column origin_visit.visit is 'Visit number for that origin';
 comment on column origin_visit.date is 'Visit date for that origin';
 comment on column origin_visit.status is 'Visit status for that origin';
 comment on column origin_visit.metadata is 'Metadata associated with the visit';
+comment on column origin_visit.snapshot_id is 'id of the snapshot associated with the visit';
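+--
+-- For example (a hypothetical query sketch), the branches recorded for a
+-- given visit can be retrieved by joining through the snapshot tables
+-- defined below:
+--
+--   select b.name, b.target, b.target_type
+--   from origin_visit v
+--   join snapshot_branches sb on sb.snapshot_id = v.snapshot_id
+--   join snapshot_branch b on b.object_id = sb.branch_id
+--   where v.origin = 1 and v.visit = 1;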
 
 
 -- The content of software origins is indexed starting from top-level pointers
 -- called "branches". Every time we fetch some origin we store in this table
 -- where the branches pointed to at fetch time.
 --
 -- Synonyms/mappings:
 -- * git: ref (in the "git update-ref" sense)
 create table occurrence_history
 (
   origin       bigint not null,
   branch       bytea not null,        -- e.g., b"master" (for VCS), or b"sid" (for Debian)
   target       sha1_git not null,     -- ref target, e.g., commit id
   target_type  object_type not null,  -- ref target type
   visits       bigint[] not null,     -- the visits where that occurrence was valid. References
                                       -- origin_visit(visit), where o_h.origin = origin_visit.origin.
-  object_id    bigserial not null     -- short object identifier
+  object_id    bigserial not null,    -- short object identifier
+  snapshot_branch_id bigint
 );
 
 -- Materialized view of occurrence_history, storing the *current* value of each
 -- branch, as last seen by SWH.
 create table occurrence
 (
   origin    bigint,
   branch    bytea not null,
   target    sha1_git not null,
   target_type object_type not null
 );
 
+
+create table snapshot (
+  object_id  bigserial not null,
+  id         sha1_git
+);
+
+create table snapshot_branch (
+  object_id    bigserial not null,
+  name         bytea not null,
+  target       bytea,
+  target_type  snapshot_target
+);
+
+create table snapshot_branches (
+  snapshot_id  bigint not null,
+  branch_id    bigint not null
+);
+
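+-- A population sketch (hypothetical values; loaders actually go through
+-- swh_snapshot_add and a temporary branch table). Branches are stored
+-- once, deduplicated by the unique (target_type, target, name) index, and
+-- tied to each snapshot through snapshot_branches:
+--
+--   insert into snapshot (id) values ('\x...'::sha1_git);
+--   insert into snapshot_branch (name, target, target_type)
+--     values ('refs/heads/master', '\x...', 'revision');
+--   insert into snapshot_branches (snapshot_id, branch_id)
+--     values (1, 1);  -- the object_ids generated above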
+
 -- A "memorable" point in the development history of a project.
 --
 -- Synonyms/mappings:
 -- * git: tag (of the annotated kind, otherwise they are just references)
 -- * tarball: the release version number
 create table release
 (
   id          sha1_git not null,
   target      sha1_git,
   date        timestamptz,
   date_offset smallint,
   name        bytea,
   comment     bytea,
   author      bigint,
   synthetic   boolean not null default false,  -- true if synthetic (cf. swh-loader-tar)
   object_id   bigserial,
   target_type object_type not null,
   date_neg_utc_offset  boolean
 );
 
 -- Tools
 
 create table tool (
   id serial not null,
   name text not null,
   version text not null,
   configuration jsonb
 );
 
 comment on table tool is 'Tool information';
 comment on column tool.id is 'Tool identifier';
 comment on column tool.name is 'Tool name';
 comment on column tool.version is 'Tool version';
 comment on column tool.configuration is 'Tool configuration: command line, flags, etc...';
 
 
 create table metadata_provider (
   id            serial not null,
   provider_name text   not null,
   provider_type text   not null,
   provider_url  text,
   metadata      jsonb
 );
 
 comment on table metadata_provider is 'Metadata provider information';
 comment on column metadata_provider.id is 'Provider''s identifier';
 comment on column metadata_provider.provider_name is 'Provider''s name';
 comment on column metadata_provider.provider_url is 'Provider''s url';
 comment on column metadata_provider.metadata is 'Other metadata about provider';
 
 
 -- Metadata discovered about an origin during a listing, loading, deposit or
 -- external_catalog crawl; it also provides a translation to a defined json
 -- schema using a translation tool (tool_id)
 create table origin_metadata(
   id             bigserial     not null,  -- PK object identifier
   origin_id      bigint        not null, -- references origin(id)
   discovery_date timestamptz   not null, -- when it was extracted
   provider_id    bigint        not null, -- ex: 'hal', 'lister-github', 'loader-github'
   tool_id        bigint        not null,
   metadata       jsonb         not null
 );
 
 comment on table origin_metadata is 'keeps all metadata found concerning an origin';
 comment on column origin_metadata.id is 'the origin_metadata object''s id';
 comment on column origin_metadata.origin_id is 'the origin id for which the metadata was found';
 comment on column origin_metadata.discovery_date is 'the date of retrieval';
 comment on column origin_metadata.provider_id is 'the metadata provider: github, openhub, deposit, etc.';
 comment on column origin_metadata.tool_id is 'the tool used for extracting metadata: lister-github, etc.';
 comment on column origin_metadata.metadata is 'metadata in json format but with original terms';
 
 -- Keep a cache of object counts
 create table object_counts (
   object_type text,
   value bigint,
   last_update timestamptz
 );
diff --git a/swh/storage/api/client.py b/swh/storage/api/client.py
index e9f9d9746..dcf55f6b1 100644
--- a/swh/storage/api/client.py
+++ b/swh/storage/api/client.py
@@ -1,202 +1,213 @@
 # Copyright (C) 2015-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 
 from swh.core.api import SWHRemoteAPI
 
 from ..exc import StorageAPIError
 
 
 class RemoteStorage(SWHRemoteAPI):
     """Proxy to a remote storage API"""
     def __init__(self, url):
         super().__init__(api_exception=StorageAPIError, url=url)
 
     def check_config(self, *, check_write):
         return self.post('check_config', {'check_write': check_write})
 
     def content_add(self, content):
         return self.post('content/add', {'content': content})
 
     def content_update(self, content, keys=[]):
         return self.post('content/update', {'content': content,
                                             'keys': keys})
 
     def content_missing(self, content, key_hash='sha1'):
         return self.post('content/missing', {'content': content,
                                              'key_hash': key_hash})
 
     def content_missing_per_sha1(self, contents):
         return self.post('content/missing/sha1', {'contents': contents})
 
     def content_get(self, content):
         return self.post('content/data', {'content': content})
 
     def content_get_metadata(self, content):
         return self.post('content/metadata', {'content': content})
 
     def content_find(self, content):
         return self.post('content/present', {'content': content})
 
     def directory_add(self, directories):
         return self.post('directory/add', {'directories': directories})
 
     def directory_missing(self, directories):
         return self.post('directory/missing', {'directories': directories})
 
     def directory_get(self, directories):
         return self.post('directory', dict(directories=directories))
 
     def directory_ls(self, directory, recursive=False):
         return self.get('directory/ls', {'directory': directory,
                                          'recursive': recursive})
 
     def revision_get(self, revisions):
         return self.post('revision', {'revisions': revisions})
 
     def revision_get_by(self, origin_id, branch_name, timestamp, limit=None):
         return self.post('revision/by', dict(origin_id=origin_id,
                                              branch_name=branch_name,
                                              timestamp=timestamp,
                                              limit=limit))
 
     def revision_log(self, revisions, limit=None):
         return self.post('revision/log', {'revisions': revisions,
                                           'limit': limit})
 
     def revision_log_by(self, origin_id, branch_name, timestamp, limit=None):
         return self.post('revision/logby', {'origin_id': origin_id,
                                             'branch_name': branch_name,
                                             'timestamp': timestamp,
                                             'limit': limit})
 
     def revision_shortlog(self, revisions, limit=None):
         return self.post('revision/shortlog', {'revisions': revisions,
                                                'limit': limit})
 
     def revision_add(self, revisions):
         return self.post('revision/add', {'revisions': revisions})
 
     def revision_missing(self, revisions):
         return self.post('revision/missing', {'revisions': revisions})
 
     def release_add(self, releases):
         return self.post('release/add', {'releases': releases})
 
     def release_get(self, releases):
         return self.post('release', {'releases': releases})
 
     def release_get_by(self, origin_id, limit=None):
         return self.post('release/by', dict(origin_id=origin_id,
                                             limit=limit))
 
     def release_missing(self, releases):
         return self.post('release/missing', {'releases': releases})
 
     def object_find_by_sha1_git(self, ids):
         return self.post('object/find_by_sha1_git', {'ids': ids})
 
     def occurrence_get(self, origin_id):
         return self.post('occurrence', {'origin_id': origin_id})
 
     def occurrence_add(self, occurrences):
         return self.post('occurrence/add', {'occurrences': occurrences})
 
+    def snapshot_add(self, origin, visit, snapshot):
+        return self.post('snapshot/add', {'origin': origin, 'visit': visit,
+                                          'snapshot': snapshot})
+
+    def snapshot_get(self, snapshot_id):
+        return self.post('snapshot', {'snapshot_id': snapshot_id})
+
+    def snapshot_get_by_origin_visit(self, origin, visit):
+        return self.post('snapshot/by_origin_visit', {'origin': origin,
+                                                      'visit': visit})
+
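+    # A client-side usage sketch (hypothetical values; the snapshot payload
+    # shape shown here is an assumption, inferred from the snapshot and
+    # snapshot_branch tables in sql/swh-schema.sql):
+    #
+    #     storage = RemoteStorage('http://localhost:5002/')
+    #     storage.snapshot_add(origin=1, visit=1, snapshot={
+    #         'id': b'...',  # 20-byte intrinsic snapshot identifier
+    #         'branches': {
+    #             b'refs/heads/master': {'target': b'...',
+    #                                    'target_type': 'revision'},
+    #         },
+    #     })
+    #     storage.snapshot_get_by_origin_visit(origin=1, visit=1)
+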
     def origin_get(self, origin):
         return self.post('origin/get', {'origin': origin})
 
     def origin_search(self, url_pattern, offset=0, limit=50, regexp=False):
         return self.post('origin/search', {'url_pattern': url_pattern,
                                            'offset': offset,
                                            'limit': limit,
                                            'regexp': regexp})
 
     def origin_add(self, origins):
         return self.post('origin/add_multi', {'origins': origins})
 
     def origin_add_one(self, origin):
         return self.post('origin/add', {'origin': origin})
 
     def origin_visit_add(self, origin, ts):
         return self.post('origin/visit/add', {'origin': origin, 'ts': ts})
 
     def origin_visit_update(self, origin, visit_id, status, metadata=None):
         return self.post('origin/visit/update', {'origin': origin,
                                                  'visit_id': visit_id,
                                                  'status': status,
                                                  'metadata': metadata})
 
     def origin_visit_get(self, origin, last_visit=None, limit=None):
         return self.post('origin/visit/get', {
             'origin': origin, 'last_visit': last_visit, 'limit': limit})
 
     def origin_visit_get_by(self, origin, visit):
         return self.post('origin/visit/getby', {'origin': origin,
                                                 'visit': visit})
 
     def person_get(self, person):
         return self.post('person', {'person': person})
 
     def fetch_history_start(self, origin_id):
         return self.post('fetch_history/start', {'origin_id': origin_id})
 
     def fetch_history_end(self, fetch_history_id, data):
         return self.post('fetch_history/end',
                          {'fetch_history_id': fetch_history_id,
                           'data': data})
 
     def fetch_history_get(self, fetch_history_id):
         return self.get('fetch_history', {'id': fetch_history_id})
 
     def entity_add(self, entities):
         return self.post('entity/add', {'entities': entities})
 
     def entity_get(self, uuid):
         return self.post('entity/get', {'uuid': uuid})
 
     def entity_get_one(self, uuid):
         return self.get('entity', {'uuid': uuid})
 
     def entity_get_from_lister_metadata(self, entities):
         return self.post('entity/from_lister_metadata', {'entities': entities})
 
     def stat_counters(self):
         return self.get('stat/counters')
 
     def directory_entry_get_by_path(self, directory, paths):
         return self.post('directory/path', dict(directory=directory,
                                                 paths=paths))
 
     def tool_add(self, tools):
         return self.post('tool/add', {'tools': tools})
 
     def tool_get(self, tool):
         return self.post('tool/data', {'tool': tool})
 
     def origin_metadata_add(self, origin_id, ts, provider, tool, metadata):
         return self.post('origin/metadata/add', {'origin_id': origin_id,
                                                  'ts': ts,
                                                  'provider': provider,
                                                  'tool': tool,
                                                  'metadata': metadata})
 
     def origin_metadata_get_by(self, origin_id, provider_type=None):
         return self.post('origin/metadata/get', {
             'origin_id': origin_id,
             'provider_type': provider_type
         })
 
     def metadata_provider_add(self, provider_name, provider_type, provider_url,
                               metadata):
         return self.post('provider/add', {'provider_name': provider_name,
                                           'provider_type': provider_type,
                                           'provider_url': provider_url,
                                           'metadata': metadata})
 
     def metadata_provider_get(self, provider_id):
         return self.post('provider/get', {'provider_id': provider_id})
 
     def metadata_provider_get_by(self, provider):
         return self.post('provider/getby', {'provider': provider})
diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py
index a33e06825..9d7b556b9 100644
--- a/swh/storage/api/server.py
+++ b/swh/storage/api/server.py
@@ -1,352 +1,368 @@
 # Copyright (C) 2015-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import json
 import logging
 import click
 
 from flask import g, request
 
 from swh.core import config
 from swh.storage import get_storage
 from swh.core.api import (SWHServerAPIApp, decode_request,
                           error_handler,
                           encode_data_server as encode_data)
 
 DEFAULT_CONFIG_PATH = 'storage/storage'
 DEFAULT_CONFIG = {
     'storage': ('dict', {
         'cls': 'local',
         'args': {
             'db': 'dbname=softwareheritage-dev',
             'objstorage': {
                 'cls': 'pathslicing',
                 'args': {
                     'root': '/srv/softwareheritage/objects',
                     'slicing': '0:2/2:4/4:6',
                 },
             },
         },
     })
 }
 
 
 app = SWHServerAPIApp(__name__)
 
 
 @app.errorhandler(Exception)
 def my_error_handler(exception):
     return error_handler(exception, encode_data)
 
 
 @app.before_request
 def before_request():
     g.storage = get_storage(**app.config['storage'])
 
 
 @app.route('/')
 def index():
     return 'SWH Storage API server'
 
 
 @app.route('/check_config', methods=['POST'])
 def check_config():
     return encode_data(g.storage.check_config(**decode_request(request)))
 
 
 @app.route('/content/missing', methods=['POST'])
 def content_missing():
     return encode_data(g.storage.content_missing(**decode_request(request)))
 
 
 @app.route('/content/missing/sha1', methods=['POST'])
 def content_missing_per_sha1():
     return encode_data(g.storage.content_missing_per_sha1(
         **decode_request(request)))
 
 
 @app.route('/content/present', methods=['POST'])
 def content_find():
     return encode_data(g.storage.content_find(**decode_request(request)))
 
 
 @app.route('/content/add', methods=['POST'])
 def content_add():
     return encode_data(g.storage.content_add(**decode_request(request)))
 
 
 @app.route('/content/update', methods=['POST'])
 def content_update():
     return encode_data(g.storage.content_update(**decode_request(request)))
 
 
 @app.route('/content/data', methods=['POST'])
 def content_get():
     return encode_data(g.storage.content_get(**decode_request(request)))
 
 
 @app.route('/content/metadata', methods=['POST'])
 def content_get_metadata():
     return encode_data(g.storage.content_get_metadata(
         **decode_request(request)))
 
 
 @app.route('/directory', methods=['POST'])
 def directory_get():
     return encode_data(g.storage.directory_get(**decode_request(request)))
 
 
 @app.route('/directory/missing', methods=['POST'])
 def directory_missing():
     return encode_data(g.storage.directory_missing(**decode_request(request)))
 
 
 @app.route('/directory/add', methods=['POST'])
 def directory_add():
     return encode_data(g.storage.directory_add(**decode_request(request)))
 
 
 @app.route('/directory/path', methods=['POST'])
 def directory_entry_get_by_path():
     return encode_data(g.storage.directory_entry_get_by_path(
         **decode_request(request)))
 
 
 @app.route('/directory/ls', methods=['GET'])
 def directory_ls():
     dir = request.args['directory'].encode('utf-8', 'surrogateescape')
     rec = json.loads(request.args.get('recursive', 'False').lower())
     return encode_data(g.storage.directory_ls(dir, recursive=rec))
 
 
 @app.route('/revision/add', methods=['POST'])
 def revision_add():
     return encode_data(g.storage.revision_add(**decode_request(request)))
 
 
 @app.route('/revision', methods=['POST'])
 def revision_get():
     return encode_data(g.storage.revision_get(**decode_request(request)))
 
 
 @app.route('/revision/by', methods=['POST'])
 def revision_get_by():
     return encode_data(g.storage.revision_get_by(**decode_request(request)))
 
 
 @app.route('/revision/log', methods=['POST'])
 def revision_log():
     return encode_data(g.storage.revision_log(**decode_request(request)))
 
 
 @app.route('/revision/logby', methods=['POST'])
 def revision_log_by():
     return encode_data(g.storage.revision_log_by(**decode_request(request)))
 
 
 @app.route('/revision/shortlog', methods=['POST'])
 def revision_shortlog():
     return encode_data(g.storage.revision_shortlog(**decode_request(request)))
 
 
 @app.route('/revision/missing', methods=['POST'])
 def revision_missing():
     return encode_data(g.storage.revision_missing(**decode_request(request)))
 
 
 @app.route('/release/add', methods=['POST'])
 def release_add():
     return encode_data(g.storage.release_add(**decode_request(request)))
 
 
 @app.route('/release', methods=['POST'])
 def release_get():
     return encode_data(g.storage.release_get(**decode_request(request)))
 
 
 @app.route('/release/by', methods=['POST'])
 def release_get_by():
     return encode_data(g.storage.release_get_by(**decode_request(request)))
 
 
 @app.route('/release/missing', methods=['POST'])
 def release_missing():
     return encode_data(g.storage.release_missing(**decode_request(request)))
 
 
 @app.route('/object/find_by_sha1_git', methods=['POST'])
 def object_find_by_sha1_git():
     return encode_data(g.storage.object_find_by_sha1_git(
         **decode_request(request)))
 
 
 @app.route('/occurrence', methods=['POST'])
 def occurrence_get():
     return encode_data(g.storage.occurrence_get(**decode_request(request)))
 
 
 @app.route('/occurrence/add', methods=['POST'])
 def occurrence_add():
     return encode_data(g.storage.occurrence_add(**decode_request(request)))
 
 
+@app.route('/snapshot/add', methods=['POST'])
+def snapshot_add():
+    return encode_data(g.storage.snapshot_add(**decode_request(request)))
+
+
+@app.route('/snapshot', methods=['POST'])
+def snapshot_get():
+    return encode_data(g.storage.snapshot_get(**decode_request(request)))
+
+
+@app.route('/snapshot/by_origin_visit', methods=['POST'])
+def snapshot_get_by_origin_visit():
+    return encode_data(g.storage.snapshot_get_by_origin_visit(
+        **decode_request(request)))
+
+
 @app.route('/origin/get', methods=['POST'])
 def origin_get():
     return encode_data(g.storage.origin_get(**decode_request(request)))
 
 
 @app.route('/origin/search', methods=['POST'])
 def origin_search():
     return encode_data(g.storage.origin_search(**decode_request(request)))
 
 
 @app.route('/origin/add_multi', methods=['POST'])
 def origin_add():
     return encode_data(g.storage.origin_add(**decode_request(request)))
 
 
 @app.route('/origin/add', methods=['POST'])
 def origin_add_one():
     return encode_data(g.storage.origin_add_one(**decode_request(request)))
 
 
 @app.route('/origin/visit/get', methods=['POST'])
 def origin_visit_get():
     return encode_data(g.storage.origin_visit_get(**decode_request(request)))
 
 
 @app.route('/origin/visit/getby', methods=['POST'])
 def origin_visit_get_by():
     return encode_data(
         g.storage.origin_visit_get_by(**decode_request(request)))
 
 
 @app.route('/origin/visit/add', methods=['POST'])
 def origin_visit_add():
     return encode_data(g.storage.origin_visit_add(**decode_request(request)))
 
 
 @app.route('/origin/visit/update', methods=['POST'])
 def origin_visit_update():
     return encode_data(g.storage.origin_visit_update(
         **decode_request(request)))
 
 
 @app.route('/person', methods=['POST'])
 def person_get():
     return encode_data(g.storage.person_get(**decode_request(request)))
 
 
 @app.route('/fetch_history', methods=['GET'])
 def fetch_history_get():
     return encode_data(g.storage.fetch_history_get(request.args['id']))
 
 
 @app.route('/fetch_history/start', methods=['POST'])
 def fetch_history_start():
     return encode_data(
         g.storage.fetch_history_start(**decode_request(request)))
 
 
 @app.route('/fetch_history/end', methods=['POST'])
 def fetch_history_end():
     return encode_data(
         g.storage.fetch_history_end(**decode_request(request)))
 
 
 @app.route('/entity/add', methods=['POST'])
 def entity_add():
     return encode_data(
         g.storage.entity_add(**decode_request(request)))
 
 
 @app.route('/entity/get', methods=['POST'])
 def entity_get():
     return encode_data(
         g.storage.entity_get(**decode_request(request)))
 
 
 @app.route('/entity', methods=['GET'])
 def entity_get_one():
     return encode_data(g.storage.entity_get_one(request.args['uuid']))
 
 
 @app.route('/entity/from_lister_metadata', methods=['POST'])
 def entity_from_lister_metadata():
     return encode_data(
         g.storage.entity_get_from_lister_metadata(**decode_request(request)))
 
 
 @app.route('/tool/data', methods=['POST'])
 def tool_get():
     return encode_data(g.storage.tool_get(
         **decode_request(request)))
 
 
 @app.route('/tool/add', methods=['POST'])
 def tool_add():
     return encode_data(g.storage.tool_add(
         **decode_request(request)))
 
 
 @app.route('/origin/metadata/add', methods=['POST'])
 def origin_metadata_add():
     return encode_data(g.storage.origin_metadata_add(**decode_request(
                                                        request)))
 
 
 @app.route('/origin/metadata/get', methods=['POST'])
 def origin_metadata_get_by():
     return encode_data(g.storage.origin_metadata_get_by(**decode_request(
                                                        request)))
 
 
 @app.route('/provider/add', methods=['POST'])
 def metadata_provider_add():
     return encode_data(g.storage.metadata_provider_add(**decode_request(
                                                        request)))
 
 
 @app.route('/provider/get', methods=['POST'])
 def metadata_provider_get():
     return encode_data(g.storage.metadata_provider_get(**decode_request(
                                                        request)))
 
 
 @app.route('/provider/getby', methods=['POST'])
 def metadata_provider_get_by():
     return encode_data(g.storage.metadata_provider_get_by(**decode_request(
                                                        request)))
 
 
 @app.route('/stat/counters', methods=['GET'])
 def stat_counters():
     return encode_data(g.storage.stat_counters())
 
 
 def run_from_webserver(environ, start_response,
                        config_path=DEFAULT_CONFIG_PATH):
     """Run the WSGI app from the webserver, loading the configuration."""
     cfg = config.load_named_config(config_path, DEFAULT_CONFIG)
     app.config.update(cfg)
     handler = logging.StreamHandler()
     app.logger.addHandler(handler)
     return app(environ, start_response)
 
 
 @click.command()
 @click.argument('config-path', required=1)
 @click.option('--host', default='0.0.0.0', help="Host to run the server")
 @click.option('--port', default=5002, type=click.INT,
               help="Binding port of the server")
 @click.option('--debug/--nodebug', default=True,
               help="Indicates if the server should run in debug mode")
 def launch(config_path, host, port, debug):
     app.config.update(config.read(config_path, DEFAULT_CONFIG))
     app.run(host, port=int(port), debug=bool(debug))
 
 
 if __name__ == '__main__':
     launch()
diff --git a/swh/storage/db.py b/swh/storage/db.py
index 97064a18f..a6345b4c5 100644
--- a/swh/storage/db.py
+++ b/swh/storage/db.py
@@ -1,944 +1,985 @@
 # Copyright (C) 2015-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import binascii
 import datetime
 import enum
 import functools
 import json
 import psycopg2
 import psycopg2.extras
 import select
 import tempfile
 
 from contextlib import contextmanager
 
 
 TMP_CONTENT_TABLE = 'tmp_content'
 
 
 psycopg2.extras.register_uuid()
 
 
 def stored_procedure(stored_proc):
     """decorator to execute remote stored procedure, specified as argument
 
     Generally, the body of the decorated function should be empty. If it is
     not, the stored procedure will be executed first, then the function body.
 
     """
     def wrap(meth):
         @functools.wraps(meth)
         def _meth(self, *args, **kwargs):
             cur = kwargs.get('cur', None)
             self._cursor(cur).execute('SELECT %s()' % stored_proc)
             meth(self, *args, **kwargs)
         return _meth
     return wrap
 
 
 def jsonize(value):
     """Convert a value to a psycopg2 JSON object if necessary"""
     if isinstance(value, dict):
         return psycopg2.extras.Json(value)
 
     return value
 
 
 def entry_to_bytes(entry):
     """Convert an entry coming from the database to bytes"""
     if isinstance(entry, memoryview):
         return entry.tobytes()
     if isinstance(entry, list):
         return [entry_to_bytes(value) for value in entry]
     return entry
 
 
 def line_to_bytes(line):
     """Convert a line coming from the database to bytes"""
     if not line:
         return line
     if isinstance(line, dict):
         return {k: entry_to_bytes(v) for k, v in line.items()}
     return line.__class__(entry_to_bytes(entry) for entry in line)
 
 
 def cursor_to_bytes(cursor):
     """Yield all the data from a cursor as bytes"""
     yield from (line_to_bytes(line) for line in cursor)
 
 
 class BaseDb:
     """Base class for swh.storage.*Db.
 
     cf. swh.storage.db.Db, swh.archiver.db.ArchiverDb
 
     """
 
     @classmethod
     def connect(cls, *args, **kwargs):
         """factory method to create a DB proxy
 
         Accepts all arguments of psycopg2.connect; only some specific
         possibilities are reported below.
 
         Args:
             connstring: libpq connection string
 
         """
         conn = psycopg2.connect(*args, **kwargs)
         return cls(conn)
 
     def _cursor(self, cur_arg):
         """get a cursor: from cur_arg if given, or a fresh one otherwise
 
         meant to avoid boilerplate if/then/else in methods that proxy stored
         procedures
 
         """
         if cur_arg is not None:
             return cur_arg
         # elif self.cur is not None:
         #     return self.cur
         else:
             return self.conn.cursor()
 
     def __init__(self, conn):
         """create a DB proxy
 
         Args:
             conn: psycopg2 connection to the SWH DB
 
         """
         self.conn = conn
 
     @contextmanager
     def transaction(self):
         """context manager to execute within a DB transaction
 
         Yields:
             a psycopg2 cursor
 
         """
         with self.conn.cursor() as cur:
             try:
                 yield cur
                 self.conn.commit()
             except:
                 if not self.conn.closed:
                     self.conn.rollback()
                 raise
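 
     # Typical use (a hypothetical sketch):
     #
     #     with db.transaction() as cur:
     #         cur.execute('select 1')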
 
     def copy_to(self, items, tblname, columns, cur=None, item_cb=None):
         """Copy items' entries to table tblname with columns information.
 
         Args:
             items (iterable): rows, as dicts, to copy into tblname
             tblname (str): Destination table's name
             columns ([str]): keys to access data in items and also the
               column names in the destination table.
             item_cb (fn): optional function to apply to each item
 
         """
         def escape(data):
             if data is None:
                 return ''
             if isinstance(data, bytes):
                 return '\\x%s' % binascii.hexlify(data).decode('ascii')
             elif isinstance(data, str):
                 return '"%s"' % data.replace('"', '""')
             elif isinstance(data, datetime.datetime):
                 # We escape twice to make sure the string generated by
                 # isoformat gets escaped
                 return escape(data.isoformat())
             elif isinstance(data, dict):
                 return escape(json.dumps(data))
             elif isinstance(data, list):
                 return escape("{%s}" % ','.join(escape(d) for d in data))
             elif isinstance(data, psycopg2.extras.Range):
                 # We escape twice here too, so that we make sure
                 # everything gets passed to copy properly
                 return escape(
                     '%s%s,%s%s' % (
                         '[' if data.lower_inc else '(',
                         '-infinity' if data.lower_inf else escape(data.lower),
                         'infinity' if data.upper_inf else escape(data.upper),
                         ']' if data.upper_inc else ')',
                     )
                 )
             elif isinstance(data, enum.IntEnum):
                 return escape(int(data))
             else:
                 # We don't escape here to make sure we pass literals properly
                 return str(data)
         with tempfile.TemporaryFile('w+') as f:
             for d in items:
                 if item_cb is not None:
                     item_cb(d)
                 line = [escape(d.get(k)) for k in columns]
                 f.write(','.join(line))
                 f.write('\n')
             f.seek(0)
             self._cursor(cur).copy_expert('COPY %s (%s) FROM STDIN CSV' % (
                 tblname, ', '.join(columns)), f)
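 
     # An illustrative call (hypothetical values): copy one row into the
     # temporary content table, relying on escape() above to render bytes
     # as hex and dicts as JSON for the CSV COPY:
     #
     #     db.copy_to([{'sha1': b'\x12' * 20, 'length': 3}],
     #                'tmp_content', ['sha1', 'length'], cur)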
 
     def mktemp(self, tblname, cur=None):
         self._cursor(cur).execute('SELECT swh_mktemp(%s)', (tblname,))
 
 
 class Db(BaseDb):
     """Proxy to the SWH DB, with wrappers around stored procedures
 
     """
     def mktemp_dir_entry(self, entry_type, cur=None):
         self._cursor(cur).execute('SELECT swh_mktemp_dir_entry(%s)',
                                   (('directory_entry_%s' % entry_type),))
 
     @stored_procedure('swh_mktemp_revision')
     def mktemp_revision(self, cur=None): pass
 
     @stored_procedure('swh_mktemp_release')
     def mktemp_release(self, cur=None): pass
 
     @stored_procedure('swh_mktemp_occurrence_history')
     def mktemp_occurrence_history(self, cur=None): pass
 
+    @stored_procedure('swh_mktemp_snapshot_branch')
+    def mktemp_snapshot_branch(self, cur=None): pass
+
     @stored_procedure('swh_mktemp_entity_lister')
     def mktemp_entity_lister(self, cur=None): pass
 
     @stored_procedure('swh_mktemp_entity_history')
     def mktemp_entity_history(self, cur=None): pass
 
     @stored_procedure('swh_mktemp_bytea')
     def mktemp_bytea(self, cur=None): pass
 
     def register_listener(self, notify_queue, cur=None):
         """Register a listener for NOTIFY queue `notify_queue`"""
         self._cursor(cur).execute("LISTEN %s" % notify_queue)
 
     def listen_notifies(self, timeout):
         """Listen to notifications for `timeout` seconds"""
         if select.select([self.conn], [], [], timeout) == ([], [], []):
             return
         else:
             self.conn.poll()
             while self.conn.notifies:
                 yield self.conn.notifies.pop(0)
 
     @stored_procedure('swh_content_add')
     def content_add_from_temp(self, cur=None): pass
 
     @stored_procedure('swh_directory_add')
     def directory_add_from_temp(self, cur=None): pass
 
     @stored_procedure('swh_skipped_content_add')
     def skipped_content_add_from_temp(self, cur=None): pass
 
     @stored_procedure('swh_revision_add')
     def revision_add_from_temp(self, cur=None): pass
 
     @stored_procedure('swh_release_add')
     def release_add_from_temp(self, cur=None): pass
 
     @stored_procedure('swh_occurrence_history_add')
     def occurrence_history_add_from_temp(self, cur=None): pass
 
     @stored_procedure('swh_entity_history_add')
     def entity_history_add_from_temp(self, cur=None): pass
 
     def store_tmp_bytea(self, ids, cur=None):
         """Store the given identifiers in a new tmp_bytea table"""
         cur = self._cursor(cur)
 
         self.mktemp_bytea(cur)
         self.copy_to(({'id': elem} for elem in ids), 'tmp_bytea',
                      ['id'], cur)
 
     def content_update_from_temp(self, keys_to_update, cur=None):
         cur = self._cursor(cur)
         cur.execute("""select swh_content_update(ARRAY[%s] :: text[])""" %
                     keys_to_update)
 
     content_get_metadata_keys = [
         'sha1', 'sha1_git', 'sha256', 'blake2s256', 'length', 'status']
 
     skipped_content_keys = [
         'sha1', 'sha1_git', 'sha256', 'blake2s256',
         'length', 'reason', 'status', 'origin']
 
     def content_get_metadata_from_temp(self, cur=None):
         cur = self._cursor(cur)
         cur.execute("""select t.id as sha1, %s from tmp_bytea t
                        left join content on t.id = content.sha1
                     """ % ', '.join(self.content_get_metadata_keys[1:]))
 
         yield from cursor_to_bytes(cur)
 
     def content_missing_from_temp(self, cur=None):
         cur = self._cursor(cur)
 
         cur.execute("""SELECT sha1, sha1_git, sha256, blake2s256
                        FROM swh_content_missing()""")
 
         yield from cursor_to_bytes(cur)
 
     def content_missing_per_sha1_from_temp(self, cur=None):
         cur = self._cursor(cur)
 
         cur.execute("""SELECT *
                        FROM swh_content_missing_per_sha1()""")
 
         yield from cursor_to_bytes(cur)
 
     def skipped_content_missing_from_temp(self, cur=None):
         cur = self._cursor(cur)
 
         cur.execute("""SELECT sha1, sha1_git, sha256, blake2s256
                        FROM swh_skipped_content_missing()""")
 
         yield from cursor_to_bytes(cur)
 
     def occurrence_get(self, origin_id, cur=None):
         """Retrieve latest occurrence's information by origin_id.
 
         """
         cur = self._cursor(cur)
 
         cur.execute("""SELECT origin, branch, target, target_type,
                               (select max(date) from origin_visit
                                where origin=%s) as date
                        FROM occurrence
                        WHERE origin=%s
                     """,
                     (origin_id, origin_id))
 
         yield from cursor_to_bytes(cur)
 
+    def snapshot_exists(self, snapshot_id, cur=None):
+        """Check whether a snapshot with the given id exists"""
+        cur = self._cursor(cur)
+
+        cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
+
+        return bool(cur.fetchone())
+
+    def snapshot_add(self, origin, visit, snapshot_id, cur=None):
+        """Add a snapshot for origin/visit from the temporary table"""
+        cur = self._cursor(cur)
+
+        cur.execute("""SELECT swh_snapshot_add(%s, %s, %s)""",
+                    (origin, visit, snapshot_id))
+
+    snapshot_get_cols = ['snapshot_id', 'name', 'target', 'target_type']
+
+    def snapshot_get_by_id(self, snapshot_id, cur=None):
+        cur = self._cursor(cur)
+        query = """\
+           SELECT %s FROM swh_snapshot_get_by_id(%%s)
+        """ % ', '.join(self.snapshot_get_cols)
+
+        cur.execute(query, (snapshot_id,))
+
+        yield from cursor_to_bytes(cur)
+
+    def snapshot_get_by_origin_visit(self, origin_id, visit_id, cur=None):
+        cur = self._cursor(cur)
+        query = """\
+           SELECT swh_snapshot_get_by_origin_visit(%s, %s)
+        """
+
+        cur.execute(query, (origin_id, visit_id))
+        ret = cur.fetchone()
+        if ret:
+            return line_to_bytes(ret)[0]
+
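+    # A loader-side sketch of how these methods combine (hypothetical;
+    # assumes the temporary table created by mktemp_snapshot_branch mirrors
+    # snapshot_branch's name/target/target_type columns):
+    #
+    #     db.mktemp_snapshot_branch(cur)
+    #     db.copy_to(branches, 'tmp_snapshot_branch',
+    #                ['name', 'target', 'target_type'], cur)
+    #     db.snapshot_add(origin, visit, snapshot_id, cur)
+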
     content_find_cols = ['sha1', 'sha1_git', 'sha256', 'blake2s256', 'length',
                          'ctime', 'status']
 
     def content_find(self, sha1=None, sha1_git=None, sha256=None,
                      blake2s256=None, cur=None):
         """Find the content optionally on a combination of the following
         checksums sha1, sha1_git, sha256 or blake2s256.
 
         Args:
             sha1: sha1 content
             sha1_git: the sha1 of the content, computed `a la git`
             sha256: sha256 content
             blake2s256: blake2s256 content
 
         Returns:
             The tuple (sha1, sha1_git, sha256, blake2s256, length, ctime,
             status) if found, None otherwise.
 
         """
         cur = self._cursor(cur)
 
         cur.execute("""SELECT %s
                        FROM swh_content_find(%%s, %%s, %%s, %%s)
                        LIMIT 1""" % ','.join(self.content_find_cols),
                     (sha1, sha1_git, sha256, blake2s256))
 
         content = line_to_bytes(cur.fetchone())
         if set(content) == {None}:
             return None
         else:
             return content
 
     def directory_get_from_temp(self, cur=None):
         cur = self._cursor(cur)
         cur.execute('''SELECT id, file_entries, dir_entries, rev_entries
                        FROM swh_directory_get()''')
         yield from cursor_to_bytes(cur)
 
     def directory_missing_from_temp(self, cur=None):
         cur = self._cursor(cur)
         cur.execute('SELECT * FROM swh_directory_missing()')
         yield from cursor_to_bytes(cur)
 
     directory_ls_cols = ['dir_id', 'type', 'target', 'name', 'perms',
                          'status', 'sha1', 'sha1_git', 'sha256', 'length']
 
     def directory_walk_one(self, directory, cur=None):
         cur = self._cursor(cur)
         cols = ', '.join(self.directory_ls_cols)
         query = 'SELECT %s FROM swh_directory_walk_one(%%s)' % cols
         cur.execute(query, (directory,))
         yield from cursor_to_bytes(cur)
 
     def directory_walk(self, directory, cur=None):
         cur = self._cursor(cur)
         cols = ', '.join(self.directory_ls_cols)
         query = 'SELECT %s FROM swh_directory_walk(%%s)' % cols
         cur.execute(query, (directory,))
         yield from cursor_to_bytes(cur)
 
     def directory_entry_get_by_path(self, directory, paths, cur=None):
         """Retrieve a directory entry by path.
 
         """
         cur = self._cursor(cur)
 
         cols = ', '.join(self.directory_ls_cols)
         query = (
             'SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)' % cols)
         cur.execute(query, (directory, paths))
 
         data = cur.fetchone()
         if set(data) == {None}:
             return None
         return line_to_bytes(data)
 
     def revision_missing_from_temp(self, cur=None):
         cur = self._cursor(cur)
 
         cur.execute('SELECT id FROM swh_revision_missing() as r(id)')
 
         yield from cursor_to_bytes(cur)
 
     revision_add_cols = [
         'id', 'date', 'date_offset', 'date_neg_utc_offset', 'committer_date',
         'committer_date_offset', 'committer_date_neg_utc_offset', 'type',
         'directory', 'message', 'author_fullname', 'author_name',
         'author_email', 'committer_fullname', 'committer_name',
         'committer_email', 'metadata', 'synthetic',
     ]
 
     revision_get_cols = revision_add_cols + [
         'author_id', 'committer_id', 'parents']
 
     def origin_visit_add(self, origin, ts, cur=None):
         """Add a new origin_visit for origin origin at timestamp ts with
         status 'ongoing'.
 
         Args:
             origin: origin concerned by the visit
             ts: the date of the visit
 
         Returns:
            The id of the new visit for that origin
 
         """
         cur = self._cursor(cur)
        cur.execute('SELECT swh_origin_visit_add(%s, %s)',
                    (origin, ts))
         return cur.fetchone()[0]
 
     def origin_visit_update(self, origin, visit_id, status,
                             metadata, cur=None):
         """Update origin_visit's status."""
         cur = self._cursor(cur)
         update = """UPDATE origin_visit
                     SET status=%s, metadata=%s
                     WHERE origin=%s AND visit=%s"""
         cur.execute(update, (status, jsonize(metadata), origin, visit_id))
 
     origin_visit_get_cols = ['origin', 'visit', 'date', 'status', 'metadata']
 
     def origin_visit_get_all(self, origin_id,
                              last_visit=None, limit=None, cur=None):
         """Retrieve all visits for origin with id origin_id.
 
         Args:
             origin_id: The occurrence's origin
 
         Yields:
             The occurrence's history visits
 
         """
         cur = self._cursor(cur)
 
         if last_visit:
             extra_condition = 'and visit > %s'
             args = (origin_id, last_visit, limit)
         else:
             extra_condition = ''
             args = (origin_id, limit)
 
         query = """\
         SELECT %s
         FROM origin_visit
         WHERE origin=%%s %s
         order by date, visit asc
         limit %%s""" % (
             ', '.join(self.origin_visit_get_cols), extra_condition
         )
 
         cur.execute(query, args)
 
         yield from cursor_to_bytes(cur)
 
     def origin_visit_get(self, origin_id, visit_id, cur=None):
         """Retrieve information on visit visit_id of origin origin_id.
 
         Args:
             origin_id: the origin concerned
            visit_id: the visit's identifier for that origin
 
         Returns:
             The origin_visit information
 
         """
         cur = self._cursor(cur)
 
         query = """\
             SELECT %s
             FROM origin_visit
             WHERE origin = %%s AND visit = %%s
             """ % (', '.join(self.origin_visit_get_cols))
 
         cur.execute(query, (origin_id, visit_id))
         r = cur.fetchall()
         if not r:
             return None
         return line_to_bytes(r[0])
 
     occurrence_cols = ['origin', 'branch', 'target', 'target_type']
 
     def occurrence_by_origin_visit(self, origin_id, visit_id, cur=None):
         """Retrieve all occurrences for a particular origin_visit.
 
         Args:
             origin_id: the origin concerned
            visit_id: the visit's identifier for that origin

        Yields:
            The occurrences of that origin visit
 
         """
         cur = self._cursor(cur)
 
         query = """\
             SELECT %s
             FROM swh_occurrence_by_origin_visit(%%s, %%s)
             """ % (', '.join(self.occurrence_cols))
 
         cur.execute(query, (origin_id, visit_id))
         yield from cursor_to_bytes(cur)
 
     def revision_get_from_temp(self, cur=None):
         cur = self._cursor(cur)
         query = 'SELECT %s FROM swh_revision_get()' % (
             ', '.join(self.revision_get_cols))
         cur.execute(query)
         yield from cursor_to_bytes(cur)
 
     def revision_log(self, root_revisions, limit=None, cur=None):
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM swh_revision_log(%%s, %%s)
                 """ % ', '.join(self.revision_get_cols)
 
         cur.execute(query, (root_revisions, limit))
         yield from cursor_to_bytes(cur)
 
     revision_shortlog_cols = ['id', 'parents']
 
     def revision_shortlog(self, root_revisions, limit=None, cur=None):
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM swh_revision_list(%%s, %%s)
                 """ % ', '.join(self.revision_shortlog_cols)
 
         cur.execute(query, (root_revisions, limit))
         yield from cursor_to_bytes(cur)
 
     def release_missing_from_temp(self, cur=None):
         cur = self._cursor(cur)
         cur.execute('SELECT id FROM swh_release_missing() as r(id)')
         yield from cursor_to_bytes(cur)
 
     object_find_by_sha1_git_cols = ['sha1_git', 'type', 'id', 'object_id']
 
     def object_find_by_sha1_git(self, ids, cur=None):
         cur = self._cursor(cur)
 
         self.store_tmp_bytea(ids, cur)
         query = 'select %s from swh_object_find_by_sha1_git()' % (
             ', '.join(self.object_find_by_sha1_git_cols)
         )
         cur.execute(query)
 
         yield from cursor_to_bytes(cur)
 
     def stat_counters(self, cur=None):
         cur = self._cursor(cur)
         cur.execute('SELECT * FROM swh_stat_counters()')
         yield from cur
 
     fetch_history_cols = ['origin', 'date', 'status', 'result', 'stdout',
                           'stderr', 'duration']
 
     def create_fetch_history(self, fetch_history, cur=None):
         """Create a fetch_history entry with the data in fetch_history"""
         cur = self._cursor(cur)
         query = '''INSERT INTO fetch_history (%s)
                    VALUES (%s) RETURNING id''' % (
             ','.join(self.fetch_history_cols),
             ','.join(['%s'] * len(self.fetch_history_cols))
         )
         cur.execute(query, [fetch_history.get(col) for col in
                             self.fetch_history_cols])
 
         return cur.fetchone()[0]
 
     def get_fetch_history(self, fetch_history_id, cur=None):
         """Get a fetch_history entry with the given id"""
         cur = self._cursor(cur)
         query = '''SELECT %s FROM fetch_history WHERE id=%%s''' % (
             ', '.join(self.fetch_history_cols),
         )
         cur.execute(query, (fetch_history_id,))
 
         data = cur.fetchone()
 
         if not data:
             return None
 
         ret = {'id': fetch_history_id}
         for i, col in enumerate(self.fetch_history_cols):
             ret[col] = data[i]
 
         return ret
 
     def update_fetch_history(self, fetch_history, cur=None):
         """Update the fetch_history entry from the data in fetch_history"""
         cur = self._cursor(cur)
         query = '''UPDATE fetch_history
                    SET %s
                    WHERE id=%%s''' % (
             ','.join('%s=%%s' % col for col in self.fetch_history_cols)
         )
         cur.execute(query, [jsonize(fetch_history.get(col)) for col in
                             self.fetch_history_cols + ['id']])
 
     base_entity_cols = ['uuid', 'parent', 'name', 'type',
                         'description', 'homepage', 'active',
                         'generated', 'lister_metadata',
                         'metadata']
 
     entity_cols = base_entity_cols + ['last_seen', 'last_id']
     entity_history_cols = base_entity_cols + ['id', 'validity']
 
     def origin_add(self, type, url, cur=None):
         """Insert a new origin and return the new identifier."""
         insert = """INSERT INTO origin (type, url) values (%s, %s)
                     RETURNING id"""
 
         cur.execute(insert, (type, url))
         return cur.fetchone()[0]
 
     origin_cols = ['id', 'type', 'url', 'lister', 'project']
 
     def origin_get_with(self, type, url, cur=None):
         """Retrieve the origin id from its type and url if found."""
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM origin
                    WHERE type=%%s AND url=%%s
                 """ % ','.join(self.origin_cols)
 
         cur.execute(query, (type, url))
         data = cur.fetchone()
         if data:
             return line_to_bytes(data)
         return None
 
     def origin_get(self, id, cur=None):
         """Retrieve the origin per its identifier.
 
         """
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM origin WHERE id=%%s
                 """ % ','.join(self.origin_cols)
 
         cur.execute(query, (id,))
         data = cur.fetchone()
         if data:
             return line_to_bytes(data)
         return None
 
     def origin_search(self, url_pattern, offset=0, limit=50,
                       regexp=False, cur=None):
         """Search for origins whose urls contain a provided string pattern
         or match a provided regular expression.
        The search is performed in a case-insensitive way.
 
         Args:
             url_pattern: the string pattern to search for in origin urls
             offset: number of found origins to skip before returning results
             limit: the maximum number of found origins to return
            regexp: if True, consider the provided pattern as a regular
                expression and return origins whose urls match it
 
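        For instance (sketch):

            db.origin_search('github.com/torvalds')           # substring
            db.origin_search('^https://github', regexp=True)  # posix regexp
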
         """
         cur = self._cursor(cur)
         origin_cols = ','.join(self.origin_cols)
         query = """SELECT %s
                    FROM origin WHERE url %s %%s
                    ORDER BY id
                    OFFSET %%s LIMIT %%s"""
 
         if not regexp:
             query = query % (origin_cols, 'ILIKE')
             query_params = ('%'+url_pattern+'%', offset, limit)
         else:
             query = query % (origin_cols, '~*')
             query_params = (url_pattern, offset, limit)
 
         cur.execute(query, query_params)
         yield from cursor_to_bytes(cur)
 
     person_cols = ['fullname', 'name', 'email']
     person_get_cols = person_cols + ['id']
 
     def person_add(self, person, cur=None):
         """Add a person identified by its name and email.
 
         Returns:
             The new person's id
 
         """
         cur = self._cursor(cur)
 
         query_new_person = '''\
         INSERT INTO person(%s)
         VALUES (%s)
         RETURNING id''' % (
             ', '.join(self.person_cols),
             ', '.join('%s' for i in range(len(self.person_cols)))
         )
         cur.execute(query_new_person,
                     [person[col] for col in self.person_cols])
         return cur.fetchone()[0]
 
     def person_get(self, ids, cur=None):
         """Retrieve the persons identified by the list of ids.
 
         """
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM person
                    WHERE id IN %%s""" % ', '.join(self.person_get_cols)
 
         cur.execute(query, (tuple(ids),))
         yield from cursor_to_bytes(cur)
 
     release_add_cols = [
         'id', 'target', 'target_type', 'date', 'date_offset',
         'date_neg_utc_offset', 'name', 'comment', 'synthetic',
         'author_fullname', 'author_name', 'author_email',
     ]
     release_get_cols = release_add_cols + ['author_id']
 
     def release_get_from_temp(self, cur=None):
         cur = self._cursor(cur)
         query = '''
         SELECT %s
             FROM swh_release_get()
         ''' % ', '.join(self.release_get_cols)
         cur.execute(query)
         yield from cursor_to_bytes(cur)
 
     def release_get_by(self,
                        origin_id,
                        limit=None,
                        cur=None):
         """Retrieve a release by occurrence criterion (only origin right now)
 
         Args:
             - origin_id: The origin to look for.
 
         """
         cur = self._cursor(cur)
         query = """
         SELECT %s
             FROM swh_release_get_by(%%s)
             LIMIT %%s
         """ % ', '.join(self.release_get_cols)
         cur.execute(query, (origin_id, limit))
         yield from cursor_to_bytes(cur)
 
     def revision_get_by(self,
                         origin_id,
                         branch_name,
                         datetime,
                         limit=None,
                         cur=None):
         """Retrieve a revision by occurrence criterion.
 
        Args:
            - origin_id: The origin to look for
            - branch_name: the branch name to look for
            - datetime: the lower bound of the time range to look for,
              the upper bound being now
            - limit: limit the number of results to return
         """
         cur = self._cursor(cur)
         if branch_name and isinstance(branch_name, str):
             branch_name = branch_name.encode('utf-8')
 
         query = '''
         SELECT %s
             FROM swh_revision_get_by(%%s, %%s, %%s)
             LIMIT %%s
         ''' % ', '.join(self.revision_get_cols)
 
         cur.execute(query, (origin_id, branch_name, datetime, limit))
         yield from cursor_to_bytes(cur)
 
     def entity_get(self, uuid, cur=None):
         """Retrieve the entity and its parent hierarchy chain per uuid.
 
         """
         cur = self._cursor(cur)
         cur.execute("""SELECT %s
                        FROM swh_entity_get(%%s)""" % (
                            ', '.join(self.entity_cols)),
                     (uuid, ))
         yield from cursor_to_bytes(cur)
 
     def entity_get_one(self, uuid, cur=None):
         """Retrieve a single entity given its uuid.
 
         """
         cur = self._cursor(cur)
         cur.execute("""SELECT %s
                        FROM entity
                        WHERE uuid = %%s""" % (
                            ', '.join(self.entity_cols)),
                     (uuid, ))
         data = cur.fetchone()
         if not data:
             return None
         return line_to_bytes(data)
 
     def origin_metadata_add(self, origin, ts, provider, tool,
                             metadata, cur=None):
         """ Add an origin_metadata for the origin at ts with provider, tool and
         metadata.
 
         Args:
             origin (int): the origin's id for which the metadata is added
             ts (datetime): time when the metadata was found
             provider (int): the metadata provider identifier
             tool (int): the tool's identifier used to extract metadata
             metadata (jsonb): the metadata retrieved at the time and location
 
         Returns:
             id (int): the origin_metadata unique id
 
         """
         cur = self._cursor(cur)
         insert = """INSERT INTO origin_metadata (origin_id, discovery_date,
                     provider_id, tool_id, metadata) values (%s, %s, %s, %s, %s)
                     RETURNING id"""
         cur.execute(insert, (origin, ts, provider, tool, jsonize(metadata)))
 
         return cur.fetchone()[0]
 
     origin_metadata_get_cols = ['id', 'origin_id', 'discovery_date',
                                 'tool_id', 'metadata', 'provider_id',
                                 'provider_name', 'provider_type',
                                 'provider_url']
 
     def origin_metadata_get_by(self, origin_id, provider_type=None, cur=None):
         """Retrieve all origin_metadata entries for one origin_id
 
         """
         cur = self._cursor(cur)
         if not provider_type:
             query = '''SELECT %s
                        FROM swh_origin_metadata_get_by_origin(
                             %%s)''' % (','.join(
                                           self.origin_metadata_get_cols))
 
             cur.execute(query, (origin_id, ))
 
         else:
             query = '''SELECT %s
                        FROM swh_origin_metadata_get_by_provider_type(
                             %%s, %%s)''' % (','.join(
                                           self.origin_metadata_get_cols))
 
             cur.execute(query, (origin_id, provider_type))
 
         yield from cursor_to_bytes(cur)
 
     tool_cols = ['id', 'name', 'version', 'configuration']
 
     @stored_procedure('swh_mktemp_tool')
     def mktemp_tool(self, cur=None):
         pass
 
     def tool_add_from_temp(self, cur=None):
         cur = self._cursor(cur)
         cur.execute("SELECT %s from swh_tool_add()" % (
             ','.join(self.tool_cols), ))
         yield from cursor_to_bytes(cur)
 
     def tool_get(self, name, version, configuration, cur=None):
         cur = self._cursor(cur)
         cur.execute('''select %s
                        from tool
                        where name=%%s and
                              version=%%s and
                              configuration=%%s''' % (
                                  ','.join(self.tool_cols)),
                     (name, version, configuration))
 
         data = cur.fetchone()
         if not data:
             return None
         return line_to_bytes(data)
 
     metadata_provider_cols = ['id', 'provider_name', 'provider_type',
                               'provider_url', 'metadata']
 
     def metadata_provider_add(self, provider_name, provider_type,
                               provider_url, metadata, cur=None):
         """Insert a new provider and return the new identifier."""
         cur = self._cursor(cur)
         insert = """INSERT INTO metadata_provider (provider_name, provider_type,
                     provider_url, metadata) values (%s, %s, %s, %s)
                     RETURNING id"""
 
         cur.execute(insert, (provider_name, provider_type, provider_url,
                     jsonize(metadata)))
         return cur.fetchone()[0]
 
     def metadata_provider_get(self, provider_id, cur=None):
         cur = self._cursor(cur)
         cur.execute('''select %s
                        from metadata_provider
                       where id=%%s ''' % (
                                  ','.join(self.metadata_provider_cols)),
                     (provider_id, ))
 
         data = cur.fetchone()
         if not data:
             return None
         return line_to_bytes(data)
 
     def metadata_provider_get_by(self, provider_name, provider_url,
                                  cur=None):
         cur = self._cursor(cur)
         cur.execute('''select %s
                        from metadata_provider
                        where provider_name=%%s and
                              provider_url=%%s''' % (
                                  ','.join(self.metadata_provider_cols)),
                     (provider_name, provider_url))
 
         data = cur.fetchone()
         if not data:
             return None
         return line_to_bytes(data)
diff --git a/swh/storage/storage.py b/swh/storage/storage.py
index 0e2bef867..fdd73a99d 100644
--- a/swh/storage/storage.py
+++ b/swh/storage/storage.py
@@ -1,1424 +1,1552 @@
 # Copyright (C) 2015-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 
 from collections import defaultdict
 import datetime
 import itertools
 import json
 import dateutil.parser
 import psycopg2
 
 from . import converters
 from .common import db_transaction_generator, db_transaction
 from .db import Db
 from .exc import StorageDBError
 
 from swh.model.hashutil import ALGORITHMS
 from swh.objstorage import get_objstorage
 from swh.objstorage.exc import ObjNotFoundError
 
 # Max block size of contents to return
 BULK_BLOCK_CONTENT_LEN_MAX = 10000
 
 
 CONTENT_HASH_KEYS = ['sha1', 'sha1_git', 'sha256', 'blake2s256']
 
 
 class Storage():
     """SWH storage proxy, encompassing DB and object storage
 
     """
 
     def __init__(self, db, objstorage):
         """
         Args:
            db: either a libpq connection string, or a psycopg2 connection
            objstorage: configuration dict to instantiate the object storage
 
         """
         try:
             if isinstance(db, psycopg2.extensions.connection):
                 self.db = Db(db)
             else:
                 self.db = Db.connect(db)
         except psycopg2.OperationalError as e:
             raise StorageDBError(e)
 
         self.objstorage = get_objstorage(**objstorage)
 
     def check_config(self, *, check_write):
         """Check that the storage is configured and ready to go."""
 
         if not self.objstorage.check_config(check_write=check_write):
             return False
 
         # Check permissions on one of the tables
         with self.db.transaction() as cur:
             if check_write:
                 check = 'INSERT'
             else:
                 check = 'SELECT'
 
             cur.execute(
                 "select has_table_privilege(current_user, 'content', %s)",
                 (check,)
             )
            return cur.fetchone()[0]
 
     def content_add(self, content):
         """Add content blobs to the storage
 
         Note: in case of DB errors, objects might have already been added to
         the object storage and will not be removed. Since addition to the
         object storage is idempotent, that should not be a problem.
 
         Args:
             content (iterable): iterable of dictionaries representing
                 individual pieces of content to add. Each dictionary has the
                 following keys:
 
                 - data (bytes): the actual content
                 - length (int): content length (default: -1)
                 - one key for each checksum algorithm in
                   :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
                   corresponding checksum
                 - status (str): one of visible, hidden, absent
                 - reason (str): if status = absent, the reason why
                 - origin (int): if status = absent, the origin we saw the
                   content in
 
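            A hedged sketch of adding a single piece of content, computing
            the checksums with the standard library (``storage`` is a
            configured :class:`Storage`; hashlib.blake2s needs Python >= 3.6):

                import hashlib

                data = b'hello world\n'
                storage.content_add([{
                    'data': data,
                    'length': len(data),
                    'sha1': hashlib.sha1(data).digest(),
                    'sha256': hashlib.sha256(data).digest(),
                    # sha1_git hashes a git blob header followed by the data
                    'sha1_git': hashlib.sha1(
                        b'blob %d\x00' % len(data) + data).digest(),
                    'blake2s256': hashlib.blake2s(data).digest(),
                    'status': 'visible',
                }])
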
         """
         db = self.db
 
         def _unique_key(hash, keys=CONTENT_HASH_KEYS):
             """Given a hash (tuple or dict), return a unique key from the
                aggregation of keys.
 
             """
             if isinstance(hash, tuple):
                 return hash
             return tuple([hash[k] for k in keys])
 
         content_by_status = defaultdict(list)
         for d in content:
             if 'status' not in d:
                 d['status'] = 'visible'
             if 'length' not in d:
                 d['length'] = -1
             content_by_status[d['status']].append(d)
 
         content_with_data = content_by_status['visible']
         content_without_data = content_by_status['absent']
 
         missing_content = set(self.content_missing(content_with_data))
         missing_skipped = set(_unique_key(hashes) for hashes
                               in self.skipped_content_missing(
                                   content_without_data))
 
         with db.transaction() as cur:
             if missing_content:
                 # create temporary table for metadata injection
                 db.mktemp('content', cur)
 
                 def add_to_objstorage(cont):
                     self.objstorage.add(cont['data'],
                                         obj_id=cont['sha1'])
 
                 content_filtered = (cont for cont in content_with_data
                                     if cont['sha1'] in missing_content)
 
                 db.copy_to(content_filtered, 'tmp_content',
                            db.content_get_metadata_keys,
                            cur, item_cb=add_to_objstorage)
 
                 # move metadata in place
                 db.content_add_from_temp(cur)
 
             if missing_skipped:
                 missing_filtered = (cont for cont in content_without_data
                                     if _unique_key(cont) in missing_skipped)
 
                 db.mktemp('skipped_content', cur)
                 db.copy_to(missing_filtered, 'tmp_skipped_content',
                            db.skipped_content_keys, cur)
 
                 # move metadata in place
                 db.skipped_content_add_from_temp(cur)
 
     @db_transaction
     def content_update(self, content, keys=[], cur=None):
         """Update content blobs to the storage. Does nothing for unknown
         contents or skipped ones.
 
         Args:
             content (iterable): iterable of dictionaries representing
                 individual pieces of content to update. Each dictionary has the
                 following keys:
 
                 - data (bytes): the actual content
                 - length (int): content length (default: -1)
                 - one key for each checksum algorithm in
                   :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
                   corresponding checksum
                 - status (str): one of visible, hidden, absent
 
            keys (list): List of keys (str) whose values need an update, e.g.,
                a new hash column
 
         """
         db = self.db
 
         # TODO: Add a check on input keys. How to properly implement
         # this? We don't know yet the new columns.
 
        db.mktemp('content', cur)
         select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
         db.copy_to(content, 'tmp_content', select_keys, cur)
         db.content_update_from_temp(keys_to_update=keys,
                                     cur=cur)
 
     def content_get(self, content):
         """Retrieve in bulk contents and their data.
 
         Args:
             content: iterables of sha1
 
        Yields:
            dict: the contents with their raw data:

                - sha1 (bytes): the content's sha1
                - data (bytes): the content's raw data

        Raises:
            ValueError: if more than BULK_BLOCK_CONTENT_LEN_MAX contents are
                requested.
 
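        For instance (sketch):

            for c in storage.content_get([sha1_a, sha1_b]):
                ...  # c is None if unknown, else {'sha1': ..., 'data': ...}
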
         """
         # FIXME: Improve on server module to slice the result
         if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
            raise ValueError(
                "Send at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)
 
         for obj_id in content:
             try:
                 data = self.objstorage.get(obj_id)
             except ObjNotFoundError:
                 yield None
                 continue
 
             yield {'sha1': obj_id, 'data': data}
 
     @db_transaction_generator
     def content_get_metadata(self, content, cur=None):
         """Retrieve content metadata in bulk
 
         Args:
             content: iterable of content identifiers (sha1)
 
         Returns:
             an iterable with content metadata corresponding to the given ids
         """
         db = self.db
 
         db.store_tmp_bytea(content, cur)
 
         for content_metadata in db.content_get_metadata_from_temp(cur):
             yield dict(zip(db.content_get_metadata_keys, content_metadata))
 
     @db_transaction_generator
     def content_missing(self, content, key_hash='sha1', cur=None):
         """List content missing from storage
 
         Args:
             content: iterable of dictionaries containing one key for each
                 checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`,
                 mapped to the corresponding checksum, and a length key mapped
                 to the content length.
             key_hash: the name of the hash used as key (default: 'sha1')
 
         Returns:
             iterable: missing ids
 
         Raises:
             TODO: an exception when we get a hash collision.
 
         """
         db = self.db
 
         keys = CONTENT_HASH_KEYS
 
         if key_hash not in CONTENT_HASH_KEYS:
             raise ValueError("key_hash should be one of %s" % keys)
 
         key_hash_idx = keys.index(key_hash)
 
         # Create temporary table for metadata injection
         db.mktemp('content', cur)
 
         db.copy_to(content, 'tmp_content', keys + ['length'], cur)
 
         for obj in db.content_missing_from_temp(cur):
             yield obj[key_hash_idx]
 
     @db_transaction_generator
     def content_missing_per_sha1(self, contents, cur=None):
         """List content missing from storage based only on sha1.
 
         Args:
             contents: Iterable of sha1 to check for absence.
 
         Returns:
             iterable: missing ids
 
         Raises:
             TODO: an exception when we get a hash collision.
 
         """
         db = self.db
 
         db.store_tmp_bytea(contents, cur)
         for obj in db.content_missing_per_sha1_from_temp(cur):
             yield obj[0]
 
     @db_transaction_generator
     def skipped_content_missing(self, content, cur=None):
         """List skipped_content missing from storage
 
         Args:
             content: iterable of dictionaries containing the data for each
                 checksum algorithm.
 
         Returns:
             iterable: missing signatures
 
         """
         keys = CONTENT_HASH_KEYS
 
         db = self.db
 
         db.mktemp('skipped_content', cur)
         db.copy_to(content, 'tmp_skipped_content',
                    keys + ['length', 'reason'], cur)
 
         yield from db.skipped_content_missing_from_temp(cur)
 
     @db_transaction
     def content_find(self, content, cur=None):
         """Find a content hash in db.
 
         Args:
             content: a dictionary representing one content hash, mapping
                 checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to
                 checksum values
 
         Returns:
            a dictionary with the content's metadata (checksums, length,
            ctime, status) if the content exists, None otherwise.
 
         Raises:
            ValueError: if none of the provided keys is one of sha1,
                sha1_git, sha256 or blake2s256.
 
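        For instance (sketch; any subset of the supported checksums works):

            storage.content_find({'sha1_git': some_sha1_git_value})
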
         """
         db = self.db
 
         if not set(content).intersection(ALGORITHMS):
             raise ValueError('content keys must contain at least one of: '
                              'sha1, sha1_git, sha256, blake2s256')
 
         c = db.content_find(sha1=content.get('sha1'),
                             sha1_git=content.get('sha1_git'),
                             sha256=content.get('sha256'),
                             blake2s256=content.get('blake2s256'),
                             cur=cur)
         if c:
             return dict(zip(db.content_find_cols, c))
         return None
 
     def directory_add(self, directories):
         """Add directories to the storage
 
         Args:
             directories (iterable): iterable of dictionaries representing the
                 individual directories to add. Each dict has the following
                 keys:
 
                 - id (sha1_git): the id of the directory to add
                 - entries (list): list of dicts for each entry in the
                       directory.  Each dict has the following keys:
 
                       - name (bytes)
                       - type (one of 'file', 'dir', 'rev'): type of the
                         directory entry (file, directory, revision)
                       - target (sha1_git): id of the object pointed at by the
                         directory entry
                       - perms (int): entry permissions
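
            A sketch of one directory (``dir_id``, ``file_id`` and
            ``subdir_id`` stand for known sha1_git identifiers):

                storage.directory_add([{
                    'id': dir_id,
                    'entries': [
                        {'name': b'README', 'type': 'file',
                         'target': file_id, 'perms': 0o100644},
                        {'name': b'src', 'type': 'dir',
                         'target': subdir_id, 'perms': 0o040000},
                    ],
                }])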
         """
         dirs = set()
         dir_entries = {
             'file': defaultdict(list),
             'dir': defaultdict(list),
             'rev': defaultdict(list),
         }
 
         for cur_dir in directories:
             dir_id = cur_dir['id']
             dirs.add(dir_id)
             for src_entry in cur_dir['entries']:
                 entry = src_entry.copy()
                 entry['dir_id'] = dir_id
                 dir_entries[entry['type']][dir_id].append(entry)
 
         dirs_missing = set(self.directory_missing(dirs))
         if not dirs_missing:
             return
 
         db = self.db
         with db.transaction() as cur:
             # Copy directory ids
             dirs_missing_dict = ({'id': dir} for dir in dirs_missing)
             db.mktemp('directory', cur)
             db.copy_to(dirs_missing_dict, 'tmp_directory', ['id'], cur)
 
             # Copy entries
             for entry_type, entry_list in dir_entries.items():
                 entries = itertools.chain.from_iterable(
                     entries_for_dir
                     for dir_id, entries_for_dir
                     in entry_list.items()
                     if dir_id in dirs_missing)
 
                 db.mktemp_dir_entry(entry_type)
 
                 db.copy_to(
                     entries,
                     'tmp_directory_entry_%s' % entry_type,
                     ['target', 'name', 'perms', 'dir_id'],
                     cur,
                 )
 
             # Do the final copy
             db.directory_add_from_temp(cur)
 
     @db_transaction_generator
    def directory_missing(self, directories, cur=None):
         """List directories missing from storage
 
         Args:
             directories (iterable): an iterable of directory ids
 
         Yields:
             missing directory ids
 
         """
         db = self.db
 
         # Create temporary table for metadata injection
         db.mktemp('directory', cur)
 
         directories_dicts = ({'id': dir} for dir in directories)
 
         db.copy_to(directories_dicts, 'tmp_directory', ['id'], cur)
 
         for obj in db.directory_missing_from_temp(cur):
             yield obj[0]
 
     @db_transaction_generator
     def directory_get(self,
                       directories,
                       cur=None):
         """Get information on directories.
 
         Args:
             - directories: an iterable of directory ids
 
        Yields:
            directories as dicts with keys id, dir_entries, file_entries
            and rev_entries.
 
         """
         db = self.db
         keys = ('id', 'dir_entries', 'file_entries', 'rev_entries')
 
         db.mktemp('directory', cur)
         db.copy_to(({'id': dir_id} for dir_id in directories),
                    'tmp_directory', ['id'], cur)
 
         dirs = db.directory_get_from_temp(cur)
         for line in dirs:
             yield dict(zip(keys, line))
 
     @db_transaction_generator
     def directory_ls(self, directory, recursive=False, cur=None):
         """Get entries for one directory.
 
        Args:
            - directory: the directory to list entries from.
            - recursive: if set, list entries recursively from this directory.

        Yields:
            The directory's entries.
 
         """
         db = self.db
 
         if recursive:
             res_gen = db.directory_walk(directory, cur=cur)
         else:
             res_gen = db.directory_walk_one(directory, cur=cur)
 
         for line in res_gen:
             yield dict(zip(db.directory_ls_cols, line))
 
     @db_transaction
     def directory_entry_get_by_path(self, directory, paths, cur=None):
         """Get the directory entry (either file or dir) from directory with path.
 
         Args:
             - directory: sha1 of the top level directory
             - paths: path to lookup from the top level directory. From left
               (top) to right (bottom).
 
         Returns:
             The corresponding directory entry if found, None otherwise.
 
         """
         db = self.db
 
         res = db.directory_entry_get_by_path(directory, paths, cur)
         if res:
             return dict(zip(db.directory_ls_cols, res))
 
     def revision_add(self, revisions):
         """Add revisions to the storage
 
         Args:
             revisions (iterable): iterable of dictionaries representing the
                 individual revisions to add. Each dict has the following keys:
 
                 - id (sha1_git): id of the revision to add
                 - date (datetime.DateTime): date the revision was written
                 - date_offset (int): offset from UTC in minutes the revision
                   was written
                 - date_neg_utc_offset (boolean): whether a null date_offset
                   represents a negative UTC offset
                 - committer_date (datetime.DateTime): date the revision got
                   added to the origin
                 - committer_date_offset (int): offset from UTC in minutes the
                   revision was added to the origin
                 - committer_date_neg_utc_offset (boolean): whether a null
                   committer_date_offset represents a negative UTC offset
                - type (one of 'git', 'tar', 'dsc', 'svn'): type of the
                  revision added
                 - directory (sha1_git): the directory the revision points at
                 - message (bytes): the message associated with the revision
                 - author_name (bytes): the name of the revision author
                 - author_email (bytes): the email of the revision author
                 - committer_name (bytes): the name of the revision committer
                 - committer_email (bytes): the email of the revision committer
                 - metadata (jsonb): extra information as dictionary
                 - synthetic (bool): revision's nature (tarball, directory
                   creates synthetic revision)
                 - parents (list of sha1_git): the parents of this revision
 
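            A compact sketch following the field list above (``rev_id``,
            ``dir_id`` and ``now`` stand for real values):

                storage.revision_add([{
                    'id': rev_id, 'type': 'git', 'directory': dir_id,
                    'date': now, 'date_offset': 0,
                    'date_neg_utc_offset': False,
                    'committer_date': now, 'committer_date_offset': 0,
                    'committer_date_neg_utc_offset': False,
                    'message': b'Initial commit',
                    'author_name': b'Ada', 'author_email': b'ada@example.org',
                    'committer_name': b'Ada',
                    'committer_email': b'ada@example.org',
                    'metadata': None, 'synthetic': False,
                    'parents': [],
                }])
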
         """
         db = self.db
 
         revisions_missing = set(self.revision_missing(
             set(revision['id'] for revision in revisions)))
 
         if not revisions_missing:
             return
 
         with db.transaction() as cur:
             db.mktemp_revision(cur)
 
             revisions_filtered = (
                 converters.revision_to_db(revision) for revision in revisions
                 if revision['id'] in revisions_missing)
 
             parents_filtered = []
 
             db.copy_to(
                 revisions_filtered, 'tmp_revision', db.revision_add_cols,
                 cur,
                 lambda rev: parents_filtered.extend(rev['parents']))
 
             db.revision_add_from_temp(cur)
 
             db.copy_to(parents_filtered, 'revision_history',
                        ['id', 'parent_id', 'parent_rank'], cur)
 
     @db_transaction_generator
     def revision_missing(self, revisions, cur=None):
         """List revisions missing from storage
 
         Args:
             revisions (iterable): revision ids
 
         Yields:
             missing revision ids
 
         """
         db = self.db
 
         db.store_tmp_bytea(revisions, cur)
 
         for obj in db.revision_missing_from_temp(cur):
             yield obj[0]
 
     @db_transaction_generator
    def revision_get(self, revisions, cur=None):
         """Get all revisions from storage
 
         Args:
             revisions: an iterable of revision ids
 
        Yields:
            revisions as dictionaries (or None if the revision doesn't
            exist)
 
         """
 
         db = self.db
 
         db.store_tmp_bytea(revisions, cur)
 
         for line in self.db.revision_get_from_temp(cur):
             data = converters.db_to_revision(
                 dict(zip(db.revision_get_cols, line))
             )
             if not data['type']:
                 yield None
                 continue
             yield data
 
     @db_transaction_generator
     def revision_log(self, revisions, limit=None, cur=None):
         """Fetch revision entry from the given root revisions.
 
         Args:
             revisions: array of root revision to lookup
             limit: limitation on the output result. Default to None.
 
         Yields:
            Revisions from the log of the given root revisions.
 
         """
         db = self.db
 
         for line in db.revision_log(revisions, limit, cur):
             data = converters.db_to_revision(
                 dict(zip(db.revision_get_cols, line))
             )
             if not data['type']:
                 yield None
                 continue
             yield data
 
     @db_transaction_generator
     def revision_shortlog(self, revisions, limit=None, cur=None):
         """Fetch the shortlog for the given revisions
 
         Args:
             revisions: list of root revisions to lookup
             limit: depth limitation for the output
 
        Yields:
            (id, parents) tuples.
 
         """
 
         db = self.db
 
         yield from db.revision_shortlog(revisions, limit, cur)
 
     @db_transaction_generator
     def revision_log_by(self, origin_id, branch_name=None, timestamp=None,
                         limit=None, cur=None):
         """Fetch revision entry from the actual origin_id's latest revision.
 
         Args:
             origin_id: the origin id from which deriving the revision
             branch_name: (optional) occurrence's branch name
             timestamp: (optional) occurrence's time
             limit: (optional) depth limitation for the
                 output. Default to None.
 
         Yields:
             The revision log starting from the revision derived from
             the (origin, branch_name, timestamp) combination if any.
 
         Returns:
             None if no revision matching this combination is found.
 
         """
         db = self.db
 
         # Retrieve the revision by criterion
         revisions = list(db.revision_get_by(
             origin_id, branch_name, timestamp, limit=1))
 
         if not revisions:
             return None
 
         revision_id = revisions[0][0]
         # otherwise, retrieve the revision log from that revision
         yield from self.revision_log([revision_id], limit)
 
     def release_add(self, releases):
         """Add releases to the storage
 
         Args:
             releases (iterable): iterable of dictionaries representing the
                 individual releases to add. Each dict has the following keys:
 
                 - id (sha1_git): id of the release to add
                 - revision (sha1_git): id of the revision the release points to
                 - date (datetime.DateTime): the date the release was made
                 - date_offset (int): offset from UTC in minutes the release was
                   made
                 - date_neg_utc_offset (boolean): whether a null date_offset
                   represents a negative UTC offset
                 - name (bytes): the name of the release
                 - comment (bytes): the comment associated with the release
                 - author_name (bytes): the name of the release author
                 - author_email (bytes): the email of the release author
 
         """
         db = self.db
 
         release_ids = set(release['id'] for release in releases)
         releases_missing = set(self.release_missing(release_ids))
 
         if not releases_missing:
             return
 
         with db.transaction() as cur:
             db.mktemp_release(cur)
 
             releases_filtered = (
                 converters.release_to_db(release) for release in releases
                 if release['id'] in releases_missing
             )
 
             db.copy_to(releases_filtered, 'tmp_release', db.release_add_cols,
                        cur)
 
             db.release_add_from_temp(cur)
 
     @db_transaction_generator
     def release_missing(self, releases, cur=None):
         """List releases missing from storage
 
         Args:
             releases: an iterable of release ids
 
        Yields:
            missing release ids
 
         """
         db = self.db
 
         # Create temporary table for metadata injection
         db.store_tmp_bytea(releases, cur)
 
         for obj in db.release_missing_from_temp(cur):
             yield obj[0]
 
     @db_transaction_generator
     def release_get(self, releases, cur=None):
         """Given a list of sha1, return the releases's information
 
         Args:
             releases: list of sha1s
 
        Yields:
            releases as dicts with the following keys:

            - id (bytes): the release's id
            - target, target_type: the object the release points to
            - date: the release's date
            - name (bytes): the release's name
            - comment (bytes): the release's message
            - synthetic (bool): whether the release was synthesized
            - author_fullname, author_name, author_email: the release's
              author information
 
         """
         db = self.db
 
         # Create temporary table for metadata injection
         db.store_tmp_bytea(releases, cur)
 
         for release in db.release_get_from_temp(cur):
             yield converters.db_to_release(
                 dict(zip(db.release_get_cols, release))
             )
 
+    @db_transaction
+    def snapshot_add(self, origin, visit, snapshot, cur=None):
+        """Add a snapshot for the given origin/visit couple
+
+        Args:
+            origin (int): id of the origin
+            visit (int): id of the visit
+            snapshot (dict): the snapshot to add to the visit, containing the
+              following keys:
+
+              - **id** (:class:`bytes`): id of the snapshot
+              - **branches** (:class:`dict`): branches the snapshot contains,
+                mapping the branch name (:class:`bytes`) to the branch target,
+                itself a :class:`dict` (or ``None`` if the branch points to an
+                unknown object)
+
+                - **target_type** (:class:`str`): one of ``content``,
+                  ``directory``, ``revision``, ``release``,
+                  ``snapshot``, ``alias``
+                - **target** (:class:`bytes`): identifier of the target
+                  (currently a ``sha1_git`` for all object kinds, or the name
+                  of the target branch for aliases)
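+
+              A minimal sketch (``snp_id`` and ``rev_id`` stand for real
+              20-byte identifiers)::
+
+                  storage.snapshot_add(origin_id, visit_id, {
+                      'id': snp_id,
+                      'branches': {
+                          b'refs/heads/master': {'target': rev_id,
+                                                 'target_type': 'revision'},
+                          b'HEAD': {'target': b'refs/heads/master',
+                                    'target_type': 'alias'},
+                      },
+                  })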
+        """
+        db = self.db
+
+        if not db.snapshot_exists(snapshot['id'], cur):
+            db.mktemp_snapshot_branch(cur)
+            db.copy_to(
+                (
+                    {
+                        'name': name,
+                        'target': info['target'] if info else None,
+                        'target_type': info['target_type'] if info else None,
+                    }
+                    for name, info in snapshot['branches'].items()
+                ),
+                'tmp_snapshot_branch',
+                ['name', 'target', 'target_type'],
+                cur,
+            )
+
+        db.snapshot_add(origin, visit, snapshot['id'], cur)
+
+        # TODO: drop this compat feature
+        occurrences = []
+        for name, info in snapshot['branches'].items():
+            if not info:
+                target = b'\x00' * 20
+                target_type = 'revision'
+            elif info['target_type'] == 'alias':
+                continue
+            else:
+                target = info['target']
+                target_type = info['target_type']
+
+            occurrences.append({
+                'origin': origin,
+                'visit': visit,
+                'branch': name,
+                'target': target,
+                'target_type': target_type,
+            })
+
+        self.occurrence_add(occurrences)
+
+    @db_transaction
+    def snapshot_get(self, snapshot_id, cur=None):
+        """Get the snapshot with the given id
+
+        Args:
+           snapshot_id (bytes): id of the snapshot
+        Returns:
+           dict: a snapshot with two keys, or None if the snapshot is
+           unknown:
+
+             - id: identifier of the snapshot
+             - branches: a dict mapping each branch name to its target
+
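+           For instance (sketch), a one-branch snapshot comes back as::
+
+               {'id': snp_id,
+                'branches': {b'master': {'target': rev_id,
+                                         'target_type': 'revision'}}}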
+        """
+        db = self.db
+
+        branches = {}
+        for branch in db.snapshot_get_by_id(snapshot_id, cur):
+            branch = dict(zip(db.snapshot_get_cols, branch))
+            del branch['snapshot_id']
+            name = branch.pop('name')
+            if branch == {'target': None, 'target_type': None}:
+                branch = None
+            branches[name] = branch
+
+        if branches:
+            return {'id': snapshot_id, 'branches': branches}
+
+        if db.snapshot_exists(snapshot_id, cur):
+            # empty snapshot
+            return {'id': snapshot_id, 'branches': {}}
+
+        return None
+
+    @db_transaction
+    def snapshot_get_by_origin_visit(self, origin, visit, cur=None):
+        """Get the snapshot for the given origin visit
+
+        Args:
+           origin (int): the origin identifier
+           visit (int): the visit identifier
+        Returns:
+           dict: a snapshot with two keys, or None if no visit matches:
+
+             - id: identifier of the snapshot
+             - branches: a dict containing the snapshot branch information
+
+        """
+        db = self.db
+
+        snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur)
+
+        if snapshot_id:
+            return self.snapshot_get(snapshot_id, cur=cur)
+        else:
+            # compatibility code during the snapshot migration
+            origin_visit_info = self.origin_visit_get_by(origin, visit,
+                                                         cur=cur)
+            if origin_visit_info is None:
+                return None
+            ret = {'id': None}
+            ret['branches'] = origin_visit_info['occurrences']
+            return ret
+
     @db_transaction
     def occurrence_add(self, occurrences, cur=None):
         """Add occurrences to the storage
 
         Args:
             occurrences: iterable of dictionaries representing the individual
                 occurrences to add. Each dict has the following keys:
 
                 - origin (int): id of the origin corresponding to the
                   occurrence
                 - visit (int): id of the visit corresponding to the
                   occurrence
                 - branch (str): the reference name of the occurrence
                 - target (sha1_git): the id of the object pointed to by
                   the occurrence
                 - target_type (str): the type of object pointed to by the
                   occurrence
 
         """
         db = self.db
 
         db.mktemp_occurrence_history(cur)
         db.copy_to(occurrences, 'tmp_occurrence_history',
                    ['origin', 'branch', 'target', 'target_type', 'visit'], cur)
 
         db.occurrence_history_add_from_temp(cur)
 
     @db_transaction_generator
     def occurrence_get(self, origin_id, cur=None):
         """Retrieve occurrence information per origin_id.
 
         Args:
             origin_id: The occurrence's origin.
 
         Yields:
             List of occurrences matching criterion.
 
         """
         db = self.db
         for line in db.occurrence_get(origin_id, cur):
             yield {
                 'origin': line[0],
                 'branch': line[1],
                 'target': line[2],
                 'target_type': line[3],
             }
 
     @db_transaction
     def origin_visit_add(self, origin, ts, cur=None):
         """Add an origin_visit for the origin at ts with status 'ongoing'.
 
         Args:
             origin: Visited Origin id
             ts: timestamp of such visit
 
         Returns:
             dict: dictionary with keys origin and visit where:
 
            - origin: the origin's identifier
            - visit: the visit identifier for the new visit
 
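        For instance (sketch):

            import datetime
            now = datetime.datetime.now(tz=datetime.timezone.utc)
            storage.origin_visit_add(origin_id, now)
            # -> {'origin': origin_id, 'visit': <new visit id>}
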
         """
         if isinstance(ts, str):
             ts = dateutil.parser.parse(ts)
 
         return {
             'origin': origin,
             'visit': self.db.origin_visit_add(origin, ts, cur)
         }
 
     @db_transaction
     def origin_visit_update(self, origin, visit_id, status, metadata=None,
                             cur=None):
         """Update an origin_visit's status.
 
         Args:
             origin: Visited Origin id
             visit_id: Visit's id
             status: Visit's new status
             metadata: Data associated to the visit
 
         Returns:
             None
 
         """
         return self.db.origin_visit_update(origin, visit_id, status, metadata,
                                            cur)
 
     @db_transaction_generator
     def origin_visit_get(self, origin, last_visit=None, limit=None, cur=None):
         """Retrieve all the origin's visit's information.
 
         Args:
            origin (int): the visited origin's identifier.
            last_visit (int): if given, only return visits later than this
                one. Defaults to None.
            limit (int): if given, the maximum number of visits to return.
                Defaults to None.
 
         Yields:
             List of visits.
 
         """
         db = self.db
         for line in db.origin_visit_get_all(
                 origin, last_visit=last_visit, limit=limit, cur=cur):
             data = dict(zip(self.db.origin_visit_get_cols, line))
             yield data
 
     @db_transaction
     def origin_visit_get_by(self, origin, visit, cur=None):
         """Retrieve origin visit's information.
 
        Args:
            origin: the visited origin's identifier.
            visit: the visit's identifier.
 
         Returns:
             The information on that particular (origin, visit)
 
         """
         db = self.db
 
         ori_visit = db.origin_visit_get(origin, visit, cur)
         if not ori_visit:
             return None
 
         ori_visit = dict(zip(self.db.origin_visit_get_cols, ori_visit))
 
         occs = {}
        for occ in db.occurrence_by_origin_visit(origin, visit, cur):
             _, branch_name, target, target_type = occ
             occs[branch_name] = {
                 'target': target,
                 'target_type': target_type
             }
 
         ori_visit.update({
             'occurrences': occs
         })
 
         return ori_visit
 
     @db_transaction_generator
     def revision_get_by(self,
                         origin_id,
                         branch_name=None,
                         timestamp=None,
                         limit=None,
                         cur=None):
         """Given an origin_id, retrieve occurrences' list per given criterions.
 
         Args:
             origin_id: The origin to filter on.
             branch_name: (optional) branch name.
             timestamp: (optional) time.
             limit: (optional) limit
 
        Yields:
            The revisions matching the criteria, if any.
 
         """
         for line in self.db.revision_get_by(origin_id,
                                             branch_name,
                                             timestamp,
                                             limit=limit,
                                             cur=cur):
             data = converters.db_to_revision(
                 dict(zip(self.db.revision_get_cols, line))
             )
             if not data['type']:
                 yield None
                 continue
             yield data
 
     def release_get_by(self, origin_id, limit=None):
         """Given an origin id, return all the tag objects pointing to heads of
         origin_id.
 
         Args:
             origin_id: the origin to filter on.
             limit: None by default
 
         Yields:
             List of releases matching the criterions or None if nothing is
             found.
 
         """
 
         for line in self.db.release_get_by(origin_id, limit=limit):
             data = converters.db_to_release(
                 dict(zip(self.db.release_get_cols, line))
             )
             yield data
 
     @db_transaction
     def object_find_by_sha1_git(self, ids, cur=None):
         """Return the objects found with the given ids.
 
         Args:
             ids: a list of sha1_gits (the sequence is iterated twice, so a
                 one-shot generator would yield incomplete results)
 
         Returns:
             dict: a mapping from id to the list of objects found. Each object
             found is itself a dict with keys:
 
             - sha1_git: the input id
             - type: the type of object found
             - id: the id of the object found
             - object_id: the numeric id of the object found.
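
         Example:
             A sketch of the returned shape, for one known and one unknown
             hypothetical id (field values illustrative only)::

                 result = storage.object_find_by_sha1_git([id1, id2])
                 # result == {id1: [{'sha1_git': id1, 'type': 'revision',
                 #                   'id': id1, 'object_id': 123}],
                 #            id2: []}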
 
         """
         db = self.db
 
         ret = {id: [] for id in ids}
 
         for retval in db.object_find_by_sha1_git(ids):
             if retval[1]:
                 ret[retval[0]].append(dict(zip(db.object_find_by_sha1_git_cols,
                                                retval)))
 
         return ret
 
     origin_keys = ['id', 'type', 'url', 'lister', 'project']
 
     @db_transaction
     def origin_get(self, origin, cur=None):
         """Return the origin either identified by its id or its tuple
         (type, url).
 
         Args:
             origin: dictionary representing the individual origin to find.
                 This dict has either the keys type and url:
 
                 - type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
                 - url (bytes): the url the origin points to
 
                 or the id:
 
                 - id: the origin id
 
         Returns:
             dict: the origin dictionary with the keys:
 
             - id: origin's id
             - type: origin's type
             - url: origin's url
             - lister: lister's uuid
             - project: project's uuid (FIXME, retrieve this information)
 
         Raises:
             ValueError: if the given dict contains neither the id key nor
                 both the type and url keys.
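
         Example:
             Both lookup forms, with hypothetical values::

                 storage.origin_get({'id': 42})
                 storage.origin_get({'type': 'git',
                                     'url': 'https://example.com/repo.git'})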
 
         """
         db = self.db
 
         origin_id = origin.get('id')
         if origin_id:  # check lookup per id first
             ori = db.origin_get(origin_id, cur)
         elif 'type' in origin and 'url' in origin:  # or lookup per type, url
             ori = db.origin_get_with(origin['type'], origin['url'], cur)
         else:  # unsupported lookup
             raise ValueError('Origin must have either id or (type and url).')
 
         if ori:
             return dict(zip(self.origin_keys, ori))
         return None
 
     @db_transaction_generator
     def origin_search(self, url_pattern, offset=0, limit=50,
                       regexp=False, cur=None):
         """Search for origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The search is performed in a case-insensitive way.
 
         Args:
             url_pattern: the string pattern to search for in origin urls
             offset: number of found origins to skip before returning results
             limit: the maximum number of found origins to return
             regexp: if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
 
         Yields:
             dicts containing origin information, as returned by
             :meth:`swh.storage.storage.Storage.origin_get`.
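
         Example:
             A plain substring search, then a regular expression search
             (patterns hypothetical)::

                 list(storage.origin_search('example.com'))
                 list(storage.origin_search(r'^https://.*\.debian\.org',
                                            regexp=True))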
         """
         db = self.db
 
         for origin in db.origin_search(url_pattern, offset, limit,
                                        regexp, cur):
             yield dict(zip(self.origin_keys, origin))
 
     @db_transaction
     def _person_add(self, person, cur=None):
         """Add a person in storage.
 
         Note: Internal function for now, do not use outside of this module.
 
         Do not do anything fancy in case a person already exists.
         Please adapt code if more checks are needed.
 
         Args:
             person: dictionary with keys name and email.
 
         Returns:
             Id of the new person.
 
         """
         db = self.db
 
         return db.person_add(person)
 
     @db_transaction_generator
     def person_get(self, person, cur=None):
         """Return the persons identified by their ids.
 
         Args:
             person: array of ids.
 
         Returns:
             The array of persons corresponding of the ids.
 
         """
         db = self.db
 
         for person in db.person_get(person):
             yield dict(zip(db.person_get_cols, person))
 
     @db_transaction
     def origin_add(self, origins, cur=None):
         """Add origins to the storage
 
         Args:
             origins: list of dictionaries representing the individual origins,
                 with the following keys:
 
                 - type: the origin type ('git', 'svn', 'deb', ...)
                 - url (bytes): the url the origin points to
 
         Returns:
             list: ids corresponding to the given origins
 
         """
 
         ret = []
         for origin in origins:
             ret.append(self.origin_add_one(origin, cur=cur))
 
         return ret
 
     @db_transaction
     def origin_add_one(self, origin, cur=None):
         """Add origin to the storage
 
         Args:
             origin: dictionary representing the individual origin to add. This
                 dict has the following keys:
 
                 - type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
                 - url (bytes): the url the origin points to
 
         Returns:
             the id of the added origin, or of the identical one that already
             exists.
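
         Example:
             A minimal sketch (url hypothetical)::

                 origin_id = storage.origin_add_one({
                     'type': 'git',
                     'url': 'https://example.com/repo.git',
                 })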
 
         """
         db = self.db
 
         data = db.origin_get_with(origin['type'], origin['url'], cur)
         if data:
             return data[0]
 
         return db.origin_add(origin['type'], origin['url'], cur)
 
     @db_transaction
     def fetch_history_start(self, origin_id, cur=None):
         """Add an entry for origin origin_id in fetch_history. Returns the id
         of the added fetch_history entry
         """
         fetch_history = {
             'origin': origin_id,
             'date': datetime.datetime.now(tz=datetime.timezone.utc),
         }
 
         return self.db.create_fetch_history(fetch_history, cur)
 
     @db_transaction
     def fetch_history_end(self, fetch_history_id, data, cur=None):
         """Close the fetch_history entry with id `fetch_history_id`, replacing
            its data with `data`.
         """
         now = datetime.datetime.now(tz=datetime.timezone.utc)
         fetch_history = self.db.get_fetch_history(fetch_history_id, cur)
 
         if not fetch_history:
             raise ValueError('No fetch_history with id %d' % fetch_history_id)
 
         fetch_history['duration'] = now - fetch_history['date']
 
         fetch_history.update(data)
 
         self.db.update_fetch_history(fetch_history, cur)
 
     @db_transaction
     def fetch_history_get(self, fetch_history_id, cur=None):
         """Get the fetch_history entry with id `fetch_history_id`.
         """
         return self.db.get_fetch_history(fetch_history_id, cur)
 
     @db_transaction
     def entity_add(self, entities, cur=None):
         """Add the given entitites to the database (in entity_history).
 
         Args:
             entities (iterable): iterable of dictionaries with the following
                 keys:
 
                 - uuid (uuid): id of the entity
                 - parent (uuid): id of the parent entity
                 - name (str): name of the entity
                 - type (str): type of entity (one of 'organization',
                   'group_of_entities', 'hosting', 'group_of_persons', 'person',
                   'project')
                 - description (str, optional): description of the entity
                 - homepage (str): url of the entity's homepage
                 - active (bool): whether the entity is active
                 - generated (bool): whether the entity was generated
                 - lister_metadata (dict): lister-specific entity metadata
                 - metadata (dict): other metadata for the entity
                 - validity (datetime.DateTime array): timestamps at which we
                   listed the entity.
 
         """
         db = self.db
 
         cols = list(db.entity_history_cols)
         cols.remove('id')
 
         db.mktemp_entity_history()
         db.copy_to(entities, 'tmp_entity_history', cols, cur)
         db.entity_history_add_from_temp()
 
     @db_transaction_generator
     def entity_get_from_lister_metadata(self, entities, cur=None):
         """Fetch entities from the database, matching with the lister and
            associated metadata.
 
         Args:
             entities (iterable): dictionaries containing the lister metadata to
                look for. Useful keys are 'lister', 'type', 'id', ...
 
         Yields:
             fetched entities with all their attributes. If no match was found,
             the returned entity is None.
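
         Example:
             Looking up one entity by its lister metadata (values taken
             from the test fixtures)::

                 list(storage.entity_get_from_lister_metadata([{
                     'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
                     'type': 'user',
                     'id': 12877,
                 }]))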
 
         """
 
         db = self.db
 
         db.mktemp_entity_lister(cur)
 
         mapped_entities = []
         for i, entity in enumerate(entities):
             mapped_entity = {
                 'id': i,
                 'lister_metadata': entity,
             }
             mapped_entities.append(mapped_entity)
 
         db.copy_to(mapped_entities, 'tmp_entity_lister',
                    ['id', 'lister_metadata'], cur)
 
         cur.execute('''select id, %s
                        from swh_entity_from_tmp_entity_lister()
                        order by id''' %
                     ','.join(db.entity_cols))
 
         for id, *entity_vals in cur:
             fetched_entity = dict(zip(db.entity_cols, entity_vals))
             if fetched_entity['uuid']:
                 yield fetched_entity
             else:
                 yield {
                     'uuid': None,
                     # echo back the lister metadata of the unmatched entity
                     'lister_metadata': mapped_entities[id]['lister_metadata'],
                 }
 
     @db_transaction_generator
     def entity_get(self, uuid, cur=None):
         """Returns the list of entity per its uuid identifier and also its
         parent hierarchy.
 
         Args:
             uuid: entity's identifier
 
         Returns:
             List of entities starting with entity with uuid and the parent
             hierarchy from such entity.
 
         """
         db = self.db
         for entity in db.entity_get(uuid, cur):
             yield dict(zip(db.entity_cols, entity))
 
     @db_transaction
     def entity_get_one(self, uuid, cur=None):
         """Returns one entity using its uuid identifier.
 
         Args:
             uuid: entity's identifier
 
         Returns:
             A dict describing the entity, or None if it does not exist.
 
         """
         db = self.db
         entity = db.entity_get_one(uuid, cur)
         if entity:
             return dict(zip(db.entity_cols, entity))
         else:
             return None
 
     @db_transaction
     def stat_counters(self, cur=None):
         """compute statistics about the number of tuples in various tables
 
         Returns:
             dict: a dictionary mapping textual labels (e.g., content) to
             integer values (e.g., the number of tuples in table content)
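
         Example:
             A sketch of the returned mapping (values hypothetical)::

                 counters = storage.stat_counters()
                 # e.g. {'content': 123456, 'revision': 7890, ...}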
 
         """
         return {k: v for (k, v) in self.db.stat_counters()}
 
     @db_transaction
     def origin_metadata_add(self, origin_id, ts, provider, tool, metadata,
                             cur=None):
         """ Add an origin_metadata for the origin at ts with provenance and
         metadata.
 
         Args:
             origin_id (int): the origin's id for which the metadata is added
             ts (datetime): timestamp of the found metadata
             provider (int): the provider of metadata (ex:'hal')
             tool (int): tool used to extract metadata
             metadata (jsonb): the metadata retrieved at the time and location
 
         Returns:
             id (int): the origin_metadata unique id
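
         Example:
             A minimal sketch, with hypothetical provider and tool ids::

                 om_id = storage.origin_metadata_add(
                     origin_id=42,
                     ts='2015-01-01T23:00:00+00:00',
                     provider=1,
                     tool=1,
                     metadata={'name': 'test_origin_metadata'})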
         """
         if isinstance(ts, str):
             ts = dateutil.parser.parse(ts)
 
         return self.db.origin_metadata_add(origin_id, ts, provider, tool,
                                            metadata, cur)
 
     @db_transaction_generator
     def origin_metadata_get_by(self, origin_id, provider_type=None, cur=None):
         """Retrieve list of all origin_metadata entries for the origin_id
 
         Args:
             origin_id (int): the unique origin identifier
             provider_type (str): (optional) type of provider
 
         Yields:
             origin_metadata dicts with the keys:
 
             - id (int): origin_metadata's id
             - origin_id (int): origin's id
             - discovery_date (datetime): timestamp of discovery
             - tool_id (int): metadata's extracting tool
             - metadata (jsonb)
             - provider_id (int): metadata's provider
             - provider_name (str)
             - provider_type (str)
             - provider_url (str)
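
         Example:
             Listing all deposit metadata for a hypothetical origin::

                 for om in storage.origin_metadata_get_by(
                         42, provider_type='deposit-client'):
                     print(om['discovery_date'], om['metadata'])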
 
         """
         db = self.db
         for line in db.origin_metadata_get_by(origin_id, provider_type, cur):
             yield dict(zip(db.origin_metadata_get_cols, line))
 
     @db_transaction_generator
     def tool_add(self, tools, cur=None):
         """Add new tools to the storage.
 
         Args:
             tools ([dict]): list of dictionaries representing the tools to
                 insert in the db, each with the following keys::

                     name (str): tool's name
                     version (str): tool's version
                     configuration (dict): tool's configuration (free form
                                           dict)
 
         Yields:
             Dicts of the tools inserted in the db (each holding the id key
             as well). The order of the results is not guaranteed to match
             the order of the input list.
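
         Example:
             Registering a single tool (values taken from the test
             fixtures)::

                 tools = list(storage.tool_add([{
                     'name': 'swh-deposit',
                     'version': '0.0.1',
                     'configuration': {'sword_version': '2'},
                 }]))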
 
         """
         db = self.db
         db.mktemp_tool(cur)
         db.copy_to(tools, 'tmp_tool',
                    ['name', 'version', 'configuration'],
                    cur)
 
         tools = db.tool_add_from_temp(cur)
         for line in tools:
             yield dict(zip(db.tool_cols, line))
 
     @db_transaction
     def tool_get(self, tool, cur=None):
         """Retrieve tool information.
 
         Args:
             tool (dict): dictionary representing a tool, with the
                 following keys::

                     name (str): tool's name
                     version (str): tool's version
                     configuration (dict): tool's configuration (free form
                                           dict)

         Returns:
             The full tool information (including its id) if it exists,
             None otherwise.
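
         Example:
             Looking up the tool registered above::

                 tool = storage.tool_get({
                     'name': 'swh-deposit',
                     'version': '0.0.1',
                     'configuration': {'sword_version': '2'},
                 })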
 
         """
         db = self.db
         tool_conf = tool['configuration']
         if isinstance(tool_conf, dict):
             tool_conf = json.dumps(tool_conf)
 
         idx = db.tool_get(tool['name'],
                           tool['version'],
                           tool_conf)
         if not idx:
             return None
         return dict(zip(self.db.tool_cols, idx))
 
     @db_transaction
     def metadata_provider_add(self, provider_name, provider_type, provider_url,
                               metadata, cur=None):
         """Add a metadata provider.

         Args:
             provider_name (str): the provider's name
             provider_type (str): the provider's type (e.g. 'deposit-client')
             provider_url (str): the provider's url
             metadata (dict): extra metadata about the provider

         Returns:
             The identifier of the newly inserted metadata provider.

         """
         db = self.db
         return db.metadata_provider_add(provider_name, provider_type,
                                         provider_url, metadata, cur)
 
     @db_transaction
     def metadata_provider_get(self, provider_id, cur=None):
         """Retrieve a metadata provider by its identifier.

         Args:
             provider_id (int): the provider's identifier

         Returns:
             A dict describing the provider, or None if it does not exist.

         """
         db = self.db
         result = db.metadata_provider_get(provider_id)
         if not result:
             return None
         return dict(zip(self.db.metadata_provider_cols, result))
 
     @db_transaction
     def metadata_provider_get_by(self, provider, cur=None):
         """Retrieve a metadata provider by its name and url.

         Args:
             provider (dict): dictionary with the keys provider_name and
                 provider_url

         Returns:
             A dict describing the provider, or None if it does not exist.

         """
         db = self.db
         result = db.metadata_provider_get_by(provider['provider_name'],
                                              provider['provider_url'])
         if not result:
             return None
         return dict(zip(self.db.metadata_provider_cols, result))
diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/test_storage.py
index 2a70d020a..80a67bf3d 100644
--- a/swh/storage/tests/test_storage.py
+++ b/swh/storage/tests/test_storage.py
@@ -1,2271 +1,2445 @@
 # Copyright (C) 2015-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import copy
 import datetime
 from operator import itemgetter
 import psycopg2
 import unittest
 from uuid import UUID
 
 from unittest.mock import patch
 
 from nose.tools import istest
 from nose.plugins.attrib import attr
 
 from swh.model import from_disk, identifiers
 from swh.model.hashutil import hash_to_bytes
 from swh.core.tests.db_testing import DbTestFixture
 from swh.storage.tests.storage_testing import StorageTestFixture
 
 
 @attr('db')
 class BaseTestStorage(StorageTestFixture, DbTestFixture):
     def setUp(self):
         super().setUp()
 
         db = self.test_db[self.TEST_STORAGE_DB_NAME]
         self.conn = db.conn
         self.cursor = db.cursor
 
         self.maxDiff = None
 
         self.cont = {
             'data': b'42\n',
             'length': 3,
             'sha1': hash_to_bytes(
                 '34973274ccef6ab4dfaaf86599792fa9c3fe4689'),
             'sha1_git': hash_to_bytes(
                 'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'),
             'sha256': hash_to_bytes(
                 '673650f936cb3b0a2f93ce09d81be107'
                 '48b1b203c19e8176b4eefc1964a0cf3a'),
             'blake2s256': hash_to_bytes('d5fe1939576527e42cfd76a9455a2'
                                         '432fe7f56669564577dd93c4280e76d661d'),
             'status': 'visible',
         }
 
         self.cont2 = {
             'data': b'4242\n',
             'length': 5,
             'sha1': hash_to_bytes(
                 '61c2b3a30496d329e21af70dd2d7e097046d07b7'),
             'sha1_git': hash_to_bytes(
                 '36fade77193cb6d2bd826161a0979d64c28ab4fa'),
             'sha256': hash_to_bytes(
                 '859f0b154fdb2d630f45e1ecae4a8629'
                 '15435e663248bb8461d914696fc047cd'),
             'blake2s256': hash_to_bytes('849c20fad132b7c2d62c15de310adfe87be'
                                         '94a379941bed295e8141c6219810d'),
             'status': 'visible',
         }
 
         self.cont3 = {
             'data': b'424242\n',
             'length': 7,
             'sha1': hash_to_bytes(
                 '3e21cc4942a4234c9e5edd8a9cacd1670fe59f13'),
             'sha1_git': hash_to_bytes(
                 'c932c7649c6dfa4b82327d121215116909eb3bea'),
             'sha256': hash_to_bytes(
                 '92fb72daf8c6818288a35137b72155f5'
                 '07e5de8d892712ab96277aaed8cf8a36'),
             'blake2s256': hash_to_bytes('76d0346f44e5a27f6bafdd9c2befd304af'
                                         'f83780f93121d801ab6a1d4769db11'),
             'status': 'visible',
         }
 
         self.missing_cont = {
             'data': b'missing\n',
             'length': 8,
             'sha1': hash_to_bytes(
                 'f9c24e2abb82063a3ba2c44efd2d3c797f28ac90'),
             'sha1_git': hash_to_bytes(
                 '33e45d56f88993aae6a0198013efa80716fd8919'),
             'sha256': hash_to_bytes(
                 '6bbd052ab054ef222c1c87be60cd191a'
                 'ddedd24cc882d1f5f7f7be61dc61bb3a'),
             'blake2s256': hash_to_bytes('306856b8fd879edb7b6f1aeaaf8db9bbecc9'
                                         '93cd7f776c333ac3a782fa5c6eba'),
             'status': 'absent',
         }
 
         self.skipped_cont = {
             'length': 1024 * 1024 * 200,
             'sha1_git': hash_to_bytes(
                 '33e45d56f88993aae6a0198013efa80716fd8920'),
             'sha1': hash_to_bytes(
                 '43e45d56f88993aae6a0198013efa80716fd8920'),
             'sha256': hash_to_bytes(
                 '7bbd052ab054ef222c1c87be60cd191a'
                 'ddedd24cc882d1f5f7f7be61dc61bb3a'),
             'blake2s256': hash_to_bytes(
                 'ade18b1adecb33f891ca36664da676e1'
                 '2c772cc193778aac9a137b8dc5834b9b'),
             'reason': 'Content too long',
             'status': 'absent',
         }
 
         self.skipped_cont2 = {
             'length': 1024 * 1024 * 300,
             'sha1_git': hash_to_bytes(
                 '44e45d56f88993aae6a0198013efa80716fd8921'),
             'sha1': hash_to_bytes(
                 '54e45d56f88993aae6a0198013efa80716fd8920'),
             'sha256': hash_to_bytes(
                 '8cbd052ab054ef222c1c87be60cd191a'
                 'ddedd24cc882d1f5f7f7be61dc61bb3a'),
             'blake2s256': hash_to_bytes(
                 '9ce18b1adecb33f891ca36664da676e1'
                 '2c772cc193778aac9a137b8dc5834b9b'),
             'reason': 'Content too long',
             'status': 'absent',
         }
 
         self.dir = {
             'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa90',
             'entries': [
                 {
                     'name': b'foo',
                     'type': 'file',
                     'target': self.cont['sha1_git'],
                     'perms': from_disk.DentryPerms.content,
                 },
                 {
                     'name': b'bar\xc3',
                     'type': 'dir',
                     'target': b'12345678901234567890',
                     'perms': from_disk.DentryPerms.directory,
                 },
             ],
         }
 
         self.dir2 = {
             'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa95',
             'entries': [
                 {
                     'name': b'oof',
                     'type': 'file',
                     'target': self.cont2['sha1_git'],
                     'perms': from_disk.DentryPerms.content,
                 }
             ],
         }
 
         self.dir3 = {
             'id': hash_to_bytes('33e45d56f88993aae6a0198013efa80716fd8921'),
             'entries': [
                 {
                     'name': b'foo',
                     'type': 'file',
                     'target': self.cont['sha1_git'],
                     'perms': from_disk.DentryPerms.content,
                 },
                 {
                     'name': b'bar',
                     'type': 'dir',
                     'target': b'12345678901234560000',
                     'perms': from_disk.DentryPerms.directory,
                 },
                 {
                     'name': b'hello',
                     'type': 'file',
                     'target': b'12345678901234567890',
                     'perms': from_disk.DentryPerms.content,
                 },
 
             ],
         }
 
         self.minus_offset = datetime.timezone(datetime.timedelta(minutes=-120))
         self.plus_offset = datetime.timezone(datetime.timedelta(minutes=120))
 
         self.revision = {
             'id': b'56789012345678901234',
             'message': b'hello',
             'author': {
                 'name': b'Nicolas Dandrimont',
                 'email': b'nicolas@example.com',
                 'fullname': b'Nicolas Dandrimont <nicolas@example.com> ',
             },
             'date': {
                 'timestamp': 1234567890,
                 'offset': 120,
                 'negative_utc': None,
             },
             'committer': {
                 'name': b'St\xc3fano Zacchiroli',
                 'email': b'stefano@example.com',
                 'fullname': b'St\xc3fano Zacchiroli <stefano@example.com>'
             },
             'committer_date': {
                 'timestamp': 1123456789,
                 'offset': 0,
                 'negative_utc': True,
             },
             'parents': [b'01234567890123456789', b'23434512345123456789'],
             'type': 'git',
             'directory': self.dir['id'],
             'metadata': {
                 'checksums': {
                     'sha1': 'tarball-sha1',
                     'sha256': 'tarball-sha256',
                 },
                 'signed-off-by': 'some-dude',
                 'extra_headers': [
                     ['gpgsig', b'test123'],
                     ['mergetags', [b'foo\\bar', b'\x22\xaf\x89\x80\x01\x00']],
                 ],
             },
             'synthetic': True
         }
 
         self.revision2 = {
             'id': b'87659012345678904321',
             'message': b'hello again',
             'author': {
                 'name': b'Roberto Dicosmo',
                 'email': b'roberto@example.com',
                 'fullname': b'Roberto Dicosmo <roberto@example.com>',
             },
             'date': {
                 'timestamp': {
                     'seconds': 1234567843,
                     'microseconds': 220000,
                 },
                 'offset': -720,
                 'negative_utc': None,
             },
             'committer': {
                 'name': b'tony',
                 'email': b'ar@dumont.fr',
                 'fullname': b'tony <ar@dumont.fr>',
             },
             'committer_date': {
                 'timestamp': 1123456789,
                 'offset': 0,
                 'negative_utc': False,
             },
             'parents': [b'01234567890123456789'],
             'type': 'git',
             'directory': self.dir2['id'],
             'metadata': None,
             'synthetic': False
         }
 
         self.revision3 = {
             'id': hash_to_bytes('7026b7c1a2af56521e951c01ed20f255fa054238'),
             'message': b'a simple revision with no parents this time',
             'author': {
                 'name': b'Roberto Dicosmo',
                 'email': b'roberto@example.com',
                 'fullname': b'Roberto Dicosmo <roberto@example.com>',
             },
             'date': {
                 'timestamp': {
                     'seconds': 1234567843,
                     'microseconds': 220000,
                 },
                 'offset': -720,
                 'negative_utc': None,
             },
             'committer': {
                 'name': b'tony',
                 'email': b'ar@dumont.fr',
                 'fullname': b'tony <ar@dumont.fr>',
             },
             'committer_date': {
                 'timestamp': 1127351742,
                 'offset': 0,
                 'negative_utc': False,
             },
             'parents': [],
             'type': 'git',
             'directory': self.dir2['id'],
             'metadata': None,
             'synthetic': True
         }
 
         self.revision4 = {
             'id': hash_to_bytes('368a48fe15b7db2383775f97c6b247011b3f14f4'),
             'message': b'parent of self.revision2',
             'author': {
                 'name': b'me',
                 'email': b'me@soft.heri',
                 'fullname': b'me <me@soft.heri>',
             },
             'date': {
                 'timestamp': {
                     'seconds': 1244567843,
                     'microseconds': 220000,
                 },
                 'offset': -720,
                 'negative_utc': None,
             },
             'committer': {
                 'name': b'committer-dude',
                 'email': b'committer@dude.com',
                 'fullname': b'committer-dude <committer@dude.com>',
             },
             'committer_date': {
                 'timestamp': {
                     'seconds': 1244567843,
                     'microseconds': 220000,
                 },
                 'offset': -720,
                 'negative_utc': None,
             },
             'parents': [self.revision3['id']],
             'type': 'git',
             'directory': self.dir['id'],
             'metadata': None,
             'synthetic': False
         }
 
         self.origin = {
             'url': 'file:///dev/null',
             'type': 'git',
         }
 
         self.origin2 = {
             'url': 'file:///dev/zero',
             'type': 'git',
         }
 
         self.provider = {
             'name': 'hal',
             'type': 'deposit-client',
             'url': 'http:///hal/inria',
             'metadata': {
                 'location': 'France'
             }
         }
 
         self.metadata_tool = {
             'name': 'swh-deposit',
             'version': '0.0.1',
             'configuration': {
                 'sword_version': '2'
             }
         }
 
         self.origin_metadata = {
             'origin': self.origin,
             'discovery_date': datetime.datetime(2015, 1, 1, 23, 0, 0,
                                                 tzinfo=datetime.timezone.utc),
             'provider': self.provider,
             'tool': 'swh-deposit',
             'metadata': {
                 'name': 'test_origin_metadata',
                 'version': '0.0.1'
              }
         }
 
         self.origin_metadata2 = {
             'origin': self.origin,
             'discovery_date': datetime.datetime(2017, 1, 1, 23, 0, 0,
                                                 tzinfo=datetime.timezone.utc),
             'provider': self.provider,
             'tool': 'swh-deposit',
             'metadata': {
                 'name': 'test_origin_metadata',
                 'version': '0.0.1'
              }
         }
 
         self.date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0,
                                              tzinfo=datetime.timezone.utc)
 
         self.occurrence = {
             'branch': b'master',
-            'target': b'67890123456789012345',
+            'target': self.revision['id'],
             'target_type': 'revision',
         }
 
         self.date_visit2 = datetime.datetime(2015, 1, 1, 23, 0, 0,
                                              tzinfo=datetime.timezone.utc)
 
         self.occurrence2 = {
             'branch': b'master',
             'target': self.revision2['id'],
             'target_type': 'revision',
         }
 
         self.date_visit3 = datetime.datetime(2015, 1, 1, 23, 0, 0,
                                              tzinfo=datetime.timezone.utc)
 
         # template occurrence to be filled in test (cf. revision_log_by)
         self.occurrence3 = {
             'branch': b'master',
             'target_type': 'revision',
         }
 
         self.release = {
             'id': b'87659012345678901234',
             'name': b'v0.0.1',
             'author': {
                 'name': b'olasd',
                 'email': b'nic@olasd.fr',
                 'fullname': b'olasd <nic@olasd.fr>',
             },
             'date': {
                 'timestamp': 1234567890,
                 'offset': 42,
                 'negative_utc': None,
             },
             'target': b'43210987654321098765',
             'target_type': 'revision',
             'message': b'synthetic release',
             'synthetic': True,
         }
 
         self.release2 = {
             'id': b'56789012348765901234',
             'name': b'v0.0.2',
             'author': {
                 'name': b'tony',
                 'email': b'ar@dumont.fr',
                 'fullname': b'tony <ar@dumont.fr>',
             },
             'date': {
                 'timestamp': 1634366813,
                 'offset': -120,
                 'negative_utc': None,
             },
             'target': b'432109\xa9765432\xc309\x00765',
             'target_type': 'revision',
             'message': b'v0.0.2\nMisc performance improvments + bug fixes',
             'synthetic': False
         }
 
         self.release3 = {
             'id': b'87659012345678904321',
             'name': b'v0.0.2',
             'author': {
                 'name': b'tony',
                 'email': b'tony@ardumont.fr',
                 'fullname': b'tony <tony@ardumont.fr>',
             },
             'date': {
                 'timestamp': 1634336813,
                 'offset': 0,
                 'negative_utc': False,
             },
             'target': self.revision2['id'],
             'target_type': 'revision',
             'message': b'yet another synthetic release',
             'synthetic': True,
         }
 
         self.fetch_history_date = datetime.datetime(
             2015, 1, 2, 21, 0, 0,
             tzinfo=datetime.timezone.utc)
         self.fetch_history_end = datetime.datetime(
             2015, 1, 2, 23, 0, 0,
             tzinfo=datetime.timezone.utc)
 
         self.fetch_history_duration = (self.fetch_history_end -
                                        self.fetch_history_date)
 
         self.fetch_history_data = {
             'status': True,
             'result': {'foo': 'bar'},
             'stdout': 'blabla',
             'stderr': 'blablabla',
         }
 
         self.entity1 = {
             'uuid': UUID('f96a7ec1-0058-4920-90cc-7327e4b5a4bf'),
             # GitHub users
             'parent': UUID('ad6df473-c1d2-4f40-bc58-2b091d4a750e'),
             'name': 'github:user:olasd',
             'type': 'person',
             'description': 'Nicolas Dandrimont',
             'homepage': 'http://example.com',
             'active': True,
             'generated': True,
             'lister_metadata': {
                 # swh.lister.github
                 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
                 'id': 12877,
                 'type': 'user',
                 'last_activity': '2015-11-03',
             },
             'metadata': None,
             'validity': [
                 datetime.datetime(2015, 11, 3, 11, 0, 0,
                                   tzinfo=datetime.timezone.utc),
             ]
         }
 
         self.entity1_query = {
             'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
             'id': 12877,
             'type': 'user',
         }
 
         self.entity2 = {
             'uuid': UUID('3903d075-32d6-46d4-9e29-0aef3612c4eb'),
             # GitHub users
             'parent': UUID('ad6df473-c1d2-4f40-bc58-2b091d4a750e'),
             'name': 'github:user:zacchiro',
             'type': 'person',
             'description': 'Stefano Zacchiroli',
             'homepage': 'http://example.com',
             'active': True,
             'generated': True,
             'lister_metadata': {
                 # swh.lister.github
                 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
                 'id': 216766,
                 'type': 'user',
                 'last_activity': '2015-11-03',
             },
             'metadata': None,
             'validity': [
                 datetime.datetime(2015, 11, 3, 11, 0, 0,
                                   tzinfo=datetime.timezone.utc),
             ]
         }
 
         self.entity3 = {
             'uuid': UUID('111df473-c1d2-4f40-bc58-2b091d4a7111'),
             # GitHub users
             'parent': UUID('222df473-c1d2-4f40-bc58-2b091d4a7222'),
             'name': 'github:user:ardumont',
             'type': 'person',
             'description': 'Antoine R. Dumont a.k.a tony',
             'homepage': 'https://ardumont.github.io',
             'active': True,
             'generated': True,
             'lister_metadata': {
                 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
                 'id': 666,
                 'type': 'user',
                 'last_activity': '2016-01-15',
             },
             'metadata': None,
             'validity': [
                 datetime.datetime(2015, 11, 3, 11, 0, 0,
                                   tzinfo=datetime.timezone.utc),
             ]
         }
 
         self.entity4 = {
             'uuid': UUID('222df473-c1d2-4f40-bc58-2b091d4a7222'),
             # GitHub users
             'parent': None,
             'name': 'github:user:ToNyX',
             'type': 'person',
             'description': 'ToNyX',
             'homepage': 'https://ToNyX.github.io',
             'active': True,
             'generated': True,
             'lister_metadata': {
                 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
                 'id': 999,
                 'type': 'user',
                 'last_activity': '2015-12-24',
             },
             'metadata': None,
             'validity': [
                 datetime.datetime(2015, 11, 3, 11, 0, 0,
                                   tzinfo=datetime.timezone.utc),
             ]
         }
 
         self.entity2_query = {
             'lister_metadata': {
                 'lister': '34bd6b1b-463f-43e5-a697-785107f598e4',
                 'id': 216766,
                 'type': 'user',
             },
         }
 
+        self.snapshot = {
+            'id': hash_to_bytes('2498dbf535f882bc7f9a18fb16c9ad27fda7bab7'),
+            'branches': {
+                self.occurrence['branch']: {
+                    'target': self.occurrence['target'],
+                    'target_type': self.occurrence['target_type'],
+                },
+            },
+        }
+
+        self.empty_snapshot = {
+            'id': hash_to_bytes('1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'),
+            'branches': {},
+        }
+
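+        # A snapshot whose branches cover every snapshot_target type,
+        # including an alias and a dangling (None) branch.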
+        self.complete_snapshot = {
+            'id': hash_to_bytes('6e65b86363953b780d92b0a928f3e8fcdd10db36'),
+            'branches': {
+                b'directory': {
+                    'target': hash_to_bytes(
+                        '1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8'),
+                    'target_type': 'directory',
+                },
+                b'content': {
+                    'target': hash_to_bytes(
+                        'fe95a46679d128ff167b7c55df5d02356c5a1ae1'),
+                    'target_type': 'content',
+                },
+                b'alias': {
+                    'target': b'revision',
+                    'target_type': 'alias',
+                },
+                b'revision': {
+                    'target': hash_to_bytes(
+                        'aafb16d69fd30ff58afdd69036a26047f3aebdc6'),
+                    'target_type': 'revision',
+                },
+                b'release': {
+                    'target': hash_to_bytes(
+                        '7045404f3d1c54e6473c71bbb716529fbad4be24'),
+                    'target_type': 'release',
+                },
+                b'snapshot': {
+                    'target': hash_to_bytes(
+                        '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'),
+                    'target_type': 'snapshot',
+                },
+                b'dangling': None,
+            }
+        }
+
     def tearDown(self):
         self.reset_storage_tables()
         super().tearDown()
 
 
 class CommonTestStorage(BaseTestStorage):
     """Base class for Storage testing.
 
     This class is used as-is to test local storage (see TestLocalStorage
     below) and remote storage (see TestRemoteStorage in
     test_remote_storage.py).
 
     We need to have the two classes inherit from this base class
     separately to avoid nosetests running the tests from the base
     class twice.
 
     """
 
     @staticmethod
     def normalize_entity(entity):
         entity = copy.deepcopy(entity)
         for key in ('date', 'committer_date'):
             if key in entity:
                 entity[key] = identifiers.normalize_timestamp(entity[key])
 
         return entity
 
     @istest
     def check_config(self):
         self.assertTrue(self.storage.check_config(check_write=True))
         self.assertTrue(self.storage.check_config(check_write=False))
 
     @istest
     def content_add(self):
         cont = self.cont
 
         self.storage.content_add([cont])
         if hasattr(self.storage, 'objstorage'):
             self.assertIn(cont['sha1'], self.storage.objstorage)
         self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status'
                             ' FROM content WHERE sha1 = %s',
                             (cont['sha1'],))
         datum = self.cursor.fetchone()
         self.assertEqual(
             (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(),
              datum[3], datum[4]),
             (cont['sha1'], cont['sha1_git'], cont['sha256'],
              cont['length'], 'visible'))
 
     @istest
     def content_add_collision(self):
         cont1 = self.cont
 
         # create (corrupted) content with same sha1{,_git} but != sha256
         cont1b = cont1.copy()
         sha256_array = bytearray(cont1b['sha256'])
         sha256_array[0] += 1
         cont1b['sha256'] = bytes(sha256_array)
 
         with self.assertRaises(psycopg2.IntegrityError):
             self.storage.content_add([cont1, cont1b])
 
     @istest
     def skipped_content_add(self):
         cont = self.skipped_cont.copy()
         cont2 = self.skipped_cont2.copy()
         cont2['blake2s256'] = None
 
         self.storage.content_add([cont, cont, cont2])
 
         self.cursor.execute('SELECT sha1, sha1_git, sha256, blake2s256, '
                             'length, status, reason '
                             'FROM skipped_content ORDER BY sha1_git')
 
         datums = self.cursor.fetchall()
 
         self.assertEquals(2, len(datums))
         datum = datums[0]
         self.assertEqual(
             (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(),
              datum[3].tobytes(), datum[4], datum[5], datum[6]),
             (cont['sha1'], cont['sha1_git'], cont['sha256'],
              cont['blake2s256'], cont['length'], 'absent',
              'Content too long')
         )
 
         datum2 = datums[1]
         self.assertEqual(
             (datum2[0].tobytes(), datum2[1].tobytes(), datum2[2].tobytes(),
              datum2[3], datum2[4], datum2[5], datum2[6]),
             (cont2['sha1'], cont2['sha1_git'], cont2['sha256'],
              cont2['blake2s256'], cont2['length'], 'absent',
              'Content too long')
         )
 
     @istest
     def content_missing(self):
         cont2 = self.cont2
         missing_cont = self.missing_cont
         self.storage.content_add([cont2])
         gen = self.storage.content_missing([cont2, missing_cont])
 
         self.assertEqual(list(gen), [missing_cont['sha1']])
 
     @istest
     def content_missing_per_sha1(self):
         # given
         cont2 = self.cont2
         missing_cont = self.missing_cont
         self.storage.content_add([cont2])
         # when
         gen = self.storage.content_missing_per_sha1([cont2['sha1'],
                                                      missing_cont['sha1']])
 
         # then
         self.assertEqual(list(gen), [missing_cont['sha1']])
 
     @istest
     def content_get_metadata(self):
         cont1 = self.cont.copy()
         cont2 = self.cont2.copy()
 
         self.storage.content_add([cont1, cont2])
 
         gen = self.storage.content_get_metadata([cont1['sha1'], cont2['sha1']])
 
         # we only retrieve the metadata
         cont1.pop('data')
         cont2.pop('data')
 
         self.assertEqual(list(gen), [cont1, cont2])
 
     @istest
     def content_get_metadata_missing_sha1(self):
         cont1 = self.cont.copy()
         cont2 = self.cont2.copy()
 
         missing_cont = self.missing_cont.copy()
 
         self.storage.content_add([cont1, cont2])
 
         gen = self.storage.content_get_metadata([missing_cont['sha1']])
 
         # All the metadata keys are None
         missing_cont.pop('data')
         for key in list(missing_cont):
             if key != 'sha1':
                 missing_cont[key] = None
 
         self.assertEqual(list(gen), [missing_cont])
 
     @istest
     def directory_get(self):
         # given
         init_missing = list(self.storage.directory_missing([self.dir['id']]))
         self.assertEqual([self.dir['id']], init_missing)
 
         self.storage.directory_add([self.dir])
 
         # when
         actual_dirs = list(self.storage.directory_get([self.dir['id']]))
 
         self.assertEqual(len(actual_dirs), 1)
 
         dir0 = actual_dirs[0]
         self.assertEqual(dir0['id'], self.dir['id'])
         # entry ids are generated, so their values are non-deterministic
         self.assertEqual(len(dir0['file_entries']), 1)
         self.assertEqual(len(dir0['dir_entries']), 1)
         self.assertIsNone(dir0['rev_entries'])
 
         after_missing = list(self.storage.directory_missing([self.dir['id']]))
         self.assertEqual([], after_missing)
 
     @istest
     def directory_add(self):
         init_missing = list(self.storage.directory_missing([self.dir['id']]))
         self.assertEqual([self.dir['id']], init_missing)
 
         self.storage.directory_add([self.dir])
 
         stored_data = list(self.storage.directory_ls(self.dir['id']))
 
         data_to_store = []
         for ent in sorted(self.dir['entries'], key=itemgetter('name')):
             data_to_store.append({
                 'dir_id': self.dir['id'],
                 'type': ent['type'],
                 'target': ent['target'],
                 'name': ent['name'],
                 'perms': ent['perms'],
                 'status': None,
                 'sha1': None,
                 'sha1_git': None,
                 'sha256': None,
                 'length': None,
             })
 
         self.assertEqual(data_to_store, stored_data)
 
         after_missing = list(self.storage.directory_missing([self.dir['id']]))
         self.assertEqual([], after_missing)
 
     @istest
     def directory_entry_get_by_path(self):
         # given
         init_missing = list(self.storage.directory_missing([self.dir3['id']]))
         self.assertEqual([self.dir3['id']], init_missing)
 
         self.storage.directory_add([self.dir3])
 
         expected_entries = [
             {
                 'dir_id': self.dir3['id'],
                 'name': b'foo',
                 'type': 'file',
                 'target': self.cont['sha1_git'],
                 'sha1': None,
                 'sha1_git': None,
                 'sha256': None,
                 'status': None,
                 'perms': from_disk.DentryPerms.content,
                 'length': None,
             },
             {
                 'dir_id': self.dir3['id'],
                 'name': b'bar',
                 'type': 'dir',
                 'target': b'12345678901234560000',
                 'sha1': None,
                 'sha1_git': None,
                 'sha256': None,
                 'status': None,
                 'perms': from_disk.DentryPerms.directory,
                 'length': None,
             },
             {
                 'dir_id': self.dir3['id'],
                 'name': b'hello',
                 'type': 'file',
                 'target': b'12345678901234567890',
                 'sha1': None,
                 'sha1_git': None,
                 'sha256': None,
                 'status': None,
                 'perms': from_disk.DentryPerms.content,
                 'length': None,
             },
         ]
 
         # when (all must be found here)
         for entry, expected_entry in zip(self.dir3['entries'],
                                          expected_entries):
             actual_entry = self.storage.directory_entry_get_by_path(
                 self.dir3['id'],
                 [entry['name']])
             self.assertEqual(actual_entry, expected_entry)
 
         # when (nothing should be found here since self.dir is not persisted.)
         for entry in self.dir['entries']:
             actual_entry = self.storage.directory_entry_get_by_path(
                 self.dir['id'],
                 [entry['name']])
             self.assertIsNone(actual_entry)
 
     @istest
     def revision_add(self):
         init_missing = self.storage.revision_missing([self.revision['id']])
         self.assertEqual([self.revision['id']], list(init_missing))
 
         self.storage.revision_add([self.revision])
 
         end_missing = self.storage.revision_missing([self.revision['id']])
         self.assertEqual([], list(end_missing))
 
     @istest
     def revision_log(self):
         # given
         # self.revision4 -is-child-of-> self.revision3
         self.storage.revision_add([self.revision3,
                                    self.revision4])
 
         # when
         actual_results = list(self.storage.revision_log(
             [self.revision4['id']]))
 
         # hack: ids generated
         for actual_result in actual_results:
             del actual_result['author']['id']
             del actual_result['committer']['id']
 
         self.assertEqual(len(actual_results), 2)  # rev4 -child-> rev3
         self.assertEquals(actual_results[0],
                           self.normalize_entity(self.revision4))
         self.assertEquals(actual_results[1],
                           self.normalize_entity(self.revision3))
 
     @istest
     def revision_log_with_limit(self):
         # given
         # self.revision4 -is-child-of-> self.revision3
         self.storage.revision_add([self.revision3,
                                    self.revision4])
         actual_results = list(self.storage.revision_log(
             [self.revision4['id']], 1))
 
         # hack: ids generated
         for actual_result in actual_results:
             del actual_result['author']['id']
             del actual_result['committer']['id']
 
         self.assertEqual(len(actual_results), 1)
         self.assertEquals(actual_results[0], self.revision4)
 
     @istest
     def revision_log_by(self):
         # given
         origin_id = self.storage.origin_add_one(self.origin2)
         self.storage.revision_add([self.revision3,
                                    self.revision4])
 
         # occurrence3 targets 'revision4'
         # with branch 'master' and origin origin_id
         occurrence3 = self.occurrence3.copy()
         date_visit1 = self.date_visit3
         origin_visit1 = self.storage.origin_visit_add(origin_id,
                                                       date_visit1)
         occurrence3.update({
             'origin': origin_id,
             'target': self.revision4['id'],
             'visit': origin_visit1['visit'],
         })
 
         self.storage.occurrence_add([occurrence3])
 
         # self.revision4 -is-child-of-> self.revision3
         # when
         actual_results = list(self.storage.revision_log_by(
             origin_id,
             branch_name=occurrence3['branch'],
             timestamp=date_visit1))
 
         # hack: ids generated
         for actual_result in actual_results:
             del actual_result['author']['id']
             del actual_result['committer']['id']
 
         self.assertEqual(len(actual_results), 2)
         self.assertEquals(actual_results[0],
                           self.normalize_entity(self.revision4))
         self.assertEquals(actual_results[1],
                           self.normalize_entity(self.revision3))
 
         # when - 2
         actual_results = list(self.storage.revision_log_by(
             origin_id,
             branch_name=None,
             timestamp=None,
             limit=1))
 
         # then
         for actual_result in actual_results:
             del actual_result['author']['id']
             del actual_result['committer']['id']
 
         self.assertEqual(len(actual_results), 1)
         self.assertEquals(actual_results[0], self.revision4)
 
         # when - 3 (revision not found)
 
         actual_res = list(self.storage.revision_log_by(
             origin_id,
             branch_name='inexistant-branch',
             timestamp=None))
 
         self.assertEquals(actual_res, [])
 
     @staticmethod
     def _short_revision(revision):
         return [revision['id'], revision['parents']]
 
     @istest
     def revision_shortlog(self):
         # given
         # self.revision4 -is-child-of-> self.revision3
         self.storage.revision_add([self.revision3,
                                    self.revision4])
 
         # when
         actual_results = list(self.storage.revision_shortlog(
             [self.revision4['id']]))
 
         self.assertEqual(len(actual_results), 2)  # rev4 -child-> rev3
         self.assertEquals(list(actual_results[0]),
                           self._short_revision(self.revision4))
         self.assertEquals(list(actual_results[1]),
                           self._short_revision(self.revision3))
 
     @istest
     def revision_shortlog_with_limit(self):
         # given
         # self.revision4 -is-child-of-> self.revision3
         self.storage.revision_add([self.revision3,
                                    self.revision4])
         actual_results = list(self.storage.revision_shortlog(
             [self.revision4['id']], 1))
 
         self.assertEqual(len(actual_results), 1)
         self.assertEquals(list(actual_results[0]),
                           self._short_revision(self.revision4))
 
     @istest
     def revision_get(self):
         self.storage.revision_add([self.revision])
 
         actual_revisions = list(self.storage.revision_get(
             [self.revision['id'], self.revision2['id']]))
 
         # when
         del actual_revisions[0]['author']['id']  # hack: ids are generated
         del actual_revisions[0]['committer']['id']
 
         self.assertEqual(len(actual_revisions), 2)
         self.assertEqual(actual_revisions[0],
                          self.normalize_entity(self.revision))
         self.assertIsNone(actual_revisions[1])
 
     @istest
     def revision_get_no_parents(self):
         self.storage.revision_add([self.revision3])
 
         get = list(self.storage.revision_get([self.revision3['id']]))
 
         self.assertEqual(len(get), 1)
         self.assertEqual(get[0]['parents'], [])  # no parents on this one
 
     @istest
     def revision_get_by(self):
         # given
         self.storage.content_add([self.cont2])
         self.storage.directory_add([self.dir2])  # points to self.cont2
         self.storage.revision_add([self.revision2])  # points to self.dir2
         origin_id = self.storage.origin_add_one(self.origin2)
 
         # occurrence2 points to 'revision2' with branch 'master', we
         # need to point to the right origin
         occurrence2 = self.occurrence2.copy()
         date_visit1 = self.date_visit2
         origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1)
         occurrence2.update({
             'origin': origin_id,
             'visit': origin_visit1['visit'],
         })
         self.storage.occurrence_add([occurrence2])
 
         # we want only revision 2
         expected_revisions = list(self.storage.revision_get(
             [self.revision2['id']]))
 
         # when
         actual_results = list(self.storage.revision_get_by(
             origin_id,
             occurrence2['branch'],
             None))
 
         self.assertEqual(actual_results[0], expected_revisions[0])
 
         # when (with no branch filtering, it's still ok)
         actual_results = list(self.storage.revision_get_by(
             origin_id,
             None,
             None))
 
         self.assertEqual(actual_results[0], expected_revisions[0])
 
     @istest
     def revision_get_by_multiple_occurrence(self):
         # 2 occurrences pointing to 2 different revisions;
         # each occurrence has a one-day delta.
         # The API must return the revision whose occurrence is the nearest.
 
         # given
         self.storage.content_add([self.cont2])
         self.storage.directory_add([self.dir2])
         self.storage.revision_add([self.revision2, self.revision3])
         origin_id = self.storage.origin_add_one(self.origin2)
 
         # occurrence2 points to 'revision2' with branch 'master', we
         # need to point to the right origin
         date_visit1 = self.date_visit2
         origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1)
         occurrence2 = self.occurrence2.copy()
         occurrence2.update({
             'origin': origin_id,
             'visit': origin_visit1['visit']
         })
 
         dt = datetime.timedelta(days=1)
         date_visit2 = date_visit1 + dt
         origin_visit2 = self.storage.origin_visit_add(origin_id, date_visit2)
         occurrence3 = self.occurrence2.copy()
         occurrence3.update({
             'origin': origin_id,
             'visit': origin_visit2['visit'],
             'target': self.revision3['id'],
         })
         # add both occurrences, on the same branch, with visit dates one
         # day apart
         self.storage.occurrence_add([occurrence2])
         self.storage.occurrence_add([occurrence3])
 
         # when
         actual_results0 = list(self.storage.revision_get_by(
             origin_id,
             occurrence2['branch'],
             date_visit1))
 
         # hack: ids are generated
         del actual_results0[0]['author']['id']
         del actual_results0[0]['committer']['id']
 
         self.assertEquals(len(actual_results0), 1)
         self.assertEqual(actual_results0,
                          [self.normalize_entity(self.revision2)])
 
         # when
         actual_results1 = list(self.storage.revision_get_by(
             origin_id,
             occurrence2['branch'],
             date_visit1 + dt/3))  # closer to first visit
 
         # hack: ids are generated
         del actual_results1[0]['author']['id']
         del actual_results1[0]['committer']['id']
 
         self.assertEquals(len(actual_results1), 1)
         self.assertEqual(actual_results1,
                          [self.normalize_entity(self.revision2)])
 
         # when
         actual_results2 = list(self.storage.revision_get_by(
             origin_id,
             occurrence2['branch'],
             date_visit1 + 2*dt/3))  # closer to second visit
 
         del actual_results2[0]['author']['id']
         del actual_results2[0]['committer']['id']
 
         self.assertEquals(len(actual_results2), 1)
         self.assertEqual(actual_results2,
                          [self.normalize_entity(self.revision3)])
 
         # when
         actual_results3 = list(self.storage.revision_get_by(
             origin_id,
             occurrence3['branch'],
             date_visit2))
 
         # hack: ids are generated
         del actual_results3[0]['author']['id']
         del actual_results3[0]['committer']['id']
 
         self.assertEquals(len(actual_results3), 1)
         self.assertEqual(actual_results3,
                          [self.normalize_entity(self.revision3)])
 
         # when
         actual_results4 = list(self.storage.revision_get_by(
             origin_id,
             None,
             None))
 
         for actual_result in actual_results4:
             del actual_result['author']['id']
             del actual_result['committer']['id']
 
         self.assertEquals(len(actual_results4), 1)
         self.assertCountEqual(actual_results4,
                               [self.normalize_entity(self.revision3)])
 
     @istest
     def release_add(self):
         init_missing = self.storage.release_missing([self.release['id'],
                                                      self.release2['id']])
         self.assertEqual([self.release['id'], self.release2['id']],
                          list(init_missing))
 
         self.storage.release_add([self.release, self.release2])
 
         end_missing = self.storage.release_missing([self.release['id'],
                                                     self.release2['id']])
         self.assertEqual([], list(end_missing))
 
     @istest
     def release_get(self):
         # given
         self.storage.release_add([self.release, self.release2])
 
         # when
         actual_releases = list(self.storage.release_get([self.release['id'],
                                                          self.release2['id']]))
 
         # then
         for actual_release in actual_releases:
             del actual_release['author']['id']  # hack: ids are generated
 
         self.assertEquals([self.normalize_entity(self.release),
                            self.normalize_entity(self.release2)],
                           [actual_releases[0], actual_releases[1]])
 
     @istest
     def release_get_by(self):
         # given
         self.storage.revision_add([self.revision2])  # points to self.dir
         self.storage.release_add([self.release3])
         origin_id = self.storage.origin_add_one(self.origin2)
 
         # occurrence2 points to 'revision2' with branch 'master', we
         # need to point to the right origin
         origin_visit = self.storage.origin_visit_add(origin_id,
                                                      self.date_visit2)
         occurrence2 = self.occurrence2.copy()
         occurrence2.update({
             'origin': origin_id,
             'visit': origin_visit['visit'],
         })
 
         self.storage.occurrence_add([occurrence2])
 
         # we want only release 3
         expected_releases = list(self.storage.release_get(
             [self.release3['id']]))
 
         # when
         actual_results = list(self.storage.release_get_by(
             occurrence2['origin']))
 
         # then
         self.assertEqual(actual_results[0], expected_releases[0])
 
     @istest
     def origin_add_one(self):
         origin0 = self.storage.origin_get(self.origin)
         self.assertIsNone(origin0)
 
         id = self.storage.origin_add_one(self.origin)
 
         actual_origin = self.storage.origin_get({'url': self.origin['url'],
                                                  'type': self.origin['type']})
         self.assertEqual(actual_origin['id'], id)
 
         id2 = self.storage.origin_add_one(self.origin)
 
         self.assertEqual(id, id2)
 
     @istest
     def origin_add(self):
         origin0 = self.storage.origin_get(self.origin)
         self.assertIsNone(origin0)
 
         id1, id2 = self.storage.origin_add([self.origin, self.origin2])
 
         actual_origin = self.storage.origin_get({
             'url': self.origin['url'],
             'type': self.origin['type'],
         })
         self.assertEqual(actual_origin['id'], id1)
 
         actual_origin2 = self.storage.origin_get({
             'url': self.origin2['url'],
             'type': self.origin2['type'],
         })
         self.assertEqual(actual_origin2['id'], id2)
 
     @istest
     def origin_add_twice(self):
         add1 = self.storage.origin_add([self.origin, self.origin2])
         add2 = self.storage.origin_add([self.origin, self.origin2])
 
         self.assertEqual(add1, add2)
 
     @istest
     def origin_get(self):
         self.assertIsNone(self.storage.origin_get(self.origin))
         id = self.storage.origin_add_one(self.origin)
 
         # lookup per type and url (returns id)
         actual_origin0 = self.storage.origin_get({'url': self.origin['url'],
                                                   'type': self.origin['type']})
         self.assertEqual(actual_origin0['id'], id)
 
         # lookup per id (returns dict)
         actual_origin1 = self.storage.origin_get({'id': id})
 
         self.assertEqual(actual_origin1, {'id': id,
                                           'type': self.origin['type'],
                                           'url': self.origin['url'],
                                           'lister': None,
                                           'project': None})
 
     @istest
     def origin_search(self):
         found_origins = list(self.storage.origin_search(self.origin['url']))
         self.assertEqual(len(found_origins), 0)
 
         found_origins = list(self.storage.origin_search(self.origin['url'],
                                                         regexp=True))
         self.assertEqual(len(found_origins), 0)
 
         id = self.storage.origin_add_one(self.origin)
         origin_data = {'id': id,
                        'type': self.origin['type'],
                        'url': self.origin['url'],
                        'lister': None,
                        'project': None}
         found_origins = list(self.storage.origin_search(self.origin['url']))
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin_data)
 
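         # a regexp built from the url (first and last characters
         # wildcarded) must find it too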
         found_origins = list(self.storage.origin_search(
             '.' + self.origin['url'][1:-1] + '.', regexp=True))
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin_data)
 
         id2 = self.storage.origin_add_one(self.origin2)
         origin2_data = {'id': id2,
                         'type': self.origin2['type'],
                         'url': self.origin2['url'],
                         'lister': None,
                         'project': None}
         found_origins = list(self.storage.origin_search(self.origin2['url']))
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin2_data)
 
         found_origins = list(self.storage.origin_search(
             '.' + self.origin2['url'][1:-1] + '.', regexp=True))
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin2_data)
 
         found_origins = list(self.storage.origin_search('/'))
         self.assertEqual(len(found_origins), 2)
 
         found_origins = list(self.storage.origin_search('.*/.*', regexp=True))
         self.assertEqual(len(found_origins), 2)
 
         found_origins = list(self.storage.origin_search('/', offset=0, limit=1)) # noqa
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin_data)
 
         found_origins = list(self.storage.origin_search('.*/.*', offset=0, limit=1, regexp=True)) # noqa
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin_data)
 
         found_origins = list(self.storage.origin_search('/', offset=1, limit=1)) # noqa
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin2_data)
 
         found_origins = list(self.storage.origin_search('.*/.*', offset=1, limit=1, regexp=True)) # noqa
         self.assertEqual(len(found_origins), 1)
         self.assertEqual(found_origins[0], origin2_data)
 
     @istest
     def origin_visit_add(self):
         # given
         self.assertIsNone(self.storage.origin_get(self.origin2))
 
         origin_id = self.storage.origin_add_one(self.origin2)
         self.assertIsNotNone(origin_id)
 
         # when
         origin_visit1 = self.storage.origin_visit_add(
             origin_id,
             ts=self.date_visit2)
 
         # then
         self.assertEquals(origin_visit1['origin'], origin_id)
         self.assertIsNotNone(origin_visit1['visit'])
         self.assertGreater(origin_visit1['visit'], 0)
 
         actual_origin_visits = list(self.storage.origin_visit_get(origin_id))
         self.assertEquals(actual_origin_visits,
                           [{
                               'origin': origin_id,
                               'date': self.date_visit2,
                               'visit': origin_visit1['visit'],
                               'status': 'ongoing',
                               'metadata': None,
                           }])
 
     @istest
     def origin_visit_update(self):
         # given
         origin_id = self.storage.origin_add_one(self.origin2)
         origin_id2 = self.storage.origin_add_one(self.origin)
 
         origin_visit1 = self.storage.origin_visit_add(
             origin_id,
             ts=self.date_visit2)
 
         origin_visit2 = self.storage.origin_visit_add(
             origin_id,
             ts=self.date_visit3)
 
         origin_visit3 = self.storage.origin_visit_add(
             origin_id2,
             ts=self.date_visit3)
 
         # when
         visit1_metadata = {
             'contents': 42,
             'directories': 22,
         }
         self.storage.origin_visit_update(
             origin_id, origin_visit1['visit'], status='full',
             metadata=visit1_metadata)
         self.storage.origin_visit_update(origin_id2, origin_visit3['visit'],
                                          status='partial')
 
         # then
         actual_origin_visits = list(self.storage.origin_visit_get(origin_id))
         self.assertEquals(actual_origin_visits,
                           [{
                               'origin': origin_visit2['origin'],
                               'date': self.date_visit2,
                               'visit': origin_visit1['visit'],
                               'status': 'full',
                               'metadata': visit1_metadata,
                           },
                            {
                                'origin': origin_visit2['origin'],
                                'date': self.date_visit3,
                                'visit': origin_visit2['visit'],
                                'status': 'ongoing',
                                'metadata': None,
                            }])
 
         actual_origin_visits_bis = list(self.storage.origin_visit_get(
             origin_id, limit=1))
         self.assertEquals(actual_origin_visits_bis,
                           [{
                               'origin': origin_visit2['origin'],
                               'date': self.date_visit2,
                               'visit': origin_visit1['visit'],
                               'status': 'full',
                               'metadata': visit1_metadata,
                           }])
 
         actual_origin_visits_ter = list(self.storage.origin_visit_get(
             origin_id, last_visit=origin_visit1['visit']))
         self.assertEquals(actual_origin_visits_ter,
                           [{
                                'origin': origin_visit2['origin'],
                                'date': self.date_visit3,
                                'visit': origin_visit2['visit'],
                                'status': 'ongoing',
                                'metadata': None,
                            }])
 
         actual_origin_visits2 = list(self.storage.origin_visit_get(origin_id2))
         self.assertEquals(actual_origin_visits2,
                           [{
                               'origin': origin_visit3['origin'],
                               'date': self.date_visit3,
                               'visit': origin_visit3['visit'],
                               'status': 'partial',
                               'metadata': None,
                           }])
 
     @istest
     def origin_visit_get_by(self):
         origin_id = self.storage.origin_add_one(self.origin2)
         origin_id2 = self.storage.origin_add_one(self.origin)
 
         origin_visit1 = self.storage.origin_visit_add(
             origin_id,
             ts=self.date_visit2)
 
         occurrence2 = self.occurrence2.copy()
         occurrence2.update({
             'origin': origin_id,
             'visit': origin_visit1['visit'],
         })
 
         self.storage.occurrence_add([occurrence2])
 
         # Add some other {origin, visit} entries
         self.storage.origin_visit_add(origin_id, ts=self.date_visit3)
         self.storage.origin_visit_add(origin_id2, ts=self.date_visit3)
 
         # when
         visit1_metadata = {
             'contents': 42,
             'directories': 22,
         }
 
         self.storage.origin_visit_update(
             origin_id, origin_visit1['visit'], status='full',
             metadata=visit1_metadata)
 
         expected_origin_visit = origin_visit1.copy()
         expected_origin_visit.update({
             'origin': origin_id,
             'visit': origin_visit1['visit'],
             'date': self.date_visit2,
             'metadata': visit1_metadata,
             'status': 'full',
             'occurrences': {
                 occurrence2['branch']: {
                     'target': occurrence2['target'],
                     'target_type': occurrence2['target_type'],
                 }
             }
         })
 
         # when
         actual_origin_visit1 = self.storage.origin_visit_get_by(
             origin_visit1['origin'], origin_visit1['visit'])
 
         # then
         self.assertEquals(actual_origin_visit1, expected_origin_visit)
 
     @istest
     def origin_visit_get_by_no_result(self):
         # No result
         actual_origin_visit = self.storage.origin_visit_get_by(
             10, 999)
 
         self.assertIsNone(actual_origin_visit)
 
     @istest
     def occurrence_add(self):
         occur = self.occurrence.copy()
 
         origin_id = self.storage.origin_add_one(self.origin2)
         date_visit1 = self.date_visit1
         origin_visit1 = self.storage.origin_visit_add(origin_id, date_visit1)
 
         revision = self.revision.copy()
         revision['id'] = occur['target']
         self.storage.revision_add([revision])
 
         occur.update({
             'origin': origin_id,
             'visit': origin_visit1['visit'],
         })
         self.storage.occurrence_add([occur])
 
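         # inspect the stored occurrence directly through the
         # occurrence_history and origin_visit tables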
         test_query = '''
         with indiv_occurrences as (
           select origin, branch, target, target_type, unnest(visits) as visit
           from occurrence_history
         )
         select origin, branch, target, target_type, date
         from indiv_occurrences
         left join origin_visit using(origin, visit)
         order by origin, date'''
 
         self.cursor.execute(test_query)
         ret = self.cursor.fetchall()
         self.assertEqual(len(ret), 1)
         self.assertEqual(
             (ret[0][0], ret[0][1].tobytes(), ret[0][2].tobytes(),
              ret[0][3], ret[0][4]),
             (occur['origin'], occur['branch'], occur['target'],
              occur['target_type'], self.date_visit1))
 
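         # a second visit, 10 hours later, must yield a second
         # occurrence row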
         date_visit2 = date_visit1 + datetime.timedelta(hours=10)
 
         origin_visit2 = self.storage.origin_visit_add(origin_id, date_visit2)
         occur2 = occur.copy()
         occur2.update({
             'visit': origin_visit2['visit'],
         })
         self.storage.occurrence_add([occur2])
 
         self.cursor.execute(test_query)
         ret = self.cursor.fetchall()
         self.assertEqual(len(ret), 2)
         self.assertEqual(
             (ret[0][0], ret[0][1].tobytes(), ret[0][2].tobytes(),
              ret[0][3], ret[0][4]),
             (occur['origin'], occur['branch'], occur['target'],
              occur['target_type'], date_visit1))
         self.assertEqual(
             (ret[1][0], ret[1][1].tobytes(), ret[1][2].tobytes(),
              ret[1][3], ret[1][4]),
             (occur2['origin'], occur2['branch'], occur2['target'],
              occur2['target_type'], date_visit2))
 
     @istest
     def occurrence_get(self):
         # given
         occur = self.occurrence.copy()
         origin_id = self.storage.origin_add_one(self.origin2)
         origin_visit1 = self.storage.origin_visit_add(origin_id,
                                                       self.date_visit1)
 
         revision = self.revision.copy()
         revision['id'] = occur['target']
         self.storage.revision_add([revision])
 
         occur.update({
             'origin': origin_id,
             'visit': origin_visit1['visit'],
         })
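         # adding the same occurrence twice must be idempotent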
         self.storage.occurrence_add([occur])
         self.storage.occurrence_add([occur])
 
         # when
         actual_occurrence = list(self.storage.occurrence_get(origin_id))
 
         # then
         expected_occurrence = self.occurrence.copy()
         expected_occurrence.update({
             'origin': origin_id
         })
         self.assertEquals(len(actual_occurrence), 1)
         self.assertEquals(actual_occurrence[0], expected_occurrence)
 
+    @istest
+    def snapshot_add_get_empty(self):
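+        # a snapshot with no branches must round-trip both by snapshot
+        # id and by (origin, visit)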
+        origin_id = self.storage.origin_add_one(self.origin)
+        origin_visit1 = self.storage.origin_visit_add(origin_id,
+                                                      self.date_visit1)
+        visit_id = origin_visit1['visit']
+
+        self.storage.snapshot_add(origin_id, visit_id, self.empty_snapshot)
+
+        by_id = self.storage.snapshot_get(self.empty_snapshot['id'])
+        self.assertEqual(by_id, self.empty_snapshot)
+
+        by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id)
+        self.assertEqual(by_ov, self.empty_snapshot)
+
+    @istest
+    def snapshot_add_get_complete(self):
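+        # the "complete" snapshot fixture must round-trip unchanged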
+        origin_id = self.storage.origin_add_one(self.origin)
+        origin_visit1 = self.storage.origin_visit_add(origin_id,
+                                                      self.date_visit1)
+        visit_id = origin_visit1['visit']
+
+        self.storage.snapshot_add(origin_id, visit_id, self.complete_snapshot)
+
+        by_id = self.storage.snapshot_get(self.complete_snapshot['id'])
+        self.assertEqual(by_id, self.complete_snapshot)
+
+        by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id)
+        self.assertEqual(by_ov, self.complete_snapshot)
+
+    @istest
+    def snapshot_add_get(self):
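+        # the nominal snapshot fixture must be retrievable by id and by
+        # (origin, visit)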
+        origin_id = self.storage.origin_add_one(self.origin)
+        origin_visit1 = self.storage.origin_visit_add(origin_id,
+                                                      self.date_visit1)
+        visit_id = origin_visit1['visit']
+
+        self.storage.snapshot_add(origin_id, visit_id, self.snapshot)
+
+        by_id = self.storage.snapshot_get(self.snapshot['id'])
+        self.assertEqual(by_id, self.snapshot)
+
+        by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id)
+        self.assertEqual(by_ov, self.snapshot)
+
+        # retrocompat check: the snapshot branches must also be exposed
+        # as the visit's occurrences
+        origin_visit_info = self.storage.origin_visit_get_by(origin_id,
+                                                             visit_id)
+        self.assertEqual(origin_visit_info['occurrences'],
+                         self.snapshot['branches'])
+
+    @istest
+    def snapshot_add_twice(self):
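+        # the same snapshot can be attached to two successive visits of
+        # the same origin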
+        origin_id = self.storage.origin_add_one(self.origin)
+        origin_visit1 = self.storage.origin_visit_add(origin_id,
+                                                      self.date_visit1)
+        visit1_id = origin_visit1['visit']
+        self.storage.snapshot_add(origin_id, visit1_id, self.snapshot)
+
+        by_ov1 = self.storage.snapshot_get_by_origin_visit(origin_id,
+                                                           visit1_id)
+        self.assertEqual(by_ov1, self.snapshot)
+
+        origin_visit2 = self.storage.origin_visit_add(origin_id,
+                                                      self.date_visit2)
+        visit2_id = origin_visit2['visit']
+
+        self.storage.snapshot_add(origin_id, visit2_id, self.snapshot)
+
+        by_ov2 = self.storage.snapshot_get_by_origin_visit(origin_id,
+                                                           visit2_id)
+        self.assertEqual(by_ov2, self.snapshot)
+
+    @istest
+    def snapshot_get_nonexistent(self):
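+        # lookups of unknown snapshots must return None, not raise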
+        bogus_snapshot_id = b'bogus snapshot id 00'
+        bogus_origin_id = 1
+        bogus_visit_id = 1
+
+        by_id = self.storage.snapshot_get(bogus_snapshot_id)
+        self.assertIsNone(by_id)
+
+        by_ov = self.storage.snapshot_get_by_origin_visit(bogus_origin_id,
+                                                          bogus_visit_id)
+        self.assertIsNone(by_ov)
+
+    @istest
+    def snapshot_get_retrocompat(self):
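+        # with no snapshot stored, snapshot_get_by_origin_visit must
+        # synthesize one from legacy occurrences, with a null id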
+        empty_retro_snapshot = {
+            'id': None,
+            'branches': {},
+        }
+        origin_id = self.storage.origin_add_one(self.origin)
+        origin_visit1 = self.storage.origin_visit_add(origin_id,
+                                                      self.date_visit1)
+        visit_id = origin_visit1['visit']
+
+        by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id)
+
+        self.assertEqual(by_ov, empty_retro_snapshot)
+
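+        # a legacy occurrence must then show up as a branch of the
+        # synthesized snapshot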
+        self.storage.revision_add([self.revision])
+        self.storage.occurrence_add([{
+            'origin': origin_id,
+            'visit': visit_id,
+            'branch': self.occurrence['branch'],
+            'target': self.occurrence['target'],
+            'target_type': self.occurrence['target_type'],
+        }])
+
+        one_branch_retro_snapshot = {
+            'id': None,
+            'branches': {
+                self.occurrence['branch']: {
+                    'target': self.occurrence['target'],
+                    'target_type': self.occurrence['target_type'],
+                },
+            },
+        }
+
+        by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id)
+        self.assertEqual(by_ov, one_branch_retro_snapshot)
+
     @istest
     def entity_get_from_lister_metadata(self):
         self.storage.entity_add([self.entity1])
 
         fetched_entities = list(
             self.storage.entity_get_from_lister_metadata(
                 [self.entity1_query, self.entity2_query]))
 
         # Entity 1 should have full metadata, with last_seen/last_id instead
         # of validity
         entity1 = self.entity1.copy()
         entity1['last_seen'] = entity1['validity'][0]
         del fetched_entities[0]['last_id']
         del entity1['validity']
         # Entity 2 should have no metadata
         entity2 = {
             'uuid': None,
             'lister_metadata': self.entity2_query.copy(),
         }
 
         self.assertEquals(fetched_entities, [entity1, entity2])
 
     @istest
     def entity_get_from_lister_metadata_twice(self):
         self.storage.entity_add([self.entity1])
 
         fetched_entities1 = list(
             self.storage.entity_get_from_lister_metadata(
                 [self.entity1_query]))
         fetched_entities2 = list(
             self.storage.entity_get_from_lister_metadata(
                 [self.entity1_query]))
 
         self.assertEquals(fetched_entities1, fetched_entities2)
 
     @istest
     def entity_get(self):
         # given
         self.storage.entity_add([self.entity4])
         self.storage.entity_add([self.entity3])
 
         # when: entity3 -child-of-> entity4
         actual_entity3 = list(self.storage.entity_get(self.entity3['uuid']))
 
         self.assertEquals(len(actual_entity3), 2)
         # remove dynamic data (modified by db)
         entity3 = self.entity3.copy()
         entity4 = self.entity4.copy()
         del entity3['validity']
         del entity4['validity']
         del actual_entity3[0]['last_seen']
         del actual_entity3[0]['last_id']
         del actual_entity3[1]['last_seen']
         del actual_entity3[1]['last_id']
         self.assertEquals(actual_entity3, [entity3, entity4])
 
         # when: entity4 only child
         actual_entity4 = list(self.storage.entity_get(self.entity4['uuid']))
 
         self.assertEquals(len(actual_entity4), 1)
         # remove dynamic data (modified by db)
         entity4 = self.entity4.copy()
         del entity4['validity']
         del actual_entity4[0]['last_id']
         del actual_entity4[0]['last_seen']
 
         self.assertEquals(actual_entity4, [entity4])
 
     @istest
     def entity_get_one(self):
         # given
         self.storage.entity_add([self.entity3, self.entity4])
 
         # when: entity3 -child-of-> entity4
         actual_entity3 = self.storage.entity_get_one(self.entity3['uuid'])
 
         # remove dynamic data (modified by db)
         entity3 = self.entity3.copy()
         del entity3['validity']
         del actual_entity3['last_seen']
         del actual_entity3['last_id']
         self.assertEquals(actual_entity3, entity3)
 
     @istest
     def stat_counters(self):
         expected_keys = ['content', 'directory', 'directory_entry_dir',
                          'occurrence', 'origin', 'person', 'revision']
 
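         # refresh each counter through the swh_update_counter() SQL
         # function before reading the counters back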
         for key in expected_keys:
             self.cursor.execute('select * from swh_update_counter(%s)', (key,))
         self.conn.commit()
 
         counters = self.storage.stat_counters()
 
         self.assertTrue(set(expected_keys) <= set(counters))
         self.assertIsInstance(counters[expected_keys[0]], int)
 
     @istest
     def content_find_with_present_content(self):
         # 1. with something to find
         cont = self.cont
         self.storage.content_add([cont])
 
         actually_present = self.storage.content_find({'sha1': cont['sha1']})
 
         actually_present.pop('ctime')
         self.assertEqual(actually_present, {
             'sha1': cont['sha1'],
             'sha256': cont['sha256'],
             'sha1_git': cont['sha1_git'],
             'blake2s256': cont['blake2s256'],
             'length': cont['length'],
             'status': 'visible'
         })
 
         # 2. with something to find
         actually_present = self.storage.content_find(
             {'sha1_git': cont['sha1_git']})
 
         actually_present.pop('ctime')
         self.assertEqual(actually_present, {
             'sha1': cont['sha1'],
             'sha256': cont['sha256'],
             'sha1_git': cont['sha1_git'],
             'blake2s256': cont['blake2s256'],
             'length': cont['length'],
             'status': 'visible'
         })
 
         # 3. with something to find
         actually_present = self.storage.content_find(
             {'sha256': cont['sha256']})
 
         actually_present.pop('ctime')
         self.assertEqual(actually_present, {
             'sha1': cont['sha1'],
             'sha256': cont['sha256'],
             'sha1_git': cont['sha1_git'],
             'blake2s256': cont['blake2s256'],
             'length': cont['length'],
             'status': 'visible'
         })
 
         # 4. with something to find
         actually_present = self.storage.content_find({
             'sha1': cont['sha1'],
             'sha1_git': cont['sha1_git'],
             'sha256': cont['sha256'],
             'blake2s256': cont['blake2s256'],
         })
 
         actually_present.pop('ctime')
         self.assertEqual(actually_present, {
             'sha1': cont['sha1'],
             'sha256': cont['sha256'],
             'sha1_git': cont['sha1_git'],
             'blake2s256': cont['blake2s256'],
             'length': cont['length'],
             'status': 'visible'
         })
 
     @istest
     def content_find_with_non_present_content(self):
         # 1. with something that does not exist
         missing_cont = self.missing_cont
 
         actually_present = self.storage.content_find(
             {'sha1': missing_cont['sha1']})
 
         self.assertIsNone(actually_present)
 
         # 2. with something that does not exist
         actually_present = self.storage.content_find(
             {'sha1_git': missing_cont['sha1_git']})
 
         self.assertIsNone(actually_present)
 
         # 3. with something that does not exist
         actually_present = self.storage.content_find(
             {'sha256': missing_cont['sha256']})
 
         self.assertIsNone(actually_present)
 
     @istest
     def content_find_bad_input(self):
         # 1. with bad input
         with self.assertRaises(ValueError):
             self.storage.content_find({})  # empty is bad
 
         # 2. with bad input
         with self.assertRaises(ValueError):
             self.storage.content_find(
                 {'unknown-sha1': 'something'})  # not the right key
 
     @istest
     def object_find_by_sha1_git(self):
         sha1_gits = [b'00000000000000000000']
         expected = {
             b'00000000000000000000': [],
         }
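         # an unknown sha1_git must map to an empty list of objects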
 
         self.storage.content_add([self.cont])
         sha1_gits.append(self.cont['sha1_git'])
         expected[self.cont['sha1_git']] = [{
             'sha1_git': self.cont['sha1_git'],
             'type': 'content',
             'id': self.cont['sha1'],
         }]
 
         self.storage.directory_add([self.dir])
         sha1_gits.append(self.dir['id'])
         expected[self.dir['id']] = [{
             'sha1_git': self.dir['id'],
             'type': 'directory',
             'id': self.dir['id'],
         }]
 
         self.storage.revision_add([self.revision])
         sha1_gits.append(self.revision['id'])
         expected[self.revision['id']] = [{
             'sha1_git': self.revision['id'],
             'type': 'revision',
             'id': self.revision['id'],
         }]
 
         self.storage.release_add([self.release])
         sha1_gits.append(self.release['id'])
         expected[self.release['id']] = [{
             'sha1_git': self.release['id'],
             'type': 'release',
             'id': self.release['id'],
         }]
 
         ret = self.storage.object_find_by_sha1_git(sha1_gits)
         for val in ret.values():
             for obj in val:
                 del obj['object_id']
 
         self.assertEqual(expected, ret)
 
     @istest
     def tool_add(self):
         tool = {
             'name': 'some-unknown-tool',
             'version': 'some-version',
             'configuration': {"debian-package": "some-package"},
         }
 
         actual_tool = self.storage.tool_get(tool)
         self.assertIsNone(actual_tool)  # does not exist
 
         # add it
         actual_tools = list(self.storage.tool_add([tool]))
 
         self.assertEquals(len(actual_tools), 1)
         actual_tool = actual_tools[0]
         self.assertIsNotNone(actual_tool)  # now it exists
         new_id = actual_tool.pop('id')
         self.assertEquals(actual_tool, tool)
 
         actual_tools2 = list(self.storage.tool_add([tool]))
         actual_tool2 = actual_tools2[0]
         self.assertIsNotNone(actual_tool2)  # now it exists
         new_id2 = actual_tool2.pop('id')
 
         self.assertEqual(new_id, new_id2)
         self.assertEqual(actual_tool, actual_tool2)
 
     @istest
     def tool_add_multiple(self):
         tool = {
             'name': 'some-unknown-tool',
             'version': 'some-version',
             'configuration': {"debian-package": "some-package"},
         }
 
         actual_tools = list(self.storage.tool_add([tool]))
         self.assertEqual(len(actual_tools), 1)
 
         new_tools = [tool, {
             'name': 'yet-another-tool',
             'version': 'version',
             'configuration': {},
         }]
 
         actual_tools = list(self.storage.tool_add(new_tools))
         self.assertEqual(len(actual_tools), 2)
 
         # order not guaranteed, so we iterate over results to check
         for tool in actual_tools:
             _id = tool.pop('id')
             self.assertIsNotNone(_id)
             self.assertIn(tool, new_tools)
 
     @istest
     def tool_get_missing(self):
         tool = {
             'name': 'unknown-tool',
             'version': '3.1.0rc2-31-ga2cbb8c',
             'configuration': {"command_line": "nomossa <filepath>"},
         }
 
         actual_tool = self.storage.tool_get(tool)
 
         self.assertIsNone(actual_tool)
 
     @istest
     def tool_metadata_get_missing_context(self):
         tool = {
             'name': 'swh-metadata-translator',
             'version': '0.0.1',
             'configuration': {"context": "unknown-context"},
         }
 
         actual_tool = self.storage.tool_get(tool)
 
         self.assertIsNone(actual_tool)
 
     @istest
     def tool_metadata_get(self):
         tool = {
             'name': 'swh-metadata-translator',
             'version': '0.0.1',
             'configuration': {"type": "local", "context": "npm"},
         }
 
         tools = list(self.storage.tool_add([tool]))
         expected_tool = tools[0]
 
         # when
         actual_tool = self.storage.tool_get(tool)
 
         # then
         self.assertEqual(expected_tool, actual_tool)
 
     @istest
     def metadata_provider_get_by(self):
         # given
         no_provider = self.storage.metadata_provider_get_by({
             'provider_name': self.provider['name'],
             'provider_url': self.provider['url']
         })
         self.assertIsNone(no_provider)
         # when
         provider_id = self.storage.metadata_provider_add(
             self.provider['name'],
             self.provider['type'],
             self.provider['url'],
             self.provider['metadata'])
 
         actual_provider = self.storage.metadata_provider_get_by({
             'provider_name': self.provider['name'],
             'provider_url': self.provider['url']
         })
         # then
         self.assertEqual(provider_id, actual_provider['id'])
 
     @istest
     def origin_metadata_add(self):
         # given
         origin_id = self.storage.origin_add([self.origin])[0]
         origin_metadata0 = list(self.storage.origin_metadata_get_by(origin_id))
         self.assertEqual(len(origin_metadata0), 0)
 
         tools = list(self.storage.tool_add([self.metadata_tool]))
         tool = tools[0]
 
         self.storage.metadata_provider_add(
                            self.provider['name'],
                            self.provider['type'],
                            self.provider['url'],
                            self.provider['metadata'])
         provider = self.storage.metadata_provider_get_by({
                             'provider_name': self.provider['name'],
                             'provider_url': self.provider['url']
                       })
         tool = self.storage.tool_get(self.metadata_tool)
 
         # when adding a metadata entry for the origin
         o_m1 = self.storage.origin_metadata_add(
                     origin_id,
                     self.origin_metadata['discovery_date'],
                     provider['id'],
                     tool['id'],
                     self.origin_metadata['metadata'])
         actual_om1 = list(self.storage.origin_metadata_get_by(origin_id))
         # then
         self.assertEqual(actual_om1[0]['id'], o_m1)
         self.assertEqual(len(actual_om1), 1)
         self.assertEqual(actual_om1[0]['origin_id'], origin_id)
 
     @istest
     def origin_metadata_get(self):
         # given
         origin_id = self.storage.origin_add([self.origin])[0]
         origin_id2 = self.storage.origin_add([self.origin2])[0]
 
         self.storage.metadata_provider_add(self.provider['name'],
                                            self.provider['type'],
                                            self.provider['url'],
                                            self.provider['metadata'])
         provider = self.storage.metadata_provider_get_by({
                             'provider_name': self.provider['name'],
                             'provider_url': self.provider['url']
                    })
         tool = self.storage.tool_get(self.metadata_tool)
         # when adding 2 metadata entries for the first origin
         # and 1 for the second
         o_m1 = self.storage.origin_metadata_add(
                     origin_id,
                     self.origin_metadata['discovery_date'],
                     provider['id'],
                     tool['id'],
                     self.origin_metadata['metadata'])
         o_m2 = self.storage.origin_metadata_add(
                     origin_id2,
                     self.origin_metadata2['discovery_date'],
                     provider['id'],
                     tool['id'],
                     self.origin_metadata2['metadata'])
         o_m3 = self.storage.origin_metadata_add(
                     origin_id,
                     self.origin_metadata2['discovery_date'],
                     provider['id'],
                     tool['id'],
                     self.origin_metadata2['metadata'])
         all_metadatas = list(self.storage.origin_metadata_get_by(origin_id))
         metadatas_for_origin2 = list(self.storage.origin_metadata_get_by(
                                           origin_id2))
         expected_results = [{
             'origin_id': origin_id,
             'discovery_date': datetime.datetime(
                                 2017, 1, 2, 0, 0,
                                 tzinfo=psycopg2.tz.FixedOffsetTimezone(
                                     offset=60,
                                     name=None)),
             'metadata': {
                 'name': 'test_origin_metadata',
                 'version': '0.0.1'
             },
             'id': o_m3,
             'provider_id': provider['id'],
             'provider_name': 'hal',
             'provider_type': 'deposit-client',
             'provider_url': 'http:///hal/inria',
             'tool_id': tool['id']
         }, {
             'origin_id': origin_id,
             'discovery_date': datetime.datetime(
                                 2015, 1, 2, 0, 0,
                                 tzinfo=psycopg2.tz.FixedOffsetTimezone(
                                     offset=60,
                                     name=None)),
             'metadata': {
                 'name': 'test_origin_metadata',
                 'version': '0.0.1'
             },
             'id': o_m1,
             'provider_id': provider['id'],
             'provider_name': 'hal',
             'provider_type': 'deposit-client',
             'provider_url': 'http:///hal/inria',
             'tool_id': tool['id']
         }]
 
         # then
         self.assertEqual(len(all_metadatas), 2)
         self.assertEqual(len(metadatas_for_origin2), 1)
         self.assertEqual(metadatas_for_origin2[0]['id'], o_m2)
         self.assertEqual(all_metadatas, expected_results)
 
     @istest
     def origin_metadata_get_by_provider_type(self):
         # given
         origin_id = self.storage.origin_add([self.origin])[0]
         origin_id2 = self.storage.origin_add([self.origin2])[0]
         self.storage.metadata_provider_add(
                            self.provider['name'],
                            self.provider['type'],
                            self.provider['url'],
                            self.provider['metadata'])
         provider1 = self.storage.metadata_provider_get_by({
                             'provider_name': self.provider['name'],
                             'provider_url': self.provider['url']
                    })
 
         self.storage.metadata_provider_add(
                             'swMATH',
                             'registry',
                             'http://www.swmath.org/',
                             {'email': 'contact@swmath.org',
                              'license': 'All rights reserved'})
         provider2 = self.storage.metadata_provider_get_by({
                             'provider_name': 'swMATH',
                             'provider_url': 'http://www.swmath.org/'
                    })
 
         # using the only tool currently inserted in data.sql; for this
         # provider it should be a crawler tool (not yet implemented)
         tool = self.storage.tool_get(self.metadata_tool)
 
         # when adding one metadata entry per origin, each with a
         # different provider
         o_m1 = self.storage.origin_metadata_add(
                     origin_id,
                     self.origin_metadata['discovery_date'],
                     provider1['id'],
                     tool['id'],
                     self.origin_metadata['metadata'])
         o_m2 = self.storage.origin_metadata_add(
                     origin_id2,
                     self.origin_metadata2['discovery_date'],
                     provider2['id'],
                     tool['id'],
                     self.origin_metadata2['metadata'])
         provider_type = 'registry'
         m_by_provider = list(self.storage.origin_metadata_get_by(
             origin_id2, provider_type))
         expected_results = [{
             'origin_id': origin_id2,
             'discovery_date': datetime.datetime(
                                 2017, 1, 2, 0, 0,
                                 tzinfo=psycopg2.tz.FixedOffsetTimezone(
                                     offset=60,
                                     name=None)),
             'metadata': {
                 'name': 'test_origin_metadata',
                 'version': '0.0.1'
             },
             'id': o_m2,
             'provider_id': provider2['id'],
             'provider_name': 'swMATH',
             'provider_type': provider_type,
             'provider_url': 'http://www.swmath.org/',
             'tool_id': tool['id']
         }]
         # then
 
         self.assertEqual(len(m_by_provider), 1)
         self.assertEqual(m_by_provider, expected_results)
         self.assertEqual(m_by_provider[0]['id'], o_m2)
         self.assertIsNotNone(o_m1)
 
 
 class TestLocalStorage(CommonTestStorage, unittest.TestCase):
     """Test the local storage"""
 
     # Can only be tested with local storage as you can't mock
     # datetimes for the remote server
     @istest
     def fetch_history(self):
         origin = self.storage.origin_add_one(self.origin)
         with patch('datetime.datetime'):
             datetime.datetime.now.return_value = self.fetch_history_date
             fetch_history_id = self.storage.fetch_history_start(origin)
             datetime.datetime.now.assert_called_with(tz=datetime.timezone.utc)
 
         with patch('datetime.datetime'):
             datetime.datetime.now.return_value = self.fetch_history_end
             self.storage.fetch_history_end(fetch_history_id,
                                            self.fetch_history_data)
 
         fetch_history = self.storage.fetch_history_get(fetch_history_id)
         expected_fetch_history = self.fetch_history_data.copy()
 
         expected_fetch_history['id'] = fetch_history_id
         expected_fetch_history['origin'] = origin
         expected_fetch_history['date'] = self.fetch_history_date
         expected_fetch_history['duration'] = self.fetch_history_duration
 
         self.assertEqual(expected_fetch_history, fetch_history)
 
     # The remote API doesn't expose _person_add
     @istest
     def person_get(self):
         # given
         person0 = {
             'fullname': b'bob <alice@bob>',
             'name': b'bob',
             'email': b'alice@bob',
         }
         id0 = self.storage._person_add(person0)
 
         person1 = {
             'fullname': b'tony <tony@bob>',
             'name': b'tony',
             'email': b'tony@bob',
         }
         id1 = self.storage._person_add(person1)
 
         # when
         actual_persons = self.storage.person_get([id0, id1])
 
         # then
         self.assertEqual(
             list(actual_persons), [
                 {
                     'id': id0,
                     'fullname': person0['fullname'],
                     'name': person0['name'],
                     'email': person0['email'],
                 },
                 {
                     'id': id1,
                     'fullname': person1['fullname'],
                     'name': person1['name'],
                     'email': person1['email'],
                 },
             ])
 
 
 class AlteringSchemaTest(BaseTestStorage, unittest.TestCase):
     """This class is dedicated for the rare case where the schema needs to
        be altered dynamically.
 
        Otherwise, the tests could be blocking when ran altogether.
 
     """
     @istest
     def content_update(self):
         cont = copy.deepcopy(self.cont)
 
         self.storage.content_add([cont])
         # alter the sha1_git for example
         cont['sha1_git'] = hash_to_bytes(
             '3a60a5275d0333bf13468e8b3dcab90f4046e654')
 
         self.storage.content_update([cont], keys=['sha1_git'])
 
         self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status'
                             ' FROM content WHERE sha1 = %s',
                             (cont['sha1'],))
         datum = self.cursor.fetchone()
         self.assertEqual(
             (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(),
              datum[3], datum[4]),
             (cont['sha1'], cont['sha1_git'], cont['sha256'],
              cont['length'], 'visible'))
 
     @istest
     def content_update_with_new_cols(self):
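         # extend the schema on the fly with two scratch columns, update
         # them through content_update, then drop them again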
         self.cursor.execute("""alter table content
                                add column test text default null,
                                add column test2 text default null""")
 
         cont = copy.deepcopy(self.cont2)
         self.storage.content_add([cont])
         cont['test'] = 'value-1'
         cont['test2'] = 'value-2'
 
         self.storage.content_update([cont], keys=['test', 'test2'])
 
         self.cursor.execute(
             'SELECT sha1, sha1_git, sha256, length, status, test, test2'
             ' FROM content WHERE sha1 = %s',
             (cont['sha1'],))
 
         datum = self.cursor.fetchone()
         self.assertEqual(
             (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(),
              datum[3], datum[4], datum[5], datum[6]),
             (cont['sha1'], cont['sha1_git'], cont['sha256'],
              cont['length'], 'visible', cont['test'], cont['test2']))
 
         self.cursor.execute("""alter table content drop column test,
                                                    drop column test2""")