diff --git a/swh/fuse/cache.py b/swh/fuse/cache.py index eb42570..5c4e7ec 100644 --- a/swh/fuse/cache.py +++ b/swh/fuse/cache.py @@ -1,401 +1,403 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from abc import ABC from collections import OrderedDict from dataclasses import dataclass, field from datetime import datetime import json import logging from pathlib import Path import re import sqlite3 import sys from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple import aiosqlite import dateutil.parser from psutil import virtual_memory from swh.fuse.fs.artifact import RevisionHistoryShardByDate from swh.fuse.fs.entry import FuseDirEntry, FuseEntry from swh.fuse.fs.mountpoint import CacheDir, OriginDir from swh.model.exceptions import ValidationError from swh.model.identifiers import REVISION, SWHID, parse_swhid from swh.web.client.client import ORIGIN_VISIT, typify_json async def db_connect(conf: Dict[str, Any]) -> aiosqlite.Connection: # In-memory (thus temporary) caching is useful for testing purposes if conf.get("in-memory", False): path = "file::memory:?cache=shared" uri = True else: path = conf["path"] Path(path).parent.mkdir(parents=True, exist_ok=True) uri = False return await aiosqlite.connect(path, uri=uri, detect_types=sqlite3.PARSE_DECLTYPES) class FuseCache: """SwhFS retrieves both metadata and file contents from the Software Heritage archive via the network. In order to obtain reasonable performance, several caches are used to minimize network transfers. Caches are stored on disk in SQLite databases located at `$XDG_CACHE_HOME/swh/fuse/`. All caches are persistent (i.e., they survive the restart of the SwhFS process) and global (i.e., they are shared by concurrent SwhFS processes). We assume that no cache *invalidation* is necessary, due to intrinsic properties of the Software Heritage archive, such as integrity verification and append-only archive changes. To clean the caches, one can simply remove the corresponding files from disk.
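    A typical lifecycle, as a minimal sketch (the configuration keys mirror
    what `__aenter__` and `db_connect` consume; the paths below are
    hypothetical):

        conf = {
            "metadata": {"path": "/tmp/swhfs/metadata.sqlite"},
            "blob": {"path": "/tmp/swhfs/blob.sqlite"},
            "direntry": {"maxram": "10%"},
        }
        async with FuseCache(conf) as cache:
            ...  # use cache.metadata, cache.blob, cache.history, cache.direntry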
""" def __init__(self, cache_conf: Dict[str, Any]): self.cache_conf = cache_conf async def __aenter__(self): self.metadata = await MetadataCache( conf=self.cache_conf["metadata"] ).__aenter__() self.blob = await BlobCache(conf=self.cache_conf["blob"]).__aenter__() # History and raw metadata share the same SQLite db (hence the same connection) self.history = await HistoryCache( conf=self.cache_conf["metadata"], conn=self.metadata.conn ).__aenter__() self.direntry = DirEntryCache(self.cache_conf["direntry"]) return self async def __aexit__(self, type=None, val=None, tb=None) -> None: await self.metadata.__aexit__() await self.blob.__aexit__() await self.history.__aexit__() async def get_cached_swhids(self) -> AsyncGenerator[SWHID, None]: """ Return a list of all previously cached SWHID """ # Use the metadata db since it should always contain all accessed SWHIDs metadata_cursor = await self.metadata.conn.execute( "select swhid from metadata_cache" ) swhids = await metadata_cursor.fetchall() for raw_swhid in swhids: yield parse_swhid(raw_swhid[0]) async def get_cached_visits(self) -> AsyncGenerator[str, None]: """ Return a list of all previously cached visit URL """ cursor = await self.metadata.conn.execute("select url from visits_cache") urls = await cursor.fetchall() for raw_url in urls: yield raw_url[0] class AbstractCache(ABC): """ Abstract cache implementation to share common behavior between cache types """ DB_SCHEMA: str = "" conf: Dict[str, Any] conn: aiosqlite.Connection def __init__( self, conf: Dict[str, Any], conn: Optional[aiosqlite.Connection] = None ): self.conf = conf self.init_conn = conn async def __aenter__(self): if self.init_conn is None: self.conn = await db_connect(self.conf) else: self.conn = self.init_conn await self.conn.executescript(self.DB_SCHEMA) await self.conn.commit() return self async def __aexit__(self, type=None, val=None, tb=None) -> None: # In case we were given an existing connection, do not close it here if self.init_conn is None: await self.conn.close() class MetadataCache(AbstractCache): """ The metadata cache map each artifact to the complete metadata of the referenced object. This is analogous to what is available in `archive/.json` file (and generally used as data source for returning the content of those files). Artifacts are identified using their SWHIDs, or in the case of origin visits, using their URLs. 
""" DB_SCHEMA = """ create table if not exists metadata_cache ( swhid text not null primary key, metadata blob, date text ); create table if not exists visits_cache ( url text not null primary key, metadata blob, itime timestamp -- insertion time ); """ async def get(self, swhid: SWHID, typify: bool = True) -> Any: cursor = await self.conn.execute( "select metadata from metadata_cache where swhid=?", (str(swhid),) ) cache = await cursor.fetchone() if cache: metadata = json.loads(cache[0]) return typify_json(metadata, swhid.object_type) if typify else metadata else: return None async def get_visits(self, url_encoded: str) -> Optional[List[Dict[str, Any]]]: cursor = await self.conn.execute( "select metadata, itime from visits_cache where url=?", (url_encoded,), ) cache = await cursor.fetchone() if cache: metadata, itime = cache[0], cache[1] # Force-update cache with (potentially) new origin visits diff = datetime.now() - itime if diff.days >= 1: return None visits = json.loads(metadata) visits_typed = [typify_json(v, ORIGIN_VISIT) for v in visits] return visits_typed else: return None async def set(self, swhid: SWHID, metadata: Any) -> None: # Fill in the date column for revisions (used as cache for history/by-date/) swhid_date = "" if swhid.object_type == REVISION: date = dateutil.parser.parse(metadata["date"]) swhid_date = RevisionHistoryShardByDate.DATE_FMT.format( year=date.year, month=date.month, day=date.day ) await self.conn.execute( "insert into metadata_cache values (?, ?, ?)", (str(swhid), json.dumps(metadata), swhid_date), ) await self.conn.commit() async def set_visits(self, url_encoded: str, visits: List[Dict[str, Any]]) -> None: await self.conn.execute( "insert or replace into visits_cache values (?, ?, ?)", (url_encoded, json.dumps(visits), datetime.now()), ) await self.conn.commit() async def remove(self, swhid: SWHID) -> None: await self.conn.execute( "delete from metadata_cache where swhid=?", (str(swhid),), ) await self.conn.commit() class BlobCache(AbstractCache): """ The blob cache map SWHIDs of type `cnt` to the bytes of their archived content. The blob cache entry for a given content object is populated, at the latest, the first time the object is `read()`-d. It might be populated earlier on due to prefetching, e.g., when a directory pointing to the given content is listed for the first time. """ DB_SCHEMA = """ create table if not exists blob_cache ( swhid text not null primary key, blob blob ); """ async def get(self, swhid: SWHID) -> Optional[bytes]: cursor = await self.conn.execute( "select blob from blob_cache where swhid=?", (str(swhid),) ) cache = await cursor.fetchone() if cache: blob = cache[0] return blob else: return None async def set(self, swhid: SWHID, blob: bytes) -> None: await self.conn.execute( "insert into blob_cache values (?, ?)", (str(swhid), blob) ) await self.conn.commit() async def remove(self, swhid: SWHID) -> None: await self.conn.execute( "delete from blob_cache where swhid=?", (str(swhid),), ) await self.conn.commit() class HistoryCache(AbstractCache): """ The history cache map SWHIDs of type `rev` to a list of `rev` SWHIDs corresponding to all its revision ancestors, sorted in reverse topological order. As the parents cache, the history cache is lazily populated and can be prefetched. To efficiently store the ancestor lists, the history cache represents ancestors as graph edges (a pair of two SWHID nodes), meaning the history cache is shared amongst all revisions parents. 
""" DB_SCHEMA = """ create table if not exists history_graph ( src text not null, dst text not null, unique(src, dst) ); create index if not exists idx_history on history_graph(src); """ HISTORY_REC_QUERY = """ with recursive dfs(node) AS ( values(?) union select history_graph.dst from history_graph join dfs on history_graph.src = dfs.node ) -- Do not keep the root node since it is not an ancestor select * from dfs limit -1 offset 1 """ async def get(self, swhid: SWHID) -> Optional[List[SWHID]]: cursor = await self.conn.execute(self.HISTORY_REC_QUERY, (str(swhid),),) cache = await cursor.fetchall() if not cache: return None history = [] for row in cache: parent = row[0] try: history.append(parse_swhid(parent)) except ValidationError: logging.warning("Cannot parse object from history cache: %s", parent) return history async def get_with_date_prefix( self, swhid: SWHID, date_prefix: str ) -> List[Tuple[SWHID, str]]: cursor = await self.conn.execute( f""" select swhid, date from ( {self.HISTORY_REC_QUERY} ) as history join metadata_cache on history.node = metadata_cache.swhid where metadata_cache.date like '{date_prefix}%' """, (str(swhid),), ) cache = await cursor.fetchall() if not cache: return [] history = [] for row in cache: parent, date = row[0], row[1] try: history.append((parse_swhid(parent), date)) except ValidationError: logging.warning("Cannot parse object from history cache: %s", parent) return history async def set(self, history: str) -> None: history = history.strip() if history: edges = [edge.split(" ") for edge in history.split("\n")] await self.conn.executemany( "insert or ignore into history_graph values (?, ?)", edges ) await self.conn.commit() class DirEntryCache: """ The direntry cache map inode representing directories to the entries they contain. Each entry comes with its name as well as file attributes (i.e., all its needed to perform a detailed directory listing). Additional attributes of each directory entry should be looked up on a entry by entry basis, possibly hitting other caches. The direntry cache for a given dir is populated, at the latest, when the content of the directory is listed. More aggressive prefetching might happen. For instance, when first opening a dir a recursive listing of it can be retrieved from the remote backend and used to recursively populate the direntry cache for all (transitive) sub-directories. 
""" @dataclass class LRU(OrderedDict): max_ram: int used_ram: int = field(init=False, default=0) def sizeof(self, value: Any) -> int: # Rough size estimate in bytes for a list of entries return len(value) * 1000 def __getitem__(self, key: Any) -> Any: value = super().__getitem__(key) self.move_to_end(key) return value + def __delitem__(self, key: Any) -> None: + self.used_ram -= self.sizeof(self[key]) + super().__delitem__(key) + def __setitem__(self, key: Any, value: Any) -> None: if key in self: self.move_to_end(key) else: self.used_ram += self.sizeof(value) super().__setitem__(key, value) while self.used_ram > self.max_ram and self: oldest = next(iter(self)) - self.used_ram -= self.sizeof(oldest) del self[oldest] def __init__(self, conf: Dict[str, Any]): m = re.match(r"(\d+)\s*(.+)\s*", conf["maxram"]) if not m: logging.error("Cannot parse direntry maxram config: %s", conf["maxram"]) sys.exit(1) num = float(m.group(1)) unit = m.group(2).upper() if unit == "%": max_ram = int(num * virtual_memory().available / 100) else: units = {"B": 1, "KB": 10 ** 3, "MB": 10 ** 6, "GB": 10 ** 9} max_ram = int(float(num) * units[unit]) self.lru_cache = self.LRU(max_ram) def get(self, direntry: FuseDirEntry) -> Optional[List[FuseEntry]]: return self.lru_cache.get(direntry.inode, None) def set(self, direntry: FuseDirEntry, entries: List[FuseEntry]) -> None: if isinstance(direntry, (CacheDir, CacheDir.ArtifactShardBySwhid, OriginDir)): # The `cache/` and `origin/` directories are populated on the fly pass - elif ( - isinstance(direntry, RevisionHistoryShardByDate) - and not direntry.is_status_done - ): - # The `by-date/' directory is populated in parallel so only cache it - # once it has finished fetching all data from the API - pass else: self.lru_cache[direntry.inode] = entries + + def invalidate(self, direntry: FuseDirEntry) -> None: + try: + del self.lru_cache[direntry.inode] + except KeyError: + pass diff --git a/swh/fuse/fs/artifact.py b/swh/fuse/fs/artifact.py index b340a52..ab051fa 100644 --- a/swh/fuse/fs/artifact.py +++ b/swh/fuse/fs/artifact.py @@ -1,618 +1,632 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import asyncio from dataclasses import dataclass, field import json import logging import os from pathlib import Path import re from typing import Any, AsyncIterator, Dict, List from swh.fuse.fs.entry import ( EntryMode, FuseDirEntry, FuseEntry, FuseFileEntry, FuseSymlinkEntry, ) from swh.model.from_disk import DentryPerms from swh.model.identifiers import CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT, SWHID SWHID_REGEXP = r"swh:1:(cnt|dir|rel|rev|snp):[0-9a-f]{40}" @dataclass class Content(FuseFileEntry): """ Software Heritage content artifact. Attributes: swhid: Software Heritage persistent identifier prefetch: optional prefetched metadata used to set entry attributes Content leaves (AKA blobs) are represented on disks as regular files, containing the corresponding bytes, as archived. Note that permissions are associated to blobs only in the context of directories. Hence, when accessing blobs from the top-level `archive/` directory, the permissions of the `archive/SWHID` file will be arbitrary and not meaningful (e.g., `0x644`). 
""" swhid: SWHID prefetch: Any = None async def get_content(self) -> bytes: data = await self.fuse.get_blob(self.swhid) if not self.prefetch: self.prefetch = {"length": len(data)} return data async def size(self) -> int: if self.prefetch: return self.prefetch["length"] else: return await super().size() @dataclass class Directory(FuseDirEntry): """ Software Heritage directory artifact. Attributes: swhid: Software Heritage persistent identifier Directory nodes are represented as directories on the file-system, containing one entry for each entry of the archived directory. Entry names and other metadata, including permissions, will correspond to the archived entry metadata. Note that the FUSE mount is read-only, no matter what the permissions say. So it is possible that, in the context of a directory, a file is presented as writable, whereas actually writing to it will fail with `EPERM`. """ swhid: SWHID async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) for entry in metadata: name = entry["name"] swhid = entry["target"] mode = ( # Archived permissions for directories are always set to # 0o040000 so use a read-only permission instead int(EntryMode.RDONLY_DIR) if swhid.object_type == DIRECTORY else entry["perms"] ) # 1. Symlink (check symlink first because condition is less restrictive) if mode == DentryPerms.symlink: target = "" try: # Symlink target is stored in the blob content target = await self.fuse.get_blob(swhid) except Exception: pass # Ignore error and create a (broken) symlink anyway yield self.create_child( FuseSymlinkEntry, name=name, target=target, ) # 2. Regular file elif swhid.object_type == CONTENT: yield self.create_child( Content, name=name, mode=mode, swhid=swhid, # The directory API has extra info we can use to set # attributes without additional Software Heritage API call prefetch=entry, ) # 3. Regular directory elif swhid.object_type == DIRECTORY: yield self.create_child( Directory, name=name, mode=mode, swhid=swhid, ) # 4. Submodule elif swhid.object_type == REVISION: try: # Make sure the revision metadata is fetched and create a # symlink to distinguish it with regular directories await self.fuse.get_metadata(swhid) except Exception: pass # Ignore error and create a (broken) symlink anyway yield self.create_child( FuseSymlinkEntry, name=name, target=Path(self.get_relative_root_path(), f"archive/{swhid}"), ) else: raise ValueError("Unknown directory entry type: {swhid.object_type}") @dataclass class Revision(FuseDirEntry): """ Software Heritage revision artifact. Attributes: swhid: Software Heritage persistent identifier Revision (AKA commit) nodes are represented on the file-system as directories with the following entries: - `root`: source tree at the time of the commit, as a symlink pointing into `archive/`, to a SWHID of type `dir` - `parents/` (note the plural): a virtual directory containing entries named `1`, `2`, `3`, etc., one for each parent commit. Each of these entry is a symlink pointing into `archive/`, to the SWHID file for the given parent commit - `parent` (note the singular): present if and only if the current commit has at least one parent commit (which is the most common case). When present it is a symlink pointing into `parents/1/` - `history`: a virtual directory listing all its revision ancestors, sorted in reverse topological order. The history can be listed through `by-date/`, `by-hash/` or `by-page/` with each its own sharding policy. 
- `meta.json`: metadata for the current node, as a symlink pointing to the relevant `archive/<SWHID>.json` file """ swhid: SWHID async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) directory = metadata["directory"] parents = metadata["parents"] root_path = self.get_relative_root_path() yield self.create_child( FuseSymlinkEntry, name="root", target=Path(root_path, f"archive/{directory}"), ) yield self.create_child( FuseSymlinkEntry, name="meta.json", target=Path(root_path, f"archive/{self.swhid}.json"), ) yield self.create_child( RevisionParents, name="parents", mode=int(EntryMode.RDONLY_DIR), parents=[x["id"] for x in parents], ) if len(parents) >= 1: yield self.create_child( FuseSymlinkEntry, name="parent", target="parents/1/", ) yield self.create_child( RevisionHistory, name="history", mode=int(EntryMode.RDONLY_DIR), swhid=self.swhid, ) @dataclass class RevisionParents(FuseDirEntry): """ Revision virtual `parents/` directory """ parents: List[SWHID] async def compute_entries(self) -> AsyncIterator[FuseEntry]: root_path = self.get_relative_root_path() for i, parent in enumerate(self.parents): yield self.create_child( FuseSymlinkEntry, name=str(i + 1), target=Path(root_path, f"archive/{parent}"), ) @dataclass class RevisionHistory(FuseDirEntry): """ Revision virtual `history/` directory """ swhid: SWHID - async def prefill_caches(self) -> None: + async def prefill_by_date_cache(self, by_date_dir: FuseDirEntry) -> None: history = await self.fuse.get_history(self.swhid) + nb_api_calls = 0 for swhid in history: + cache = await self.fuse.cache.metadata.get(swhid) + if cache: + continue + await self.fuse.get_metadata(swhid) + # The by-date/ directory is cached temporarily in direntry, and + # invalidated + updated every 100 API calls + nb_api_calls += 1 + if nb_api_calls % 100 == 0: + self.fuse.cache.direntry.invalidate(by_date_dir) + # Make sure to have the latest entries once the prefilling is done + self.fuse.cache.direntry.invalidate(by_date_dir) async def compute_entries(self) -> AsyncIterator[FuseEntry]: - # Run it concurrently because of the many API calls necessary - asyncio.create_task(self.prefill_caches()) - - yield self.create_child( + by_date_dir = self.create_child( RevisionHistoryShardByDate, name="by-date", mode=int(EntryMode.RDONLY_DIR), history_swhid=self.swhid, ) + # Run it concurrently because of the many API calls necessary + asyncio.create_task(self.prefill_by_date_cache(by_date_dir)) + + yield by_date_dir + yield self.create_child( RevisionHistoryShardByHash, name="by-hash", mode=int(EntryMode.RDONLY_DIR), history_swhid=self.swhid, ) yield self.create_child( RevisionHistoryShardByPage, name="by-page", mode=int(EntryMode.RDONLY_DIR), history_swhid=self.swhid, ) @dataclass class RevisionHistoryShardByDate(FuseDirEntry): """ Revision virtual `history/by-date` sharded directory """ history_swhid: SWHID prefix: str = field(default="") is_status_done: bool = field(default=False) DATE_FMT = "{year:04d}/{month:02d}/{day:02d}/" ENTRIES_REGEXP = re.compile(r"^([0-9]{2,4})|(" + SWHID_REGEXP + ")$") @dataclass class StatusFile(FuseFileEntry): """ Temporary file used to indicate loading progress in by-date/ """ name: str = field(init=False, default=".status") mode: int = field(init=False, default=int(EntryMode.RDONLY_FILE)) history_swhid: SWHID def __post_init__(self): super().__post_init__() # This is the only case where we do not want the kernel to cache the file self.file_info_attrs["keep_cache"] = False
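            # Together with direct_io (set just below), this makes the kernel
            # ask the filesystem on every read instead of serving cached
            # pages, so the progress counter reported by this file stays fresh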
self.file_info_attrs["direct_io"] = True async def get_content(self) -> bytes: history_full = await self.fuse.get_history(self.history_swhid) history_cached = await self.fuse.cache.history.get_with_date_prefix( self.history_swhid, date_prefix="" ) fmt = f"Done: {len(history_cached)}/{len(history_full)}\n" return fmt.encode() def __post_init__(self): super().__post_init__() # Create the status file only once so we can easily remove it when the # entire history is fetched self.status_file = self.create_child( RevisionHistoryShardByDate.StatusFile, history_swhid=self.history_swhid ) async def compute_entries(self) -> AsyncIterator[FuseEntry]: history_full = await self.fuse.get_history(self.history_swhid) # Only check for cached revisions with the appropriate prefix, since # fetching all of them with the Web API would take too long history_cached = await self.fuse.cache.history.get_with_date_prefix( self.history_swhid, date_prefix=self.prefix ) depth = self.prefix.count("/") root_path = self.get_relative_root_path() sharded_dirs = set() for (swhid, sharded_name) in history_cached: if not sharded_name.startswith(self.prefix): continue if depth == 3: yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) # Create sharded directories else: next_prefix = sharded_name.split("/")[depth] if next_prefix not in sharded_dirs: sharded_dirs.add(next_prefix) yield self.create_child( RevisionHistoryShardByDate, name=next_prefix, mode=int(EntryMode.RDONLY_DIR), prefix=f"{self.prefix}{next_prefix}/", history_swhid=self.history_swhid, ) self.is_status_done = len(history_cached) == len(history_full) if self.is_status_done: self.fuse._remove_inode(self.status_file.inode) elif not self.is_status_done and depth == 0: yield self.status_file @dataclass class RevisionHistoryShardByHash(FuseDirEntry): """ Revision virtual `history/by-hash` sharded directory """ history_swhid: SWHID prefix: str = field(default="") SHARDING_LENGTH = 2 ENTRIES_REGEXP = re.compile(r"^([a-f0-9]+)|(" + SWHID_REGEXP + ")$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: history = await self.fuse.get_history(self.history_swhid) if self.prefix: root_path = self.get_relative_root_path() for swhid in history: if swhid.object_id.startswith(self.prefix): yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) # Create sharded directories else: sharded_dirs = set() for swhid in history: next_prefix = swhid.object_id[: self.SHARDING_LENGTH] if next_prefix not in sharded_dirs: sharded_dirs.add(next_prefix) yield self.create_child( RevisionHistoryShardByHash, name=next_prefix, mode=int(EntryMode.RDONLY_DIR), prefix=next_prefix, history_swhid=self.history_swhid, ) @dataclass class RevisionHistoryShardByPage(FuseDirEntry): """ Revision virtual `history/by-page` sharded directory """ history_swhid: SWHID prefix: Optional[int] = field(default=None) PAGE_SIZE = 10_000 PAGE_FMT = "{page_number:03d}" ENTRIES_REGEXP = re.compile(r"^([0-9]+)|(" + SWHID_REGEXP + ")$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: history = await self.fuse.get_history(self.history_swhid) if self.prefix is not None: current_page = self.prefix root_path = self.get_relative_root_path() max_idx = min(len(history), (current_page + 1) * self.PAGE_SIZE) for i in range(current_page * self.PAGE_SIZE, max_idx): swhid = history[i] yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) # Create sharded directories else:
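            # One virtual sub-directory per PAGE_SIZE-sized slice of the full
            # history, named 000, 001, 002, ... according to PAGE_FMT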
for i in range(0, len(history), self.PAGE_SIZE): page_number = i // self.PAGE_SIZE yield self.create_child( RevisionHistoryShardByPage, name=self.PAGE_FMT.format(page_number=page_number), mode=int(EntryMode.RDONLY_DIR), history_swhid=self.history_swhid, prefix=page_number, ) @dataclass class Release(FuseDirEntry): """ Software Heritage release artifact. Attributes: swhid: Software Heritage persistent identifier Release nodes are represented on the file-system as directories with the following entries: - `target`: target node, as a symlink to `archive/` - `target_type`: regular file containing the type of the target SWHID - `root`: present if and only if the release points to something that (transitively) resolves to a directory. When present, it is a symlink pointing into `archive/` to the SWHID of the given directory - `meta.json`: metadata for the current node, as a symlink pointing to the relevant `archive/<SWHID>.json` file """ swhid: SWHID async def find_root_directory(self, swhid: SWHID) -> Optional[SWHID]: if swhid.object_type == RELEASE: metadata = await self.fuse.get_metadata(swhid) return await self.find_root_directory(metadata["target"]) elif swhid.object_type == REVISION: metadata = await self.fuse.get_metadata(swhid) return metadata["directory"] elif swhid.object_type == DIRECTORY: return swhid else: return None async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) root_path = self.get_relative_root_path() yield self.create_child( FuseSymlinkEntry, name="meta.json", target=Path(root_path, f"archive/{self.swhid}.json"), ) target = metadata["target"] yield self.create_child( FuseSymlinkEntry, name="target", target=Path(root_path, f"archive/{target}") ) yield self.create_child( ReleaseType, name="target_type", mode=int(EntryMode.RDONLY_FILE), target_type=target.object_type, ) target_dir = await self.find_root_directory(target) if target_dir is not None: yield self.create_child( FuseSymlinkEntry, name="root", target=Path(root_path, f"archive/{target_dir}"), ) @dataclass class ReleaseType(FuseFileEntry): """ Release type virtual file """ target_type: str async def get_content(self) -> bytes: return str.encode(self.target_type + "\n") @dataclass class Snapshot(FuseDirEntry): """ Software Heritage snapshot artifact. Attributes: swhid: Software Heritage persistent identifier Snapshot nodes are represented on the file-system as recursive directories following the branch names structure. For example, a branch named ``refs/tags/v1.0`` will be represented as a ``refs`` directory containing a ``tags`` directory containing a ``v1.0`` symlink pointing to the branch target SWHID. """ swhid: SWHID prefix: str = field(default="") async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) root_path = self.get_relative_root_path() subdirs = set() for branch_name, branch_meta in metadata.items(): if not branch_name.startswith(self.prefix): continue next_subdirs = branch_name[len(self.prefix) :].split("/") next_prefix = next_subdirs[0] if len(next_subdirs) == 1: # Non-alias targets are symlinks to their corresponding archived # artifact, whereas alias targets are relative symlinks to the # corresponding snapshot directory entry.
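                # For example, an alias branch "HEAD" targeting
                # "refs/heads/master" becomes a relative symlink computed
                # with os.path.relpath() below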
target_type = branch_meta["target_type"] target_raw = branch_meta["target"] if target_type == "alias": prefix = Path(branch_name).parent target = os.path.relpath(target_raw, prefix) else: target = f"{root_path}/archive/{target_raw}" yield self.create_child( FuseSymlinkEntry, name=next_prefix, target=Path(target), ) else: subdirs.add(next_prefix) for subdir in subdirs: yield self.create_child( Snapshot, name=subdir, mode=int(EntryMode.RDONLY_DIR), swhid=self.swhid, prefix=f"{self.prefix}{subdir}/", ) @dataclass class Origin(FuseDirEntry): """ Software Heritage origin artifact. Origin nodes are represented on the file-system as directories with one entry for each origin visit. The visit directories are named after the visit date (`YYYY-MM-DD`; if multiple visits occur on the same day, only the first one is kept). Each visit directory contains a `meta.json` with associated metadata for the origin node, and potentially a `snapshot` symlink pointing to the visit's snapshot node. """ DATE_FMT = "{year:04d}-{month:02d}-{day:02d}" ENTRIES_REGEXP = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: # The origin's name is always its URL (encoded to create a valid UNIX filename) visits = await self.fuse.get_visits(self.name) seen_date = set() for visit in visits: date = visit["date"] name = self.DATE_FMT.format(year=date.year, month=date.month, day=date.day) if name in seen_date: logging.debug( "Conflicting visit date on origin: %s, %s", visit["origin"], str(name) ) else: seen_date.add(name) yield self.create_child( OriginVisit, name=name, mode=int(EntryMode.RDONLY_DIR), meta=visit, ) @dataclass class OriginVisit(FuseDirEntry): """ Origin visit virtual directory """ meta: Dict[str, Any] @dataclass class MetaFile(FuseFileEntry): content: str async def get_content(self) -> bytes: return str.encode(self.content + "\n") async def compute_entries(self) -> AsyncIterator[FuseEntry]: snapshot_swhid = self.meta["snapshot"] if snapshot_swhid: root_path = self.get_relative_root_path() yield self.create_child( FuseSymlinkEntry, name="snapshot", target=Path(root_path, f"archive/{snapshot_swhid}"), ) yield self.create_child( OriginVisit.MetaFile, name="meta.json", mode=int(EntryMode.RDONLY_FILE), content=json.dumps( self.meta, indent=self.fuse.conf["json-indent"], default=lambda x: str(x), ), ) OBJTYPE_GETTERS = { CONTENT: Content, DIRECTORY: Directory, REVISION: Revision, RELEASE: Release, SNAPSHOT: Snapshot, }
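# OBJTYPE_GETTERS maps the object type of a parsed SWHID to the FUSE entry
# class implementing it. A minimal lookup sketch (the `parent_dir` name and
# the surrounding FuseEntry machinery are hypothetical):
#
#     cls = OBJTYPE_GETTERS[swhid.object_type]  # e.g., Directory for a `dir` SWHID
#     entry = parent_dir.create_child(cls, name=str(swhid), mode=int(EntryMode.RDONLY_DIR), swhid=swhid)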