diff --git a/swh/fuse/cache.py b/swh/fuse/cache.py index f99626b..8c6986b 100644 --- a/swh/fuse/cache.py +++ b/swh/fuse/cache.py @@ -1,407 +1,413 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from abc import ABC from collections import OrderedDict from dataclasses import dataclass, field from datetime import datetime import json import logging from pathlib import Path import re import sqlite3 import sys from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple import aiosqlite import dateutil.parser from psutil import virtual_memory from swh.fuse.fs.artifact import RevisionHistoryShardByDate from swh.fuse.fs.entry import FuseDirEntry, FuseEntry from swh.fuse.fs.mountpoint import CacheDir, OriginDir from swh.model.exceptions import ValidationError from swh.model.swhids import CoreSWHID, ObjectType from swh.web.client.client import ORIGIN_VISIT, typify_json async def db_connect(conf: Dict[str, Any]) -> aiosqlite.Connection: # In-memory (thus temporary) caching is useful for testing purposes if conf.get("in-memory", False): path = "file::memory:?cache=shared" uri = True else: path = conf["path"] Path(path).parent.mkdir(parents=True, exist_ok=True) uri = False return await aiosqlite.connect(path, uri=uri, detect_types=sqlite3.PARSE_DECLTYPES) class FuseCache: """SwhFS retrieves both metadata and file contents from the Software Heritage archive via the network. In order to obtain reasonable performances several caches are used to minimize network transfer. Caches are stored on disk in SQLite databases located at `$XDG_CACHE_HOME/swh/fuse/`. All caches are persistent (i.e., they survive the restart of the SwhFS process) and global (i.e., they are shared by concurrent SwhFS processes). We assume that no cache *invalidation* is necessary, due to intrinsic properties of the Software Heritage archive, such as integrity verification and append-only archive changes. To clean the caches one can just remove the corresponding files from disk. 
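The cache layer described in this docstring can be exercised on its own. Below is a minimal sketch, not part of the patch, that opens the caches with an in-memory configuration (the ``direntry`` ``maxram`` value is an arbitrary assumption) and lists every SWHID previously recorded by the metadata cache:

.. code-block:: python

   # Sketch only: drive FuseCache directly with an in-memory configuration.
   import asyncio

   from swh.fuse.cache import FuseCache

   conf = {
       "metadata": {"in-memory": True},
       "blob": {"in-memory": True},
       "direntry": {"maxram": "1%"},  # assumed value; any parsable size works
   }

   async def dump_cached_swhids() -> None:
       async with FuseCache(conf) as cache:
           async for swhid in cache.get_cached_swhids():
               print(swhid)

   asyncio.run(dump_cached_swhids())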
""" def __init__(self, cache_conf: Dict[str, Any]): self.cache_conf = cache_conf async def __aenter__(self): self.metadata = await MetadataCache( conf=self.cache_conf["metadata"] ).__aenter__() self.blob = await BlobCache(conf=self.cache_conf["blob"]).__aenter__() # History and raw metadata share the same SQLite db (hence the same connection) self.history = await HistoryCache( conf=self.cache_conf["metadata"], conn=self.metadata.conn ).__aenter__() self.direntry = DirEntryCache(self.cache_conf["direntry"]) return self async def __aexit__(self, type=None, val=None, tb=None) -> None: await self.metadata.__aexit__() await self.blob.__aexit__() await self.history.__aexit__() async def get_cached_swhids(self) -> AsyncGenerator[CoreSWHID, None]: - """ Return a list of all previously cached SWHID """ + """Return a list of all previously cached SWHID""" # Use the metadata db since it should always contain all accessed SWHIDs metadata_cursor = await self.metadata.conn.execute( "select swhid from metadata_cache" ) swhids = await metadata_cursor.fetchall() for raw_swhid in swhids: yield CoreSWHID.from_string(raw_swhid[0]) async def get_cached_visits(self) -> AsyncGenerator[str, None]: - """ Return a list of all previously cached visit URL """ + """Return a list of all previously cached visit URL""" cursor = await self.metadata.conn.execute("select url from visits_cache") urls = await cursor.fetchall() for raw_url in urls: yield raw_url[0] class AbstractCache(ABC): - """ Abstract cache implementation to share common behavior between cache types """ + """Abstract cache implementation to share common behavior between cache types""" DB_SCHEMA: str = "" conf: Dict[str, Any] conn: aiosqlite.Connection def __init__( self, conf: Dict[str, Any], conn: Optional[aiosqlite.Connection] = None ): self.conf = conf self.init_conn = conn async def __aenter__(self): if self.init_conn is None: self.conn = await db_connect(self.conf) else: self.conn = self.init_conn await self.conn.executescript(self.DB_SCHEMA) await self.conn.commit() return self async def __aexit__(self, type=None, val=None, tb=None) -> None: # In case we were given an existing connection, do not close it here if self.init_conn is None: await self.conn.close() class MetadataCache(AbstractCache): - """ The metadata cache map each artifact to the complete metadata of the + """The metadata cache map each artifact to the complete metadata of the referenced object. This is analogous to what is available in `archive/.json` file (and generally used as data source for returning the content of those files). Artifacts are identified using their SWHIDs, or - in the case of origin visits, using their URLs. 
""" + in the case of origin visits, using their URLs.""" DB_SCHEMA = """ create table if not exists metadata_cache ( swhid text not null primary key, metadata blob, date text ); create table if not exists visits_cache ( url text not null primary key, metadata blob, itime timestamp -- insertion time ); """ async def get(self, swhid: CoreSWHID, typify: bool = True) -> Any: cursor = await self.conn.execute( "select metadata from metadata_cache where swhid=?", (str(swhid),) ) cache = await cursor.fetchone() if cache: metadata = json.loads(cache[0]) return ( typify_json(metadata, swhid.object_type.name.lower()) if typify else metadata ) else: return None async def get_visits(self, url_encoded: str) -> Optional[List[Dict[str, Any]]]: cursor = await self.conn.execute( - "select metadata, itime from visits_cache where url=?", (url_encoded,), + "select metadata, itime from visits_cache where url=?", + (url_encoded,), ) cache = await cursor.fetchone() if cache: metadata, itime = cache[0], cache[1] # Force-update cache with (potentially) new origin visits diff = datetime.now() - itime if diff.days >= 1: return None visits = json.loads(metadata) visits_typed = [typify_json(v, ORIGIN_VISIT) for v in visits] return visits_typed else: return None async def set(self, swhid: CoreSWHID, metadata: Any) -> None: # Fill in the date column for revisions (used as cache for history/by-date/) swhid_date = "" if swhid.object_type == ObjectType.REVISION: date = dateutil.parser.parse(metadata["date"]) swhid_date = RevisionHistoryShardByDate.DATE_FMT.format( year=date.year, month=date.month, day=date.day ) await self.conn.execute( "insert into metadata_cache values (?, ?, ?)", (str(swhid), json.dumps(metadata), swhid_date), ) await self.conn.commit() async def set_visits(self, url_encoded: str, visits: List[Dict[str, Any]]) -> None: await self.conn.execute( "insert or replace into visits_cache values (?, ?, ?)", (url_encoded, json.dumps(visits), datetime.now()), ) await self.conn.commit() async def remove(self, swhid: CoreSWHID) -> None: await self.conn.execute( - "delete from metadata_cache where swhid=?", (str(swhid),), + "delete from metadata_cache where swhid=?", + (str(swhid),), ) await self.conn.commit() class BlobCache(AbstractCache): - """ The blob cache map SWHIDs of type `cnt` to the bytes of their archived + """The blob cache map SWHIDs of type `cnt` to the bytes of their archived content. The blob cache entry for a given content object is populated, at the latest, the first time the object is `read()`-d. It might be populated earlier on due to prefetching, e.g., when a directory pointing to the given content is - listed for the first time. 
""" + listed for the first time.""" DB_SCHEMA = """ create table if not exists blob_cache ( swhid text not null primary key, blob blob ); """ async def get(self, swhid: CoreSWHID) -> Optional[bytes]: cursor = await self.conn.execute( "select blob from blob_cache where swhid=?", (str(swhid),) ) cache = await cursor.fetchone() if cache: blob = cache[0] return blob else: return None async def set(self, swhid: CoreSWHID, blob: bytes) -> None: await self.conn.execute( "insert into blob_cache values (?, ?)", (str(swhid), blob) ) await self.conn.commit() async def remove(self, swhid: CoreSWHID) -> None: await self.conn.execute( - "delete from blob_cache where swhid=?", (str(swhid),), + "delete from blob_cache where swhid=?", + (str(swhid),), ) await self.conn.commit() class HistoryCache(AbstractCache): - """ The history cache map SWHIDs of type `rev` to a list of `rev` SWHIDs + """The history cache map SWHIDs of type `rev` to a list of `rev` SWHIDs corresponding to all its revision ancestors, sorted in reverse topological order. As the parents cache, the history cache is lazily populated and can be prefetched. To efficiently store the ancestor lists, the history cache represents ancestors as graph edges (a pair of two SWHID nodes), meaning the - history cache is shared amongst all revisions parents. """ + history cache is shared amongst all revisions parents.""" DB_SCHEMA = """ create table if not exists history_graph ( src text not null, dst text not null, unique(src, dst) ); create index if not exists idx_history on history_graph(src); """ HISTORY_REC_QUERY = """ with recursive dfs(node) AS ( values(?) union select history_graph.dst from history_graph join dfs on history_graph.src = dfs.node ) -- Do not keep the root node since it is not an ancestor select * from dfs limit -1 offset 1 """ async def get(self, swhid: CoreSWHID) -> Optional[List[CoreSWHID]]: - cursor = await self.conn.execute(self.HISTORY_REC_QUERY, (str(swhid),),) + cursor = await self.conn.execute( + self.HISTORY_REC_QUERY, + (str(swhid),), + ) cache = await cursor.fetchall() if not cache: return None history = [] for row in cache: parent = row[0] try: history.append(CoreSWHID.from_string(parent)) except ValidationError: logging.warning("Cannot parse object from history cache: %s", parent) return history async def get_with_date_prefix( self, swhid: CoreSWHID, date_prefix: str ) -> List[Tuple[CoreSWHID, str]]: cursor = await self.conn.execute( f""" select swhid, date from ( {self.HISTORY_REC_QUERY} ) as history join metadata_cache on history.node = metadata_cache.swhid where metadata_cache.date like '{date_prefix}%' """, (str(swhid),), ) cache = await cursor.fetchall() if not cache: return [] history = [] for row in cache: parent, date = row[0], row[1] try: history.append((CoreSWHID.from_string(parent), date)) except ValidationError: logging.warning("Cannot parse object from history cache: %s", parent) return history async def set(self, history: str) -> None: history = history.strip() if history: edges = [edge.split(" ") for edge in history.split("\n")] await self.conn.executemany( "insert or ignore into history_graph values (?, ?)", edges ) await self.conn.commit() class DirEntryCache: - """ The direntry cache map inode representing directories to the entries + """The direntry cache map inode representing directories to the entries they contain. Each entry comes with its name as well as file attributes (i.e., all its needed to perform a detailed directory listing). 
Additional attributes of each directory entry should be looked up on a entry by entry basis, possibly hitting other caches. The direntry cache for a given dir is populated, at the latest, when the content of the directory is listed. More aggressive prefetching might happen. For instance, when first opening a dir a recursive listing of it can be retrieved from the remote backend and used to recursively populate the - direntry cache for all (transitive) sub-directories. """ + direntry cache for all (transitive) sub-directories.""" @dataclass class LRU(OrderedDict): max_ram: int used_ram: int = field(init=False, default=0) def sizeof(self, value: Any) -> int: # Rough size estimate in bytes for a list of entries return len(value) * 1000 def __getitem__(self, key: Any) -> Any: value = super().__getitem__(key) self.move_to_end(key) return value def __delitem__(self, key: Any) -> None: self.used_ram -= self.sizeof(self[key]) super().__delitem__(key) def __setitem__(self, key: Any, value: Any) -> None: if key in self: self.move_to_end(key) else: self.used_ram += self.sizeof(value) super().__setitem__(key, value) while self.used_ram > self.max_ram and self: oldest = next(iter(self)) del self[oldest] def __init__(self, conf: Dict[str, Any]): m = re.match(r"(\d+)\s*(.+)\s*", conf["maxram"]) if not m: logging.error("Cannot parse direntry maxram config: %s", conf["maxram"]) sys.exit(1) num = float(m.group(1)) unit = m.group(2).upper() if unit == "%": max_ram = int(num * virtual_memory().available / 100) else: - units = {"B": 1, "KB": 10 ** 3, "MB": 10 ** 6, "GB": 10 ** 9} + units = {"B": 1, "KB": 10**3, "MB": 10**6, "GB": 10**9} max_ram = int(float(num) * units[unit]) self.lru_cache = self.LRU(max_ram) def get(self, direntry: FuseDirEntry) -> Optional[List[FuseEntry]]: return self.lru_cache.get(direntry.inode, None) def set(self, direntry: FuseDirEntry, entries: List[FuseEntry]) -> None: if isinstance(direntry, (CacheDir, CacheDir.ArtifactShardBySwhid, OriginDir)): # The `cache/` and `origin/` directories are populated on the fly pass else: self.lru_cache[direntry.inode] = entries def invalidate(self, direntry: FuseDirEntry) -> None: try: del self.lru_cache[direntry.inode] except KeyError: pass diff --git a/swh/fuse/cli.py b/swh/fuse/cli.py index 62474aa..814d419 100644 --- a/swh/fuse/cli.py +++ b/swh/fuse/cli.py @@ -1,217 +1,218 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # WARNING: do not import unnecessary things here to keep cli startup time under # control import os from pathlib import Path from typing import Any, Dict import click from swh.core.cli import CONTEXT_SETTINGS from swh.core.cli import swh as swh_cli_group from swh.model.cli import CoreSWHIDParamType # All generic config code should reside in swh.core.config DEFAULT_CONFIG_PATH = os.environ.get( "SWH_CONFIG_FILE", os.path.join(click.get_app_dir("swh"), "global.yml") ) CACHE_HOME_DIR: Path = ( Path(os.environ["XDG_CACHE_HOME"]) if "XDG_CACHE_HOME" in os.environ else Path.home() / ".cache" ) DEFAULT_CONFIG: Dict[str, Any] = { "cache": { "metadata": {"path": str(CACHE_HOME_DIR / "swh/fuse/metadata.sqlite")}, "blob": {"path": str(CACHE_HOME_DIR / "swh/fuse/blob.sqlite")}, "direntry": {"maxram": "10%"}, }, "web-api": { "url": "https://archive.softwareheritage.org/api/1", "auth-token": None, }, "json-indent": 2, } 
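As a sketch of how a user configuration interacts with ``DEFAULT_CONFIG`` above (the YAML snippet and the in-memory blob cache are illustrative assumptions; the actual CLI reads its file through ``swh.core.config.read_raw_config``, as shown below):

.. code-block:: python

   # Sketch: merge a hypothetical user configuration over DEFAULT_CONFIG,
   # mirroring what the `fuse` CLI group does after reading its config file.
   import yaml

   from swh.core import config
   from swh.fuse.cli import DEFAULT_CONFIG

   user_conf = yaml.safe_load(
       """
       cache:
         blob:
           in-memory: true
       json-indent: 4
       """
   )
   conf = config.merge_configs(DEFAULT_CONFIG, user_conf)
   assert conf["cache"]["blob"]["in-memory"] is True  # from the user file
   assert conf["json-indent"] == 4  # overrides the default of 2
   assert conf["cache"]["metadata"]["path"].endswith("metadata.sqlite")  # default kept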
@swh_cli_group.group(name="fs", context_settings=CONTEXT_SETTINGS) @click.option( "-C", "--config-file", default=None, type=click.Path(exists=True, dir_okay=False, path_type=str), help=f"Configuration file (default: {DEFAULT_CONFIG_PATH})", ) @click.pass_context def fuse(ctx, config_file): """Software Heritage virtual file system""" import logging from shutil import which import yaml from swh.core import config if which("fusermount3") is None: logging.error("Missing dependency: 'fusermount3'") ctx.exit(1) if not config_file: config_file = DEFAULT_CONFIG_PATH if os.path.isfile(config_file): try: conf = config.read_raw_config(config.config_basepath(config_file)) if not conf: raise ValueError(f"Cannot parse configuration file: {config_file}") if config_file == DEFAULT_CONFIG_PATH: try: conf = conf["swh"]["fuse"] except KeyError: pass # recursive merge not done by config.read conf = config.merge_configs(DEFAULT_CONFIG, conf) except Exception: logging.warning( "Using default configuration (cannot load custom one)", exc_info=True ) conf = DEFAULT_CONFIG else: logging.info("Using default configuration") conf = DEFAULT_CONFIG logging.debug("Read configuration: \n%s", yaml.dump(conf)) ctx.ensure_object(dict) ctx.obj["config"] = conf @fuse.command(name="mount") @click.argument( "path", required=True, metavar="PATH", type=click.Path(exists=True, dir_okay=True, file_okay=False), ) @click.argument("swhids", nargs=-1, metavar="[SWHID]...", type=CoreSWHIDParamType()) @click.option( "-f/-d", "--foreground/--daemon", default=False, help=( "whether to run FUSE attached to the console (foreground) " "or daemonized in the background (default: daemon)" ), ) @click.pass_context def mount(ctx, swhids, path, foreground): """Mount the Software Heritage virtual file system at PATH. If specified, objects referenced by the given SWHIDs will be prefetched and used to populate the virtual file system (VFS). Otherwise the VFS will be populated on-demand, when accessing its content. Example: .. code-block:: bash $ mkdir swhfs $ swh fs mount swhfs/ $ grep printf swhfs/archive/swh:1:cnt:c839dea9e8e6f0528b468214348fee8669b305b2 printf("Hello, World!"); $ """ import asyncio from contextlib import ExitStack import logging from daemon import DaemonContext from swh.fuse import LOGGER_NAME, fuse # TODO: set default logging settings when --log-config is not passed # DEFAULT_LOG_PATH = Path(".local/swh/fuse/mount.log") with ExitStack() as stack: if not foreground: # TODO: temporary fix until swh.core has the proper logging utilities # Disable logging config before daemonizing, and reset it once # daemonized to be sure to not close file handlers log_level = logging.getLogger(LOGGER_NAME).getEffectiveLevel() logging.shutdown() # Stay in the current working directory when spawning daemon cwd = os.getcwd() stack.enter_context(DaemonContext(working_directory=cwd)) logging.config.dictConfig( { "version": 1, "handlers": { "syslog": { "class": "logging.handlers.SysLogHandler", "address": "/dev/log", }, }, "loggers": { - LOGGER_NAME: {"level": log_level, "handlers": ["syslog"],}, + LOGGER_NAME: { + "level": log_level, + "handlers": ["syslog"], + }, }, } ) conf = ctx.obj["config"] asyncio.run(fuse.main(swhids, path, conf)) @fuse.command() @click.argument( "path", required=True, metavar="PATH", type=click.Path(exists=True, dir_okay=True, file_okay=False), ) @click.pass_context def umount(ctx, path): """Unmount a mounted virtual file system. 
Note: this is equivalent to ``fusermount -u PATH``, which can be used to unmount any FUSE-based virtual file system. See ``man fusermount3``. """ import logging import subprocess try: subprocess.run(["fusermount", "-u", path], check=True) except subprocess.CalledProcessError as err: logging.error( "cannot unmount virtual file system: '%s' returned exit status %d", " ".join(err.cmd), err.returncode, ) ctx.exit(1) @fuse.command() @click.pass_context def clean(ctx): - """Clean on-disk cache(s). - - """ + """Clean on-disk cache(s).""" def rm_cache(conf, cache_name): try: Path(conf["cache"][cache_name]["path"]).unlink() except (FileNotFoundError, KeyError): pass conf = ctx.obj["config"] for cache_name in ["blob", "metadata"]: rm_cache(conf, cache_name) diff --git a/swh/fuse/fs/artifact.py b/swh/fuse/fs/artifact.py index 2e8614e..8f5ad96 100644 --- a/swh/fuse/fs/artifact.py +++ b/swh/fuse/fs/artifact.py @@ -1,621 +1,633 @@ # Copyright (C) 2020-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import asyncio from dataclasses import dataclass, field import json import logging import os from pathlib import Path import re from typing import Any, AsyncIterator, Dict, List, Optional, cast from swh.fuse.fs.entry import ( EntryMode, FuseDirEntry, FuseEntry, FuseFileEntry, FuseSymlinkEntry, ) from swh.model.from_disk import DentryPerms from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.model.swhids import CoreSWHID, ObjectType SWHID_REGEXP = r"swh:1:(cnt|dir|rel|rev|snp):[0-9a-f]{40}" @dataclass class Content(FuseFileEntry): - """ Software Heritage content artifact. + """Software Heritage content artifact. Content leaves (AKA blobs) are represented on disks as regular files, containing the corresponding bytes, as archived. Note that permissions are associated to blobs only in the context of directories. Hence, when accessing blobs from the top-level `archive/` directory, the permissions of the `archive/SWHID` file will be arbitrary and - not meaningful (e.g., `0x644`). """ + not meaningful (e.g., `0x644`).""" swhid: CoreSWHID prefetch: Any = None """optional prefetched metadata used to set entry attributes""" async def get_content(self) -> bytes: data = await self.fuse.get_blob(self.swhid) if not self.prefetch: self.prefetch = {"length": len(data)} return data async def size(self) -> int: if self.prefetch: return self.prefetch["length"] else: return await super().size() @dataclass class Directory(FuseDirEntry): - """ Software Heritage directory artifact. + """Software Heritage directory artifact. Directory nodes are represented as directories on the file-system, containing one entry for each entry of the archived directory. Entry names and other metadata, including permissions, will correspond to the archived entry metadata. Note that the FUSE mount is read-only, no matter what the permissions say. So it is possible that, in the context of a directory, a file is presented - as writable, whereas actually writing to it will fail with `EPERM`. 
""" + as writable, whereas actually writing to it will fail with `EPERM`.""" swhid: CoreSWHID async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) for entry in metadata: name = entry["name"] swhid = entry["target"] mode = ( # Archived permissions for directories are always set to # 0o040000 so use a read-only permission instead int(EntryMode.RDONLY_DIR) if swhid.object_type == ObjectType.DIRECTORY else entry["perms"] ) # 1. Symlink (check symlink first because condition is less restrictive) if mode == DentryPerms.symlink: target = b"" try: # Symlink target is stored in the blob content target = await self.fuse.get_blob(swhid) except Exception: pass # Ignore error and create a (broken) symlink anyway yield self.create_child( - FuseSymlinkEntry, name=name, target=target, + FuseSymlinkEntry, + name=name, + target=target, ) # 2. Regular file elif swhid.object_type == ObjectType.CONTENT: yield self.create_child( Content, name=name, mode=mode, swhid=swhid, # The directory API has extra info we can use to set # attributes without additional Software Heritage API call prefetch=entry, ) # 3. Regular directory elif swhid.object_type == ObjectType.DIRECTORY: yield self.create_child( - Directory, name=name, mode=mode, swhid=swhid, + Directory, + name=name, + mode=mode, + swhid=swhid, ) # 4. Submodule elif swhid.object_type == ObjectType.REVISION: try: # Make sure the revision metadata is fetched and create a # symlink to distinguish it with regular directories await self.fuse.get_metadata(swhid) except Exception: pass # Ignore error and create a (broken) symlink anyway yield self.create_child( FuseSymlinkEntry, name=name, target=Path(self.get_relative_root_path(), f"archive/{swhid}"), ) else: raise ValueError("Unknown directory entry type: {swhid.object_type}") @dataclass class Revision(FuseDirEntry): - """ Software Heritage revision artifact. + """Software Heritage revision artifact. Revision (AKA commit) nodes are represented on the file-system as directories with the following entries: - `root`: source tree at the time of the commit, as a symlink pointing into `archive/`, to a SWHID of type `dir` - `parents/` (note the plural): a virtual directory containing entries named `1`, `2`, `3`, etc., one for each parent commit. Each of these entry is a symlink pointing into `archive/`, to the SWHID file for the given parent commit - `parent` (note the singular): present if and only if the current commit has at least one parent commit (which is the most common case). When present it is a symlink pointing into `parents/1/` - `history`: a virtual directory listing all its revision ancestors, sorted in reverse topological order. The history can be listed through `by-date/`, `by-hash/` or `by-page/` with each its own sharding policy. 
- `meta.json`: metadata for the current node, as a symlink pointing to the - relevant `archive/.json` file """ + relevant `archive/.json` file""" swhid: CoreSWHID async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) directory = metadata["directory"] parents = metadata["parents"] root_path = self.get_relative_root_path() yield self.create_child( FuseSymlinkEntry, name="root", target=Path(root_path, f"archive/{directory}"), ) yield self.create_child( FuseSymlinkEntry, name="meta.json", target=Path(root_path, f"archive/{self.swhid}.json"), ) yield self.create_child( RevisionParents, name="parents", mode=int(EntryMode.RDONLY_DIR), parents=[x["id"] for x in parents], ) if len(parents) >= 1: yield self.create_child( - FuseSymlinkEntry, name="parent", target="parents/1/", + FuseSymlinkEntry, + name="parent", + target="parents/1/", ) yield self.create_child( RevisionHistory, name="history", mode=int(EntryMode.RDONLY_DIR), swhid=self.swhid, ) @dataclass class RevisionParents(FuseDirEntry): - """ Revision virtual `parents/` directory """ + """Revision virtual `parents/` directory""" parents: List[CoreSWHID] async def compute_entries(self) -> AsyncIterator[FuseEntry]: root_path = self.get_relative_root_path() for i, parent in enumerate(self.parents): yield self.create_child( FuseSymlinkEntry, name=str(i + 1), target=Path(root_path, f"archive/{parent}"), ) @dataclass class RevisionHistory(FuseDirEntry): - """ Revision virtual `history/` directory """ + """Revision virtual `history/` directory""" swhid: CoreSWHID async def prefill_by_date_cache(self, by_date_dir: FuseDirEntry) -> None: history = await self.fuse.get_history(self.swhid) nb_api_calls = 0 for swhid in history: cache = await self.fuse.cache.metadata.get(swhid) if cache: continue await self.fuse.get_metadata(swhid) # The by-date/ directory is cached temporarily in direntry, and # invalidated + updated every 100 API calls nb_api_calls += 1 if nb_api_calls % 100 == 0: self.fuse.cache.direntry.invalidate(by_date_dir) # Make sure to have the latest entries once the prefilling is done self.fuse.cache.direntry.invalidate(by_date_dir) async def compute_entries(self) -> AsyncIterator[FuseEntry]: by_date_dir = cast( RevisionHistoryShardByDate, self.create_child( RevisionHistoryShardByDate, name="by-date", mode=int(EntryMode.RDONLY_DIR), history_swhid=self.swhid, ), ) # Run it concurrently because of the many API calls necessary asyncio.create_task(self.prefill_by_date_cache(by_date_dir)) yield by_date_dir yield self.create_child( RevisionHistoryShardByHash, name="by-hash", mode=int(EntryMode.RDONLY_DIR), history_swhid=self.swhid, ) yield self.create_child( RevisionHistoryShardByPage, name="by-page", mode=int(EntryMode.RDONLY_DIR), history_swhid=self.swhid, ) @dataclass class RevisionHistoryShardByDate(FuseDirEntry): - """ Revision virtual `history/by-date` sharded directory """ + """Revision virtual `history/by-date` sharded directory""" history_swhid: CoreSWHID prefix: str = field(default="") is_status_done: bool = field(default=False) DATE_FMT = "{year:04d}/{month:02d}/{day:02d}/" ENTRIES_REGEXP = re.compile(r"^([0-9]{2,4})|(" + SWHID_REGEXP + ")$") @dataclass class StatusFile(FuseFileEntry): - """ Temporary file used to indicate loading progress in by-date/ """ + """Temporary file used to indicate loading progress in by-date/""" name: str = field(init=False, default=".status") mode: int = field(init=False, default=int(EntryMode.RDONLY_FILE)) history_swhid: CoreSWHID def 
__post_init__(self): super().__post_init__() # This is the only case where we do not want the kernel to cache the file self.file_info_attrs["keep_cache"] = False self.file_info_attrs["direct_io"] = True async def get_content(self) -> bytes: history_full = await self.fuse.get_history(self.history_swhid) history_cached = await self.fuse.cache.history.get_with_date_prefix( self.history_swhid, date_prefix="" ) fmt = f"Done: {len(history_cached)}/{len(history_full)}\n" return fmt.encode() def __post_init__(self): super().__post_init__() # Create the status file only once so we can easily remove it when the # entire history is fetched self.status_file = self.create_child( RevisionHistoryShardByDate.StatusFile, history_swhid=self.history_swhid ) async def compute_entries(self) -> AsyncIterator[FuseEntry]: history_full = await self.fuse.get_history(self.history_swhid) # Only check for cached revisions with the appropriate prefix, since # fetching all of them with the Web API would take too long history_cached = await self.fuse.cache.history.get_with_date_prefix( self.history_swhid, date_prefix=self.prefix ) depth = self.prefix.count("/") root_path = self.get_relative_root_path() sharded_dirs = set() for (swhid, sharded_name) in history_cached: if not sharded_name.startswith(self.prefix): continue if depth == 3: yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) # Create sharded directories else: next_prefix = sharded_name.split("/")[depth] if next_prefix not in sharded_dirs: sharded_dirs.add(next_prefix) yield self.create_child( RevisionHistoryShardByDate, name=next_prefix, mode=int(EntryMode.RDONLY_DIR), prefix=f"{self.prefix}{next_prefix}/", history_swhid=self.history_swhid, ) self.is_status_done = len(history_cached) == len(history_full) if self.is_status_done: self.fuse._remove_inode(self.status_file.inode) elif not self.is_status_done and depth == 0: yield self.status_file @dataclass class RevisionHistoryShardByHash(FuseDirEntry): - """ Revision virtual `history/by-hash` sharded directory """ + """Revision virtual `history/by-hash` sharded directory""" history_swhid: CoreSWHID prefix: str = field(default="") SHARDING_LENGTH = 2 ENTRIES_REGEXP = re.compile(r"^([a-f0-9]+)|(" + SWHID_REGEXP + ")$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: history = await self.fuse.get_history(self.history_swhid) if self.prefix: root_path = self.get_relative_root_path() for swhid in history: if swhid.object_id.startswith(hash_to_bytes(self.prefix)): yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) # Create sharded directories else: sharded_dirs = set() for swhid in history: next_prefix = hash_to_hex(swhid.object_id)[: self.SHARDING_LENGTH] if next_prefix not in sharded_dirs: sharded_dirs.add(next_prefix) yield self.create_child( RevisionHistoryShardByHash, name=next_prefix, mode=int(EntryMode.RDONLY_DIR), prefix=next_prefix, history_swhid=self.history_swhid, ) @dataclass class RevisionHistoryShardByPage(FuseDirEntry): - """ Revision virtual `history/by-page` sharded directory """ + """Revision virtual `history/by-page` sharded directory""" history_swhid: CoreSWHID prefix: Optional[int] = field(default=None) PAGE_SIZE = 10_000 PAGE_FMT = "{page_number:03d}" ENTRIES_REGEXP = re.compile(r"^([0-9]+)|(" + SWHID_REGEXP + ")$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: history = await self.fuse.get_history(self.history_swhid) if self.prefix is not None: current_page = 
self.prefix root_path = self.get_relative_root_path() max_idx = min(len(history), (current_page + 1) * self.PAGE_SIZE) for i in range(current_page * self.PAGE_SIZE, max_idx): swhid = history[i] yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) # Create sharded directories else: for i in range(0, len(history), self.PAGE_SIZE): page_number = i // self.PAGE_SIZE yield self.create_child( RevisionHistoryShardByPage, name=self.PAGE_FMT.format(page_number=page_number), mode=int(EntryMode.RDONLY_DIR), history_swhid=self.history_swhid, prefix=page_number, ) @dataclass class Release(FuseDirEntry): - """ Software Heritage release artifact. + """Software Heritage release artifact. Release nodes are represented on the file-system as directories with the following entries: - `target`: target node, as a symlink to `archive/` - `target_type`: regular file containing the type of the target SWHID - `root`: present if and only if the release points to something that (transitively) resolves to a directory. When present it is a symlink pointing into `archive/` to the SWHID of the given directory - `meta.json`: metadata for the current node, as a symlink pointing to the - relevant `archive/.json` file """ + relevant `archive/.json` file""" swhid: CoreSWHID async def find_root_directory(self, swhid: CoreSWHID) -> Optional[CoreSWHID]: if swhid.object_type == ObjectType.RELEASE: metadata = await self.fuse.get_metadata(swhid) return await self.find_root_directory(metadata["target"]) elif swhid.object_type == ObjectType.REVISION: metadata = await self.fuse.get_metadata(swhid) return metadata["directory"] elif swhid.object_type == ObjectType.DIRECTORY: return swhid else: return None async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) root_path = self.get_relative_root_path() yield self.create_child( FuseSymlinkEntry, name="meta.json", target=Path(root_path, f"archive/{self.swhid}.json"), ) target = metadata["target"] yield self.create_child( FuseSymlinkEntry, name="target", target=Path(root_path, f"archive/{target}") ) yield self.create_child( ReleaseType, name="target_type", mode=int(EntryMode.RDONLY_FILE), target_type=target.object_type, ) target_dir = await self.find_root_directory(target) if target_dir is not None: yield self.create_child( FuseSymlinkEntry, name="root", target=Path(root_path, f"archive/{target_dir}"), ) @dataclass class ReleaseType(FuseFileEntry): - """ Release type virtual file """ + """Release type virtual file""" target_type: ObjectType async def get_content(self) -> bytes: return str.encode(self.target_type.name.lower() + "\n") @dataclass class Snapshot(FuseDirEntry): - """ Software Heritage snapshot artifact. + """Software Heritage snapshot artifact. Snapshot nodes are represented on the file-system as recursive directories following the branch names structure. For example, a branch named ``refs/tags/v1.0`` will be represented as a ``refs`` directory containing a ``tags`` directory containing a ``v1.0`` symlink pointing to the branch - target SWHID. 
""" + target SWHID.""" swhid: CoreSWHID prefix: str = field(default="") async def compute_entries(self) -> AsyncIterator[FuseEntry]: metadata = await self.fuse.get_metadata(self.swhid) root_path = self.get_relative_root_path() subdirs = set() for branch_name, branch_meta in metadata.items(): if not branch_name.startswith(self.prefix): continue next_subdirs = branch_name[len(self.prefix) :].split("/") next_prefix = next_subdirs[0] if len(next_subdirs) == 1: # Non-alias targets are symlinks to their corresponding archived # artifact, whereas alias targets are relative symlinks to the # corresponding snapshot directory entry. target_type = branch_meta["target_type"] target_raw = branch_meta["target"] if target_type == "alias": prefix = Path(branch_name).parent target = os.path.relpath(target_raw, prefix) else: target = f"{root_path}/archive/{target_raw}" yield self.create_child( - FuseSymlinkEntry, name=next_prefix, target=Path(target), + FuseSymlinkEntry, + name=next_prefix, + target=Path(target), ) else: subdirs.add(next_prefix) for subdir in subdirs: yield self.create_child( Snapshot, name=subdir, mode=int(EntryMode.RDONLY_DIR), swhid=self.swhid, prefix=f"{self.prefix}{subdir}/", ) @dataclass class Origin(FuseDirEntry): - """ Software Heritage origin artifact. + """Software Heritage origin artifact. Origin nodes are represented on the file-system as directories with one entry for each origin visit. The visits directories are named after the visit date (`YYYY-MM-DD`, if multiple visits occur the same day only the first one is kept). Each visit directory contains a `meta.json` with associated metadata for the origin node, and potentially a `snapshot` symlink pointing to the visit's snapshot - node. """ + node.""" DATE_FMT = "{year:04d}-{month:02d}-{day:02d}" ENTRIES_REGEXP = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: # The origin's name is always its URL (encoded to create a valid UNIX filename) visits = await self.fuse.get_visits(self.name) seen_date = set() for visit in visits: date = visit["date"] name = self.DATE_FMT.format(year=date.year, month=date.month, day=date.day) if name in seen_date: logging.debug( "Conflict date on origin: %s, %s", visit["origin"], str(name) ) else: seen_date.add(name) yield self.create_child( - OriginVisit, name=name, mode=int(EntryMode.RDONLY_DIR), meta=visit, + OriginVisit, + name=name, + mode=int(EntryMode.RDONLY_DIR), + meta=visit, ) @dataclass class OriginVisit(FuseDirEntry): - """ Origin visit virtual directory """ + """Origin visit virtual directory""" meta: Dict[str, Any] @dataclass class MetaFile(FuseFileEntry): content: str async def get_content(self) -> bytes: return str.encode(self.content + "\n") async def compute_entries(self) -> AsyncIterator[FuseEntry]: snapshot_swhid = self.meta["snapshot"] if snapshot_swhid: root_path = self.get_relative_root_path() yield self.create_child( FuseSymlinkEntry, name="snapshot", target=Path(root_path, f"archive/{snapshot_swhid}"), ) yield self.create_child( OriginVisit.MetaFile, name="meta.json", mode=int(EntryMode.RDONLY_FILE), content=json.dumps( self.meta, indent=self.fuse.conf["json-indent"], default=lambda x: str(x), ), ) OBJTYPE_GETTERS = { ObjectType.CONTENT: Content, ObjectType.DIRECTORY: Directory, ObjectType.REVISION: Revision, ObjectType.RELEASE: Release, ObjectType.SNAPSHOT: Snapshot, } diff --git a/swh/fuse/fs/entry.py b/swh/fuse/fs/entry.py index b7688f2..4222aaf 100644 --- a/swh/fuse/fs/entry.py +++ b/swh/fuse/fs/entry.py @@ -1,147 
+1,145 @@ # Copyright (C) 2020-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from __future__ import annotations from dataclasses import dataclass, field from enum import IntEnum from pathlib import Path import re from stat import S_IFDIR, S_IFLNK, S_IFREG from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, Optional, Pattern, Union if TYPE_CHECKING: # avoid cyclic import from swh.fuse.fuse import Fuse class EntryMode(IntEnum): - """ Default entry mode and permissions for the FUSE. + """Default entry mode and permissions for the FUSE. The FUSE mount is always read-only, even if permissions contradict this statement (in a context of a directory, entries are listed with permissions taken from the archive). """ RDONLY_FILE = S_IFREG | 0o444 RDONLY_DIR = S_IFDIR | 0o555 RDONLY_LNK = S_IFLNK | 0o444 # `cache/` sub-directories need the write permission in order to invalidate # cached artifacts using `rm {SWHID}` RDWR_DIR = S_IFDIR | 0o755 @dataclass class FuseEntry: - """ Main wrapper class to manipulate virtual FUSE entries - """ + """Main wrapper class to manipulate virtual FUSE entries""" name: str """entry filename""" mode: int """entry permission mode""" depth: int fuse: Fuse """internal reference to the main FUSE class""" inode: int = field(init=False) """unique integer identifying the entry""" file_info_attrs: Dict[str, Any] = field(init=False, default_factory=dict) def __post_init__(self): self.inode = self.fuse._alloc_inode(self) # By default, let the kernel cache previously accessed data self.file_info_attrs["keep_cache"] = True async def size(self) -> int: - """ Return the size (in bytes) of an entry """ + """Return the size (in bytes) of an entry""" raise NotImplementedError async def unlink(self, name: str) -> None: raise NotImplementedError def get_relative_root_path(self) -> str: return "../" * (self.depth - 1) def create_child(self, constructor: Any, **kwargs) -> FuseEntry: return constructor(depth=self.depth + 1, fuse=self.fuse, **kwargs) @dataclass class FuseFileEntry(FuseEntry): - """ FUSE virtual file entry """ + """FUSE virtual file entry""" async def get_content(self) -> bytes: - """ Return the content of a file entry """ + """Return the content of a file entry""" raise NotImplementedError async def size(self) -> int: return len(await self.get_content()) @dataclass class FuseDirEntry(FuseEntry): - """ FUSE virtual directory entry """ + """FUSE virtual directory entry""" ENTRIES_REGEXP: Optional[Pattern] = field(init=False, default=None) async def size(self) -> int: return 0 def validate_entry(self, name: str) -> bool: - """ Return true if the name matches the directory entries regular - expression, and false otherwise """ + """Return true if the name matches the directory entries regular + expression, and false otherwise""" if self.ENTRIES_REGEXP: return bool(re.match(self.ENTRIES_REGEXP, name)) else: return True async def compute_entries(self): - """ Return the child entries of a directory entry """ + """Return the child entries of a directory entry""" raise NotImplementedError async def get_entries(self, offset: int = 0) -> AsyncIterator[FuseEntry]: - """ Return the child entries of a directory entry using direntry cache """ + """Return the child entries of a directory entry using direntry cache""" cache = self.fuse.cache.direntry.get(self) if cache: entries = cache else: entries = [x 
async for x in self.compute_entries()] self.fuse.cache.direntry.set(self, entries) # Avoid copy by manual iteration (instead of slicing) and use of a # generator (instead of returning the full list every time) for i in range(offset, len(entries)): yield entries[i] async def lookup(self, name: str) -> Optional[FuseEntry]: - """ Look up a FUSE entry by name """ + """Look up a FUSE entry by name""" async for entry in self.get_entries(): if entry.name == name: return entry return None @dataclass class FuseSymlinkEntry(FuseEntry): - """ FUSE virtual symlink entry - """ + """FUSE virtual symlink entry""" mode: int = field(init=False, default=int(EntryMode.RDONLY_LNK)) target: Union[str, bytes, Path] """path to symlink target""" async def size(self) -> int: return len(str(self.target)) def get_target(self) -> Union[str, bytes, Path]: - """ Return the path target of a symlink entry """ + """Return the path target of a symlink entry""" return self.target diff --git a/swh/fuse/fs/mountpoint.py b/swh/fuse/fs/mountpoint.py index 9741b0d..5439544 100644 --- a/swh/fuse/fs/mountpoint.py +++ b/swh/fuse/fs/mountpoint.py @@ -1,242 +1,244 @@ # Copyright (C) 2020-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from dataclasses import dataclass, field import json from pathlib import Path import re from typing import AsyncIterator, Optional from swh.fuse.fs.artifact import OBJTYPE_GETTERS, SWHID_REGEXP, Origin from swh.fuse.fs.entry import ( EntryMode, FuseDirEntry, FuseEntry, FuseFileEntry, FuseSymlinkEntry, ) from swh.model.exceptions import ValidationError from swh.model.hashutil import hash_to_hex from swh.model.swhids import CoreSWHID, ObjectType JSON_SUFFIX = ".json" @dataclass class Root(FuseDirEntry): - """ The FUSE mountpoint, consisting of the archive/ and origin/ directories """ + """The FUSE mountpoint, consisting of the archive/ and origin/ directories""" name: str = field(init=False, default="") mode: int = field(init=False, default=int(EntryMode.RDONLY_DIR)) depth: int = field(init=False, default=1) async def compute_entries(self) -> AsyncIterator[FuseEntry]: yield self.create_child(ArchiveDir) yield self.create_child(OriginDir) yield self.create_child(CacheDir) yield self.create_child(Readme) @dataclass class ArchiveDir(FuseDirEntry): - """ The `archive/` virtual directory allows to mount any artifact on the fly + """The `archive/` virtual directory allows to mount any artifact on the fly using its SWHID as name. The associated metadata of the artifact from the Software Heritage Web API can also be accessed through the `SWHID.json` file (in case of pagination, the JSON file will contain a complete version with all pages merged together). Note: the archive directory cannot be listed - with ls, but entries in it can be accessed (e.g., using cat or cd). 
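For reference, names looked up under ``archive/`` are parsed with ``CoreSWHID.from_string``, and a trailing ``.json`` selects the raw-metadata entry instead of the artifact itself. A tiny sketch, reusing the "Hello, World!" content SWHID that appears elsewhere in this patch:

.. code-block:: python

   # Sketch: how an archive/ entry name maps to an artifact type.
   from swh.model.swhids import CoreSWHID, ObjectType

   name = "swh:1:cnt:c839dea9e8e6f0528b468214348fee8669b305b2"
   swhid = CoreSWHID.from_string(name)
   assert swhid.object_type == ObjectType.CONTENT  # exposed as a regular file
   assert str(swhid) == name  # archive/<name>.json holds its raw metadata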
""" + with ls, but entries in it can be accessed (e.g., using cat or cd).""" name: str = field(init=False, default="archive") mode: int = field(init=False, default=int(EntryMode.RDONLY_DIR)) ENTRIES_REGEXP = re.compile(r"^(" + SWHID_REGEXP + ")(.json)?$") async def compute_entries(self) -> AsyncIterator[FuseEntry]: return yield async def lookup(self, name: str) -> Optional[FuseEntry]: # On the fly mounting of a new artifact try: if name.endswith(JSON_SUFFIX): swhid = CoreSWHID.from_string(name[: -len(JSON_SUFFIX)]) return self.create_child( MetaEntry, name=f"{swhid}{JSON_SUFFIX}", mode=int(EntryMode.RDONLY_FILE), swhid=swhid, ) else: swhid = CoreSWHID.from_string(name) await self.fuse.get_metadata(swhid) return self.create_child( OBJTYPE_GETTERS[swhid.object_type], name=str(swhid), mode=int( EntryMode.RDONLY_FILE if swhid.object_type == ObjectType.CONTENT else EntryMode.RDONLY_DIR ), swhid=swhid, ) except ValidationError: return None @dataclass class MetaEntry(FuseFileEntry): - """ An entry for a `archive/.json` file, containing all the SWHID's - metadata from the Software Heritage archive. """ + """An entry for a `archive/.json` file, containing all the SWHID's + metadata from the Software Heritage archive.""" swhid: CoreSWHID async def get_content(self) -> bytes: # Make sure the metadata is in cache await self.fuse.get_metadata(self.swhid) # Retrieve raw JSON metadata from cache (un-typified) metadata = await self.fuse.cache.metadata.get(self.swhid, typify=False) json_str = json.dumps(metadata, indent=self.fuse.conf["json-indent"]) return (json_str + "\n").encode() async def size(self) -> int: return len(await self.get_content()) @dataclass class OriginDir(FuseDirEntry): - """ The origin/ directory is lazily populated with one entry per accessed + """The origin/ directory is lazily populated with one entry per accessed origin URL (mangled to create a valid UNIX filename). The URL encoding is - done using the percent-encoding mechanism described in RFC 3986. """ + done using the percent-encoding mechanism described in RFC 3986.""" name: str = field(init=False, default="origin") mode: int = field(init=False, default=int(EntryMode.RDONLY_DIR)) ENTRIES_REGEXP = re.compile(r"^.*%3A.*$") # %3A is the encoded version of ':' def create_origin_child(self, url_encoded: str) -> FuseEntry: return super().create_child( - Origin, name=url_encoded, mode=int(EntryMode.RDONLY_DIR), + Origin, + name=url_encoded, + mode=int(EntryMode.RDONLY_DIR), ) async def compute_entries(self) -> AsyncIterator[FuseEntry]: async for url in self.fuse.cache.get_cached_visits(): yield self.create_origin_child(url) async def lookup(self, name: str) -> Optional[FuseEntry]: entry = await super().lookup(name) if entry: return entry # On the fly mounting of new origin url try: url_encoded = name await self.fuse.get_visits(url_encoded) return self.create_origin_child(url_encoded) except ValueError: return None @dataclass class CacheDir(FuseDirEntry): - """ The cache/ directory is an on-disk representation of locally cached + """The cache/ directory is an on-disk representation of locally cached objects and metadata. Via this directory you can browse cached data and selectively remove them from the cache, freeing disk space. (See `swh fs clean` in the {ref}`CLI ` to completely empty the cache). The directory is populated with symlinks to: all artifacts, identified by their SWHIDs and sharded by the first two character of their object id, the metadata identified by a `SWHID.json` entry, and the `origin/` directory. 
""" name: str = field(init=False, default="cache") mode: int = field(init=False, default=int(EntryMode.RDONLY_DIR)) ENTRIES_REGEXP = re.compile(r"^([a-f0-9]{2})|(" + OriginDir.name + ")$") @dataclass class ArtifactShardBySwhid(FuseDirEntry): ENTRIES_REGEXP = re.compile(r"^(" + SWHID_REGEXP + ")$") prefix: str = field(default="") async def compute_entries(self) -> AsyncIterator[FuseEntry]: root_path = self.get_relative_root_path() async for swhid in self.fuse.cache.get_cached_swhids(): if not hash_to_hex(swhid.object_id).startswith(self.prefix): continue yield self.create_child( FuseSymlinkEntry, name=str(swhid), target=Path(root_path, f"archive/{swhid}"), ) yield self.create_child( FuseSymlinkEntry, name=f"{swhid}{JSON_SUFFIX}", target=Path(root_path, f"archive/{swhid}{JSON_SUFFIX}"), ) async def unlink(self, name: str) -> None: try: if name.endswith(JSON_SUFFIX): name = name[: -len(JSON_SUFFIX)] swhid = CoreSWHID.from_string(name) await self.fuse.cache.metadata.remove(swhid) await self.fuse.cache.blob.remove(swhid) except ValidationError: raise async def compute_entries(self) -> AsyncIterator[FuseEntry]: prefixes = set() async for swhid in self.fuse.cache.get_cached_swhids(): prefixes.add(hash_to_hex(swhid.object_id)[:2]) for prefix in prefixes: yield self.create_child( CacheDir.ArtifactShardBySwhid, name=prefix, mode=int(EntryMode.RDWR_DIR), prefix=prefix, ) yield self.create_child( FuseSymlinkEntry, name=OriginDir.name, target=Path(self.get_relative_root_path(), OriginDir.name), ) @dataclass class Readme(FuseFileEntry): - """ Top-level README to explain briefly what is SwhFS. """ + """Top-level README to explain briefly what is SwhFS.""" name: str = field(init=False, default="README") mode: int = field(init=False, default=int(EntryMode.RDONLY_FILE)) CONTENT = """Welcome to the Software Heritage Filesystem (SwhFS)! This is a user-space POSIX filesystem to browse the Software Heritage archive, as if it were locally available. The SwhFS mount point contains 3 directories, all initially empty and lazily populated: - "archive": virtual directory to mount any Software Heritage artifact on the fly using its SWHID as name. Note: this directory cannot be listed with ls, but entries in it can be accessed (e.g., using cat or cd). - "origin": virtual directory to mount any origin using its encoded URL as name. - "cache": on-disk representation of locally cached objects and metadata. 
Try it yourself: $ cat archive/swh:1:cnt:c839dea9e8e6f0528b468214348fee8669b305b2 #include int main(void) { printf("Hello, World!\\n"); } You can find more details and examples in the SwhFS online documentation: https://docs.softwareheritage.org/devel/swh-fuse/ """ async def get_content(self) -> bytes: return self.CONTENT.encode() diff --git a/swh/fuse/fuse.py b/swh/fuse/fuse.py index 8cedd1f..207bd7e 100644 --- a/swh/fuse/fuse.py +++ b/swh/fuse/fuse.py @@ -1,361 +1,363 @@ # Copyright (C) 2020-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import asyncio import errno import functools import logging import os from pathlib import Path import time from typing import Any, Dict, List import urllib.parse import pyfuse3 import pyfuse3_asyncio import requests from swh.fuse import LOGGER_NAME from swh.fuse.cache import FuseCache from swh.fuse.fs.entry import FuseDirEntry, FuseEntry, FuseFileEntry, FuseSymlinkEntry from swh.fuse.fs.mountpoint import Root from swh.model.swhids import CoreSWHID, ObjectType from swh.web.client.client import WebAPIClient class Fuse(pyfuse3.Operations): - """ Software Heritage Filesystem in Userspace (FUSE). Locally mount parts of - the archive and navigate it as a virtual file system. """ + """Software Heritage Filesystem in Userspace (FUSE). Locally mount parts of + the archive and navigate it as a virtual file system.""" def __init__(self, root_path: Path, cache: FuseCache, conf: Dict[str, Any]): super(Fuse, self).__init__() self._next_inode: int = pyfuse3.ROOT_INODE self._inode2entry: Dict[int, FuseEntry] = {} # The fuse constructor keyword is propagated up to FuseEntry dataclass, but mypy # 0.812 considers it an unexpected kwarg. Skip typing it. 
self.root = Root(fuse=self) # type: ignore self.conf = conf self.logger = logging.getLogger(LOGGER_NAME) self.time_ns: int = time.time_ns() # start time, used as timestamp self.gid = os.getgid() self.uid = os.getuid() self.web_api = WebAPIClient( conf["web-api"]["url"], conf["web-api"]["auth-token"] ) self.cache = cache def shutdown(self) -> None: pass def _alloc_inode(self, entry: FuseEntry) -> int: - """ Return a unique inode integer for a given entry """ + """Return a unique inode integer for a given entry""" inode = self._next_inode self._next_inode += 1 self._inode2entry[inode] = entry return inode def _remove_inode(self, inode: int) -> None: try: del self._inode2entry[inode] except KeyError: pass try: pyfuse3.invalidate_inode(inode) except FileNotFoundError: pass def inode2entry(self, inode: int) -> FuseEntry: - """ Return the entry matching a given inode """ + """Return the entry matching a given inode""" try: return self._inode2entry[inode] except KeyError: raise pyfuse3.FUSEError(errno.ENOENT) async def get_metadata(self, swhid: CoreSWHID) -> Any: - """ Retrieve metadata for a given SWHID using Software Heritage API """ + """Retrieve metadata for a given SWHID using Software Heritage API""" cache = await self.cache.metadata.get(swhid) if cache: return cache try: typify = False # Get the raw JSON from the API # TODO: async web API loop = asyncio.get_event_loop() metadata = await loop.run_in_executor(None, self.web_api.get, swhid, typify) await self.cache.metadata.set(swhid, metadata) # Retrieve it from cache so it is correctly typed return await self.cache.metadata.get(swhid) except requests.HTTPError as err: self.logger.error("Cannot fetch metadata for object %s: %s", swhid, err) raise async def get_blob(self, swhid: CoreSWHID) -> bytes: - """ Retrieve the blob bytes for a given content SWHID using Software - Heritage API """ + """Retrieve the blob bytes for a given content SWHID using Software + Heritage API""" if swhid.object_type != ObjectType.CONTENT: raise pyfuse3.FUSEError(errno.EINVAL) # Make sure the metadata cache is also populated with the given SWHID await self.get_metadata(swhid) cache = await self.cache.blob.get(swhid) if cache: self.logger.debug("Found blob %s in cache", swhid) return cache try: self.logger.debug("Retrieving blob %s via web API...", swhid) loop = asyncio.get_event_loop() resp = await loop.run_in_executor(None, self.web_api.content_raw, swhid) blob = b"".join(list(resp)) await self.cache.blob.set(swhid, blob) return blob except requests.HTTPError as err: self.logger.error("Cannot fetch blob for object %s: %s", swhid, err) raise async def get_history(self, swhid: CoreSWHID) -> List[CoreSWHID]: - """ Retrieve a revision's history using Software Heritage Graph API """ + """Retrieve a revision's history using Software Heritage Graph API""" if swhid.object_type != ObjectType.REVISION: raise pyfuse3.FUSEError(errno.EINVAL) cache = await self.cache.history.get(swhid) if cache: self.logger.debug( "Found history of %s in cache (%d ancestors)", swhid, len(cache) ) return cache try: # Use the swh-graph API to retrieve the full history very fast self.logger.debug("Retrieving history of %s via graph API...", swhid) call = f"graph/visit/edges/{swhid}?edges=rev:rev" loop = asyncio.get_event_loop() history = await loop.run_in_executor(None, self.web_api._call, call) await self.cache.history.set(history.text) # Retrieve it from cache so it is correctly typed res = await self.cache.history.get(swhid) return res except requests.HTTPError as err: 
self.logger.error("Cannot fetch history for object %s: %s", swhid, err) # Ignore exception since swh-graph does not necessarily contain the # most recent artifacts from the archive. Computing the full history # from the Web API is too computationally intensive so simply return # an empty list. return [] async def get_visits(self, url_encoded: str) -> List[Dict[str, Any]]: - """ Retrieve origin visits given an encoded-URL using Software Heritage API """ + """Retrieve origin visits given an encoded-URL using Software Heritage API""" cache = await self.cache.metadata.get_visits(url_encoded) if cache: self.logger.debug( - "Found %d visits for origin '%s' in cache", len(cache), url_encoded, + "Found %d visits for origin '%s' in cache", + len(cache), + url_encoded, ) return cache try: self.logger.debug( "Retrieving visits for origin '%s' via web API...", url_encoded ) typify = False # Get the raw JSON from the API loop = asyncio.get_event_loop() # Web API only takes non-encoded URL url = urllib.parse.unquote_plus(url_encoded) origin_exists = await loop.run_in_executor( None, self.web_api.origin_exists, url ) if not origin_exists: raise ValueError("origin does not exist") visits_it = await loop.run_in_executor( None, functools.partial(self.web_api.visits, url, typify=typify) ) visits = list(visits_it) await self.cache.metadata.set_visits(url_encoded, visits) # Retrieve it from cache so it is correctly typed res = await self.cache.metadata.get_visits(url_encoded) return res except (ValueError, requests.HTTPError) as err: self.logger.error( "Cannot fetch visits for origin '%s': %s", url_encoded, err ) raise async def get_attrs(self, entry: FuseEntry) -> pyfuse3.EntryAttributes: - """ Return entry attributes """ + """Return entry attributes""" attrs = pyfuse3.EntryAttributes() attrs.st_size = 0 attrs.st_atime_ns = self.time_ns attrs.st_ctime_ns = self.time_ns attrs.st_mtime_ns = self.time_ns attrs.st_gid = self.gid attrs.st_uid = self.uid attrs.st_ino = entry.inode attrs.st_mode = entry.mode attrs.st_size = await entry.size() return attrs async def getattr( self, inode: int, _ctx: pyfuse3.RequestContext ) -> pyfuse3.EntryAttributes: - """ Get attributes for a given inode """ + """Get attributes for a given inode""" entry = self.inode2entry(inode) return await self.get_attrs(entry) async def opendir(self, inode: int, _ctx: pyfuse3.RequestContext) -> int: - """ Open a directory referred by a given inode """ + """Open a directory referred by a given inode""" # Re-use inode as directory handle self.logger.debug("opendir(inode=%d)", inode) return inode async def readdir(self, fh: int, offset: int, token: pyfuse3.ReaddirToken) -> None: - """ Read entries in an open directory """ + """Read entries in an open directory""" # opendir() uses inode as directory handle inode = fh direntry = self.inode2entry(inode) self.logger.debug( "readdir(dirname=%s, fh=%d, offset=%d)", direntry.name, fh, offset ) assert isinstance(direntry, FuseDirEntry) next_id = offset + 1 try: async for entry in direntry.get_entries(offset): name = os.fsencode(entry.name) attrs = await self.get_attrs(entry) if not pyfuse3.readdir_reply(token, name, attrs, next_id): break next_id += 1 self._inode2entry[attrs.st_ino] = entry except Exception as err: self.logger.exception("Cannot readdir: %s", err) raise pyfuse3.FUSEError(errno.ENOENT) async def open( self, inode: int, _flags: int, _ctx: pyfuse3.RequestContext ) -> pyfuse3.FileInfo: - """ Open an inode and return a unique file handle """ + """Open an inode and return a unique file handle""" 
# Re-use inode as file handle self.logger.debug("open(inode=%d)", inode) entry = self.inode2entry(inode) return pyfuse3.FileInfo(fh=inode, **entry.file_info_attrs) async def read(self, fh: int, offset: int, length: int) -> bytes: - """ Read `length` bytes from file handle `fh` at position `offset` """ + """Read `length` bytes from file handle `fh` at position `offset`""" # open() uses inode as file handle inode = fh entry = self.inode2entry(inode) self.logger.debug( "read(name=%s, fh=%d, offset=%d, length=%d)", entry.name, fh, offset, length ) assert isinstance(entry, FuseFileEntry) try: data = await entry.get_content() return data[offset : offset + length] except Exception as err: self.logger.exception("Cannot read: %s", err) raise pyfuse3.FUSEError(errno.ENOENT) async def lookup( self, parent_inode: int, name: str, _ctx: pyfuse3.RequestContext ) -> pyfuse3.EntryAttributes: - """ Look up a directory entry by name and get its attributes """ + """Look up a directory entry by name and get its attributes""" name = os.fsdecode(name) parent_entry = self.inode2entry(parent_inode) self.logger.debug( "lookup(parent_name=%s, parent_inode=%d, name=%s)", parent_entry.name, parent_inode, name, ) assert isinstance(parent_entry, FuseDirEntry) try: if parent_entry.validate_entry(name): lookup_entry = await parent_entry.lookup(name) if lookup_entry: return await self.get_attrs(lookup_entry) except Exception as err: self.logger.exception("Cannot lookup: %s", err) raise pyfuse3.FUSEError(errno.ENOENT) async def readlink(self, inode: int, _ctx: pyfuse3.RequestContext) -> bytes: entry = self.inode2entry(inode) self.logger.debug("readlink(name=%s, inode=%d)", entry.name, inode) assert isinstance(entry, FuseSymlinkEntry) return os.fsencode(entry.get_target()) async def unlink( self, parent_inode: int, name: str, _ctx: pyfuse3.RequestContext ) -> None: - """ Remove a file """ + """Remove a file""" name = os.fsdecode(name) parent_entry = self.inode2entry(parent_inode) self.logger.debug( "unlink(parent_name=%s, parent_inode=%d, name=%s)", parent_entry.name, parent_inode, name, ) try: await parent_entry.unlink(name) except Exception as err: self.logger.exception("Cannot unlink: %s", err) raise pyfuse3.FUSEError(errno.ENOENT) async def main(swhids: List[CoreSWHID], root_path: Path, conf: Dict[str, Any]) -> None: - """ swh-fuse CLI entry-point """ + """swh-fuse CLI entry-point""" # Use pyfuse3 asyncio layer to match the rest of Software Heritage codebase pyfuse3_asyncio.enable() async with FuseCache(conf["cache"]) as cache: fs = Fuse(root_path, cache, conf) # Initially populate the cache for swhid in swhids: try: await fs.get_metadata(swhid) except Exception as err: fs.logger.exception("Cannot prefetch object %s: %s", swhid, err) fuse_options = set(pyfuse3.default_options) fuse_options.add("fsname=swhfs") try: pyfuse3.init(fs, root_path, fuse_options) await pyfuse3.main() except Exception as err: fs.logger.error("Error running FUSE: %s", err) finally: fs.shutdown() pyfuse3.close(unmount=True) diff --git a/swh/fuse/tests/conftest.py b/swh/fuse/tests/conftest.py index 7e0c8f4..10cb70e 100644 --- a/swh/fuse/tests/conftest.py +++ b/swh/fuse/tests/conftest.py @@ -1,81 +1,84 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json from multiprocessing import Process import os from pathlib import Path from tempfile import 
NamedTemporaryFile, TemporaryDirectory import time from click.testing import CliRunner import pytest import yaml import swh.fuse.cli as cli from swh.fuse.tests.data.api_data import API_URL, MOCK_ARCHIVE @pytest.fixture def web_api_mock(requests_mock): for api_call, data in MOCK_ARCHIVE.items(): # Convert Python dict JSON into a string (only for non-raw API call) if not api_call.endswith("raw/") and not api_call.startswith("graph/"): data = json.dumps(data) http_method = requests_mock.get if api_call.startswith("origin/") and api_call.endswith("get/"): http_method = requests_mock.head http_method(f"{API_URL}/{api_call}", text=data) return requests_mock @pytest.fixture def fuse_mntdir(web_api_mock): tmpdir = TemporaryDirectory(suffix=".swh-fuse-test") config = { - "cache": {"metadata": {"in-memory": True}, "blob": {"in-memory": True},}, + "cache": { + "metadata": {"in-memory": True}, + "blob": {"in-memory": True}, + }, "web-api": {"url": API_URL, "auth-token": None}, "json-indent": None, } # Run FUSE in foreground mode but in a separate process, so it does not # block execution and remains easy to kill during teardown def fuse_process(mntdir: Path): with NamedTemporaryFile(suffix=".swh-fuse-test.yml") as tmpfile: config_path = Path(tmpfile.name) config_path.write_text(yaml.dump(config)) CliRunner().invoke( cli.fuse, args=[ "--config-file", str(config_path), "mount", str(mntdir), "--foreground", ], ) fuse = Process(target=fuse_process, args=[Path(tmpdir.name)]) fuse.start() # Wait max 3 seconds for the FUSE to correctly mount for i in range(30): try: root = os.listdir(tmpdir.name) if root: break except FileNotFoundError: pass time.sleep(0.1) else: raise FileNotFoundError(f"Could not mount FUSE in {tmpdir.name}") yield Path(tmpdir.name) CliRunner().invoke(cli.umount, [tmpdir.name]) fuse.join() diff --git a/swh/fuse/tests/data/config.py b/swh/fuse/tests/data/config.py index 9f8af8d..c10e814 100644 --- a/swh/fuse/tests/data/config.py +++ b/swh/fuse/tests/data/config.py @@ -1,100 +1,112 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """ Use the Rust compiler (v1.42.0) as a testing repository """ def remove_swhid_prefix(swhid_str: str) -> str: prefix = "swh:1:XXX:" return swhid_str[len(prefix) :] # Content REGULAR_FILE = "swh:1:cnt:61d3c9e1157203f0c4ed5165608d92294eaca808" # Directory ROOT_DIR = "swh:1:dir:c6dcbe9711ea6d5a31429a833a3d0c59cbbb2578" # Directory (symlinks entries) CNT_SYMLINK = "swh:1:cnt:76219eb72e8524f15c21ec93b9b2592da49b5460" # from rust-clippy DIR_WITH_CNT_SYMLINK = "swh:1:dir:8f04a97403e13995c8790aef9158c8981b026223" DIR_SYMLINK = "swh:1:cnt:e8310385c56dc4bbe379f43400f3181f6a59f260" # from Limnoria DIR_WITH_DIR_SYMLINK = "swh:1:dir:0653a2af52ebb771ef8ce0388c942d77f9d7aee8" REV_SYMLINKS = [ # from rust compiler "swh:1:rev:87dd6843678575f8dda962f239d14ef4be14b352", "swh:1:rev:1a2390247ad6d08160e0dd74f40a01a9578659c2", "swh:1:rev:4d78994915af1bde9a95c04a8c27d8dca066232a", "swh:1:rev:3e6e1001dc6e095dbd5c88005e80969f60e384e1", "swh:1:rev:11e893fc1357bc688418ddf1087c2b7aa25d154d", "swh:1:rev:1c2bd024d13f8011307e13386cf1fea2180352b5", "swh:1:rev:92baf7293dd2d418d2ac4b141b0faa822075d9f7", ] DIR_WITH_REV_SYMLINK = "swh:1:dir:80ae84abc6122c47aae597fde99645f8663d1aba" # Revision ROOT_REV = "swh:1:rev:b8cedc00407a4c56a3bda1ed605c6fc166655447" REV_SMALL_HISTORY = 
"swh:1:rev:37426e42cf78a43779312d780eecb21a64006d99" # Release ROOT_REL = "swh:1:rel:874f7cbe352033cac5a8bc889847da2fe1d13e9f" # Snapshot # WARNING: Do not use a snapshot artifact which is paginated because it will be # a pain to synchronize formats properly between the mock Web API/Web API Client # (since they differ slightly for snapshots). The Web API Client is already # responsible for merging everything together so the real API will have no # problem, only the mock offline one. ROOT_SNP = "swh:1:snp:02db117fef22434f1658b833a756775ca6effed0" ROOT_SNP_MASTER_BRANCH = "swh:1:rev:430a9fd4c797c50cea26157141b2408073b2ed91" FAKE_SNP_SPECIAL_CASES_SWHID = "swh:1:snp:0000000000000000000000000000000000000000" FAKE_SNP_SPECIAL_CASES = { # All possible target types - "mycnt": {"target_type": "content", "target": remove_swhid_prefix(REGULAR_FILE),}, - "mydir": {"target_type": "directory", "target": remove_swhid_prefix(ROOT_DIR),}, - "myrev": {"target_type": "revision", "target": remove_swhid_prefix(ROOT_REV),}, - "myrel": {"target_type": "release", "target": remove_swhid_prefix(ROOT_REL),}, + "mycnt": { + "target_type": "content", + "target": remove_swhid_prefix(REGULAR_FILE), + }, + "mydir": { + "target_type": "directory", + "target": remove_swhid_prefix(ROOT_DIR), + }, + "myrev": { + "target_type": "revision", + "target": remove_swhid_prefix(ROOT_REV), + }, + "myrel": { + "target_type": "release", + "target": remove_swhid_prefix(ROOT_REL), + }, "refs/heads/master": { "target_type": "revision", "target": remove_swhid_prefix(ROOT_SNP_MASTER_BRANCH), }, # Alias with different target paths "alias-rootdir": { "target_type": "alias", "target": "refs/heads/master", "expected_symlink": "refs/heads/master", }, "refs/heads/alias-subdir": { "target_type": "alias", "target": "refs/heads/master", "expected_symlink": "master", }, "refs/tags/alias-different-subdir": { "target_type": "alias", "target": "refs/heads/master", "expected_symlink": "../heads/master", }, } # Origin ORIGIN_URL = "https://github.com/rust-lang/rust" ORIGIN_URL_ENCODED = "https%3A%2F%2Fgithub.com%2Frust-lang%2Frust" # Special corner cases (not from Rust compiler) REL_TARGET_CNT = "swh:1:rel:da5f9898d6248ab26277116f54aca855338401d2" TARGET_CNT = "swh:1:cnt:be5effea679c057aec2bb020f0241b1d1d660840" REL_TARGET_DIR = "swh:1:rel:3a7b2dfffed2945d2933ba4ebc063adba35ddb2e" TARGET_DIR = "swh:1:dir:b24d39c928b9c3f440f8e2ec06c78f43d28d87d6" ALL_ENTRIES = [ REGULAR_FILE, ROOT_DIR, CNT_SYMLINK, DIR_SYMLINK, *REV_SYMLINKS, DIR_WITH_CNT_SYMLINK, DIR_WITH_DIR_SYMLINK, DIR_WITH_REV_SYMLINK, ROOT_REV, REV_SMALL_HISTORY, ROOT_REL, REL_TARGET_CNT, REL_TARGET_DIR, ROOT_SNP, ROOT_SNP_MASTER_BRANCH, ] diff --git a/swh/fuse/tests/data/gen-api-data.py b/swh/fuse/tests/data/gen-api-data.py index 211b4ad..0d46896 100755 --- a/swh/fuse/tests/data/gen-api-data.py +++ b/swh/fuse/tests/data/gen-api-data.py @@ -1,169 +1,169 @@ #!/usr/bin/env python3 # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json from typing import Any, Dict import requests from swh.fuse.tests.api_url import ( GRAPH_API_REQUEST, swhid_to_graph_url, swhid_to_web_url, ) from swh.fuse.tests.data.config import ( ALL_ENTRIES, FAKE_SNP_SPECIAL_CASES, FAKE_SNP_SPECIAL_CASES_SWHID, ORIGIN_URL, REV_SMALL_HISTORY, ) from swh.model.hashutil import hash_to_bytes from swh.model.swhids import CoreSWHID, 
ObjectType API_URL_real = "https://archive.softwareheritage.org/api/1" API_URL_test = "https://invalid-test-only.archive.softwareheritage.org/api/1" # Use your own API token to lift rate limiting. Note: this is not necessary to generate # the API data only once but can be useful when re-generating it multiple times. API_TOKEN = "" MOCK_ARCHIVE: Dict[str, Any] = {} # Temporary map (swhid -> metadata) to ease data generation METADATA: Dict[CoreSWHID, Any] = {} def get_from_api(endpoint: str) -> str: headers = {"Authorization": f"Bearer {API_TOKEN}"} if API_TOKEN else {} return requests.get(f"{API_URL_real}/{endpoint}", headers=headers).text def generate_archive_web_api( swhid: CoreSWHID, raw: bool = False, recursive: bool = False ) -> None: # Already in mock archive if swhid in METADATA and not raw: return url = swhid_to_web_url(swhid, raw) data = get_from_api(url) if not raw: data = json.loads(data) MOCK_ARCHIVE[url] = data METADATA[swhid] = data # Retrieve additional needed data for different artifacts (eg: content's # blob data, release target, etc.) if recursive: if swhid.object_type == ObjectType.CONTENT: generate_archive_web_api(swhid, raw=True) elif swhid.object_type == ObjectType.RELEASE: target_type = METADATA[swhid]["target_type"] target_id = METADATA[swhid]["target"] target = CoreSWHID( object_type=ObjectType[target_type.upper()], object_id=hash_to_bytes(target_id), ) generate_archive_web_api(target, recursive=True) def generate_archive_graph_api(swhid: CoreSWHID) -> None: if swhid.object_type == ObjectType.REVISION: # Empty history for all revisions (except REV_SMALL_HISTORY used in tests) url = swhid_to_graph_url(swhid, GRAPH_API_REQUEST.HISTORY) MOCK_ARCHIVE[url] = "" if str(swhid) == REV_SMALL_HISTORY: # TODO: temporary fix, retrieve from the graph API once it is public history = """ swh:1:rev:37426e42cf78a43779312d780eecb21a64006d99 swh:1:rev:0cf3c2ad935be699281ed20fb3d2f29554e6229b swh:1:rev:0cf3c2ad935be699281ed20fb3d2f29554e6229b swh:1:rev:37180552769b316e7239d047008f187127e630e6 swh:1:rev:37180552769b316e7239d047008f187127e630e6 swh:1:rev:dd2716f56c7cf55f2904fbbf4dfabaab1afbcd88 swh:1:rev:dd2716f56c7cf55f2904fbbf4dfabaab1afbcd88 swh:1:rev:968ec145278d3d6562e4b5ec4006af97dc0da563 swh:1:rev:968ec145278d3d6562e4b5ec4006af97dc0da563 swh:1:rev:34dc7053ebfd440648f49dc83d2538ab5e7ceda5 swh:1:rev:34dc7053ebfd440648f49dc83d2538ab5e7ceda5 swh:1:rev:c56a729ff1d9467d612bf522614519ac7b97f798 swh:1:rev:c56a729ff1d9467d612bf522614519ac7b97f798 swh:1:rev:eb7807c4fe7a2c2ad3c074705fb70de5eae5abe3 swh:1:rev:eb7807c4fe7a2c2ad3c074705fb70de5eae5abe3 swh:1:rev:d601b357ecbb1fa33dc10c177bb557868be07deb swh:1:rev:d601b357ecbb1fa33dc10c177bb557868be07deb swh:1:rev:2a2474d497ae19472b4366f6d8d62e9a516787c3 swh:1:rev:2a2474d497ae19472b4366f6d8d62e9a516787c3 swh:1:rev:eed5c0aa249f3e17bbabeeba1650ab699e3dff5a swh:1:rev:eed5c0aa249f3e17bbabeeba1650ab699e3dff5a swh:1:rev:67d1f0a9aafaa7dcd63b86032127ab660e630c46 swh:1:rev:67d1f0a9aafaa7dcd63b86032127ab660e630c46 swh:1:rev:2e3fa5bd68677762c619d83dfdf1a83ba7f0e749 swh:1:rev:2e3fa5bd68677762c619d83dfdf1a83ba7f0e749 swh:1:rev:a9c639ec8af3a4099108788c1db0176c7fea5799 swh:1:rev:a9c639ec8af3a4099108788c1db0176c7fea5799 swh:1:rev:c06ea8f9445dbb5eda99ac8730d7fb2177df6816 swh:1:rev:c06ea8f9445dbb5eda99ac8730d7fb2177df6816 swh:1:rev:422b8a6be4aab120685f450db0a520fcb5a8aa6b swh:1:rev:422b8a6be4aab120685f450db0a520fcb5a8aa6b swh:1:rev:e8759934711c70c50b5d616be22104e649abff58 swh:1:rev:e8759934711c70c50b5d616be22104e649abff58 
swh:1:rev:63b5e18207c7f8a261c1f7f50fd8c7bbf9a21bda swh:1:rev:63b5e18207c7f8a261c1f7f50fd8c7bbf9a21bda swh:1:rev:5dfe101e5197d6854aa1d8c9907ac7851468d468 swh:1:rev:5dfe101e5197d6854aa1d8c9907ac7851468d468 swh:1:rev:287d69ddacba3f5945b70695fb721b2f055d3ee6 swh:1:rev:287d69ddacba3f5945b70695fb721b2f055d3ee6 swh:1:rev:85a701c8f668fc03e6340682956e7ca7d9cf54bc swh:1:rev:85a701c8f668fc03e6340682956e7ca7d9cf54bc swh:1:rev:241305caab232b04666704dc6853c41312cd283a swh:1:rev:241305caab232b04666704dc6853c41312cd283a swh:1:rev:0d9565a4c144c07dab052161eb5fa3815dcd7f06 swh:1:rev:0d9565a4c144c07dab052161eb5fa3815dcd7f06 swh:1:rev:72c6c60d80cdfe63af5046a1a98549f0515734f2 swh:1:rev:72c6c60d80cdfe63af5046a1a98549f0515734f2 swh:1:rev:c483808e0ff9836bc1cda0ce95d77c8b7d3be91c swh:1:rev:c483808e0ff9836bc1cda0ce95d77c8b7d3be91c swh:1:rev:1c60be2f32f70f9181a261ae2c2b4efe353d0f85 swh:1:rev:1c60be2f32f70f9181a261ae2c2b4efe353d0f85 swh:1:rev:bcf29b882acdf477be412fdb401b0fc2a6c819aa swh:1:rev:bcf29b882acdf477be412fdb401b0fc2a6c819aa swh:1:rev:261d543920e1c66049c469773ca989aaf9ce480e swh:1:rev:261d543920e1c66049c469773ca989aaf9ce480e swh:1:rev:24d5ff75c3abfe7b327c48468ed9a39f0d8a0427 swh:1:rev:24d5ff75c3abfe7b327c48468ed9a39f0d8a0427 swh:1:rev:d3c0762ff85ff7d29668d1f5d2361df03978bbea swh:1:rev:d3c0762ff85ff7d29668d1f5d2361df03978bbea swh:1:rev:af44ec2856603b8a978a1f2582c285c7c0065403 swh:1:rev:af44ec2856603b8a978a1f2582c285c7c0065403 swh:1:rev:69a34503f4d51b639855501f1b6d6ce2da4e16c7 swh:1:rev:69a34503f4d51b639855501f1b6d6ce2da4e16c7 swh:1:rev:0364a801bb29211d4731f3f910c7629286b51c45 swh:1:rev:0364a801bb29211d4731f3f910c7629286b51c45 swh:1:rev:25eb1fd3c9d997e460dff3e03d87e398e616c726 swh:1:rev:25eb1fd3c9d997e460dff3e03d87e398e616c726 swh:1:rev:4a1f86ccd7e823f63d12208baef79b1e74479203 swh:1:rev:4a1f86ccd7e823f63d12208baef79b1e74479203 swh:1:rev:0016473117e4bc3c8959bf2fd49368844847d74c swh:1:rev:0016473117e4bc3c8959bf2fd49368844847d74c swh:1:rev:935442babcf4f8ae52c1a13bb9ce07270a302886 swh:1:rev:935442babcf4f8ae52c1a13bb9ce07270a302886 swh:1:rev:1f3cff91f6762b0f47f41025b5e2c5ac942479ba swh:1:rev:1f3cff91f6762b0f47f41025b5e2c5ac942479ba swh:1:rev:bc286c7f2ceb5c3d2e06ec72f78d28842f94ef65 swh:1:rev:bc286c7f2ceb5c3d2e06ec72f78d28842f94ef65 swh:1:rev:f038f4d533f897a29f9422510d1b3f0caac97388 swh:1:rev:f038f4d533f897a29f9422510d1b3f0caac97388 swh:1:rev:d6b7c96c3eb29b9244ece0c046d3f372ff432d04 swh:1:rev:d6b7c96c3eb29b9244ece0c046d3f372ff432d04 swh:1:rev:c01efc669f09508b55eced32d3c88702578a7c3e -""" # NoQA: E501 +""" # NoQA: B950 MOCK_ARCHIVE[url] = history hist_nodes = set( map( CoreSWHID.from_string, [edge.split(" ")[1] for edge in history.strip().split("\n")], ) ) for swhid in hist_nodes: generate_archive_web_api(swhid, recursive=False) def generate_origin_archive_web_api(url: str): url_visits = f"origin/{url}/visits/" data = get_from_api(url_visits) data = json.loads(data) MOCK_ARCHIVE[url_visits] = data # Necessary since swh-fuse will check the origin URL using the get/ endpoint url_get = f"origin/{url}/get/" MOCK_ARCHIVE[url_get] = "" for entry in ALL_ENTRIES: swhid = CoreSWHID.from_string(entry) generate_archive_web_api(swhid, recursive=True) generate_archive_graph_api(swhid) # Custom fake snapshot to handle most special cases MOCK_ARCHIVE[swhid_to_web_url(FAKE_SNP_SPECIAL_CASES_SWHID)] = { "branches": FAKE_SNP_SPECIAL_CASES } # Origin artifacts are not identified by SWHID but using an URL generate_origin_archive_web_api(ORIGIN_URL) print("# GENERATED FILE, DO NOT EDIT.") print("# Run './gen-api-data.py > api_data.py' 
instead.") print("# flake8: noqa") print("from typing import Any, Dict") print("") print(f"API_URL = '{API_URL_test}'\n") print(f"MOCK_ARCHIVE: Dict[str, Any] = {MOCK_ARCHIVE}") diff --git a/swh/fuse/tests/test_cli.py b/swh/fuse/tests/test_cli.py index d8ee8e7..c7c1f1e 100644 --- a/swh/fuse/tests/test_cli.py +++ b/swh/fuse/tests/test_cli.py @@ -1,37 +1,42 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from click.testing import CliRunner import yaml import swh.fuse.cli as cli # mount/umount commands are already tested when setting up the fuse_mntdir fixture def test_clean_command(tmp_path): fake_metadata_db = tmp_path / "metadata.sqlite" fake_blob_db = tmp_path / "blob.sqlite" config_path = tmp_path / "config.yml" config = { "cache": { "metadata": {"path": str(fake_metadata_db)}, "blob": {"path": str(fake_blob_db)}, }, } fake_metadata_db.touch() fake_blob_db.touch() config_path.write_text(yaml.dump(config)) assert fake_metadata_db.exists() assert fake_blob_db.exists() CliRunner().invoke( - cli.fuse, args=["--config-file", str(config_path), "clean",], + cli.fuse, + args=[ + "--config-file", + str(config_path), + "clean", + ], ) assert not fake_metadata_db.exists() assert not fake_blob_db.exists()