diff --git a/swh/fuse/cache.py b/swh/fuse/cache.py
index c547714..8cc0639 100644
--- a/swh/fuse/cache.py
+++ b/swh/fuse/cache.py
@@ -1,291 +1,291 @@
 # Copyright (C) 2020 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from abc import ABC
 from collections import OrderedDict
 from dataclasses import dataclass, field
 import json
 import logging
 from pathlib import Path
 import re
 import sys
 from typing import Any, AsyncGenerator, Dict, List, Optional

 import aiosqlite
 from psutil import virtual_memory

 from swh.fuse.fs.entry import FuseDirEntry, FuseEntry
 from swh.fuse.fs.mountpoint import ArchiveDir, MetaDir
 from swh.model.exceptions import ValidationError
 from swh.model.identifiers import SWHID, parse_swhid
 from swh.web.client.client import typify_json


 class FuseCache:
     """SwhFS retrieves both metadata and file contents from the Software
     Heritage archive via the network. In order to obtain reasonable
     performance, several caches are used to minimize network transfers.

     Caches are stored on disk in SQLite databases located at
     `$XDG_CACHE_HOME/swh/fuse/`.

     All caches are persistent (i.e., they survive the restart of the SwhFS
     process) and global (i.e., they are shared by concurrent SwhFS
     processes).

     We assume that no cache *invalidation* is necessary, due to intrinsic
     properties of the Software Heritage archive, such as integrity
     verification and append-only archive changes. To clean the caches one
     can just remove the corresponding files from disk.
     """

     def __init__(self, cache_conf: Dict[str, Any]):
         self.cache_conf = cache_conf

     async def __aenter__(self):
         self.metadata = MetadataCache(self.cache_conf["metadata"])
         self.blob = BlobCache(self.cache_conf["blob"])
         self.history = HistoryCache(self.cache_conf["history"])
         self.direntry = DirEntryCache(self.cache_conf["direntry"])

         await self.metadata.__aenter__()
         await self.blob.__aenter__()
         await self.history.__aenter__()

         return self

     async def __aexit__(self, type=None, val=None, tb=None) -> None:
         await self.metadata.__aexit__()
         await self.blob.__aexit__()
         await self.history.__aexit__()

     async def get_cached_swhids(self) -> AsyncGenerator[SWHID, None]:
         """ Yield all previously cached SWHIDs """

         # Use the metadata db since it should always contain all accessed SWHIDs
         metadata_cursor = await self.metadata.conn.execute(
             "select swhid from metadata_cache"
         )
         swhids = await metadata_cursor.fetchall()
         for raw_swhid in swhids:
             yield parse_swhid(raw_swhid[0])


 class AbstractCache(ABC):
     """ Abstract cache implementation to share common behavior between cache
     types (such as: YAML config parsing, SQLite context manager) """

     def __init__(self, conf: Dict[str, Any]):
         self.conf = conf

     async def __aenter__(self):
         # In-memory (thus temporary) caching is useful for testing purposes
         if self.conf.get("in-memory", False):
             path = ":memory:"
         else:
             path = Path(self.conf["path"])
             path.parent.mkdir(parents=True, exist_ok=True)
         self.conn = await aiosqlite.connect(path)
         return self

     async def __aexit__(self, type=None, val=None, tb=None) -> None:
         await self.conn.close()


 class MetadataCache(AbstractCache):
     """ The metadata cache maps each SWHID to the complete metadata of the
     referenced object. This is analogous to what is available in the
     `meta/<SWHID>.json` file (and generally used as data source for
     returning the content of those files) """

     async def __aenter__(self):
         await super().__aenter__()
         await self.conn.execute(
             "create table if not exists metadata_cache (swhid, metadata)"
         )
         await self.conn.commit()
         return self

     async def get(self, swhid: SWHID, typify: bool = True) -> Any:
         cursor = await self.conn.execute(
             "select metadata from metadata_cache where swhid=?", (str(swhid),)
         )
         cache = await cursor.fetchone()
         if cache:
             metadata = json.loads(cache[0])
             return typify_json(metadata, swhid.object_type) if typify else metadata
         else:
             return None

     async def set(self, swhid: SWHID, metadata: Any) -> None:
         await self.conn.execute(
             "insert into metadata_cache values (?, ?)",
             (str(swhid), json.dumps(metadata)),
         )
         await self.conn.commit()


 class BlobCache(AbstractCache):
     """ The blob cache maps SWHIDs of type `cnt` to the bytes of their
     archived content.

     The blob cache entry for a given content object is populated, at the
     latest, the first time the object is `read()`-d. It might be populated
     earlier on due to prefetching, e.g., when a directory pointing to the
     given content is listed for the first time. """

     async def __aenter__(self):
         await super().__aenter__()
         await self.conn.execute("create table if not exists blob_cache (swhid, blob)")
         await self.conn.commit()
         return self

     async def get(self, swhid: SWHID) -> Optional[bytes]:
         cursor = await self.conn.execute(
             "select blob from blob_cache where swhid=?", (str(swhid),)
         )
         cache = await cursor.fetchone()
         if cache:
             blob = cache[0]
             return blob
         else:
             return None

     async def set(self, swhid: SWHID, blob: bytes) -> None:
         await self.conn.execute(
             "insert into blob_cache values (?, ?)", (str(swhid), blob)
         )
         await self.conn.commit()


 class HistoryCache(AbstractCache):
     """ The history cache maps SWHIDs of type `rev` to the list of `rev`
     SWHIDs corresponding to all their revision ancestors, sorted in reverse
     topological order.

     Like the parents cache, the history cache is lazily populated and can be
     prefetched. To efficiently store the ancestor lists, the history cache
     represents ancestors as graph edges (a pair of two SWHID nodes), meaning
     the history cache is shared amongst all revisions' parents. """

     async def __aenter__(self):
         await super().__aenter__()
         await self.conn.execute(
             """
             create table if not exists history_graph (
                 src text not null,
                 dst text not null,
                 unique(src, dst)
             )
             """
         )
         await self.conn.execute(
             "create index if not exists index_history_graph on history_graph(src)"
         )
         await self.conn.commit()
         return self

     async def get(self, swhid: SWHID) -> Optional[List[SWHID]]:
         cursor = await self.conn.execute(
             """
             with recursive dfs(node) AS (
                 values(?)
                 union
                 select history_graph.dst
                 from history_graph
                 join dfs on history_graph.src = dfs.node
             )
             -- Do not keep the root node since it is not an ancestor
             select * from dfs limit -1 offset 1
             """,
             (str(swhid),),
         )
         cache = await cursor.fetchall()
         if not cache:
             return None

         history = []
         for parent in cache:
             parent = parent[0]
             try:
                 history.append(parse_swhid(parent))
             except ValidationError:
-                logging.warning(f"Cannot parse object from history cache: {parent}")
+                logging.warning("Cannot parse object from history cache: %s", parent)
         return history

     async def set(self, history: str) -> None:
         history = history.strip()
         edges = [edge.split(" ") for edge in history.split("\n")]
         await self.conn.executemany(
             "insert or ignore into history_graph values (?, ?)", edges
         )
         await self.conn.commit()


 class DirEntryCache:
     """ The direntry cache maps inodes representing directories to the
     entries they contain. Each entry comes with its name as well as file
     attributes (i.e., all that is needed to perform a detailed directory
     listing). Additional attributes of each directory entry should be looked
     up on an entry-by-entry basis, possibly hitting other caches.

     The direntry cache for a given dir is populated, at the latest, when the
     content of the directory is listed. More aggressive prefetching might
     happen. For instance, when first opening a dir a recursive listing of it
     can be retrieved from the remote backend and used to recursively populate
     the direntry cache for all (transitive) sub-directories. """

     @dataclass
     class LRU(OrderedDict):
         max_ram: int
         used_ram: int = field(init=False, default=0)

         def sizeof(self, value: Any) -> int:
             # Rough size estimate in bytes for a list of entries
             return len(value) * 1000

         def __getitem__(self, key: Any) -> Any:
             value = super().__getitem__(key)
             self.move_to_end(key)
             return value

         def __setitem__(self, key: Any, value: Any) -> None:
             if key in self:
                 self.move_to_end(key)
             else:
                 self.used_ram += self.sizeof(value)
             super().__setitem__(key, value)

             while self.used_ram > self.max_ram and self:
                 oldest = next(iter(self))
                 # uncharge the evicted value's size (sizeof expects a list, not the key)
                 self.used_ram -= self.sizeof(super().__getitem__(oldest))
                 del self[oldest]

     def __init__(self, conf: Dict[str, Any]):
         m = re.match(r"(\d+)\s*(.+)\s*", conf["maxram"])
         if not m:
-            logging.error(f"Cannot parse direntry maxram config: {conf['maxram']}")
+            logging.error("Cannot parse direntry maxram config: %s", conf["maxram"])
             sys.exit(1)
         num = float(m.group(1))
         unit = m.group(2).upper()
         if unit == "%":
             max_ram = int(num * virtual_memory().available / 100)
         else:
             units = {"B": 1, "KB": 10 ** 3, "MB": 10 ** 6, "GB": 10 ** 9}
             max_ram = int(float(num) * units[unit])
         self.lru_cache = self.LRU(max_ram)

     def get(self, direntry: FuseDirEntry) -> Optional[List[FuseEntry]]:
         return self.lru_cache.get(direntry.inode, None)

     def set(self, direntry: FuseDirEntry, entries: List[FuseEntry]) -> None:
         if isinstance(direntry, (ArchiveDir, MetaDir)):
             # The `archive/` and `meta/` directories are populated on the
             # fly, so we should never cache them
             pass
         else:
             self.lru_cache[direntry.inode] = entries
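The recurring change in this diff, here and in the two files that follow, swaps eagerly evaluated f-strings in logging calls for logging's lazy %-style formatting: the arguments are stored on the LogRecord and only interpolated if a handler actually emits the record, and linters such as pylint can then check the placeholders. Below is a minimal standalone sketch of the difference; the Expensive class is hypothetical, for illustration only.

import logging

logging.basicConfig(level=logging.WARNING)

class Expensive:
    # Hypothetical object whose string rendering is costly to compute.
    def __str__(self) -> str:
        print("rendering happened")
        return "rendered"

obj = Expensive()

# Eager: the f-string calls Expensive.__str__ *before* logging decides
# that DEBUG records are filtered out, so the cost is always paid.
logging.debug(f"value: {obj}")

# Lazy: the argument is only %-formatted if the record is actually
# emitted, so __str__ is never called at the WARNING level.
logging.debug("value: %s", obj)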
diff --git a/swh/fuse/cli.py b/swh/fuse/cli.py
index b8a4957..d26807a 100644
--- a/swh/fuse/cli.py
+++ b/swh/fuse/cli.py
@@ -1,201 +1,202 @@
 # Copyright (C) 2020 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 # WARNING: do not import unnecessary things here to keep cli startup time under
 # control
 import os
 from pathlib import Path
 from typing import Any, Dict

 import click

 from swh.core.cli import CONTEXT_SETTINGS
 from swh.core.cli import swh as swh_cli_group
 from swh.model.cli import SWHIDParamType

 # All generic config code should reside in swh.core.config
 DEFAULT_CONFIG_PATH = os.environ.get(
     "SWH_CONFIG_FILE", os.path.join(click.get_app_dir("swh"), "global.yml")
 )

 CACHE_HOME_DIR: Path = (
     Path(os.environ["XDG_CACHE_HOME"])
     if "XDG_CACHE_HOME" in os.environ
     else Path.home() / ".cache"
 )

 DEFAULT_CONFIG: Dict[str, Any] = {
     "cache": {
         "metadata": {"path": CACHE_HOME_DIR / "swh/fuse/metadata.sqlite"},
         "blob": {"path": CACHE_HOME_DIR / "swh/fuse/blob.sqlite"},
         "history": {"path": CACHE_HOME_DIR / "swh/fuse/history.sqlite"},
         "direntry": {"maxram": "10%"},
     },
     "web-api": {
         "url": "https://archive.softwareheritage.org/api/1",
         "auth-token": None,
     },
     "sharding": {"depth": 1, "length": 2},
 }


 @swh_cli_group.group(name="fs", context_settings=CONTEXT_SETTINGS)
 @click.option(
     "-C",
     "--config-file",
     default=None,
     type=click.Path(exists=True, dir_okay=False, path_type=str),
     help=f"Configuration file (default: {DEFAULT_CONFIG_PATH})",
 )
 @click.pass_context
 def fuse(ctx, config_file):
     """Software Heritage virtual file system"""

     import logging
     import pprint

     from swh.core import config

     if not config_file:
         config_file = DEFAULT_CONFIG_PATH

     try:
-        logging.info(f"Loading configuration from: {config_file}")
+        logging.info("Loading configuration from: %s", config_file)
         conf = config.read_raw_config(config.config_basepath(config_file))
         if not conf:
             raise ValueError(f"Cannot parse configuration file: {config_file}")

         if config_file == DEFAULT_CONFIG_PATH:
             try:
                 conf = conf["swh"]["fuse"]
             except KeyError:
                 pass

         # recursive merge not done by config.read
         conf = config.merge_configs(DEFAULT_CONFIG, conf)
     except Exception as err:
-        logging.warning(f"Using default configuration (cannot load custom one: {err})")
+        logging.warning("Using default configuration (cannot load custom one: %s)", err)
         conf = DEFAULT_CONFIG

-    logging.info(f"Read configuration: \n{pprint.pformat(conf)}")
+    logging.info("Read configuration: \n%s", pprint.pformat(conf))
     ctx.ensure_object(dict)
     ctx.obj["config"] = conf


 @fuse.command(name="mount")
 @click.argument(
     "path",
     required=True,
     metavar="PATH",
     type=click.Path(exists=True, dir_okay=True, file_okay=False),
 )
 @click.argument("swhids", nargs=-1, metavar="[SWHID]...", type=SWHIDParamType())
 @click.option(
     "-f/-d",
     "--foreground/--daemon",
     default=False,
     help="whether to run FUSE attached to the console (foreground) "
     "or daemonized in the background (default: daemon)",
 )
 @click.pass_context
 def mount(ctx, swhids, path, foreground):
     """Mount the Software Heritage virtual file system at PATH.

     If specified, objects referenced by the given SWHIDs will be prefetched
     and used to populate the virtual file system (VFS). Otherwise the VFS
     will be populated on-demand, when accessing its content.

     \b
     Example:

     \b
       $ mkdir swhfs
       $ swh fs mount swhfs/
       $ grep printf swhfs/archive/swh:1:cnt:c839dea9e8e6f0528b468214348fee8669b305b2
       printf("Hello, World!");
       $

     """

     import asyncio
     from contextlib import ExitStack
     import logging

     from daemon import DaemonContext

     from swh.fuse import fuse

     # TODO: set default logging settings when --log-config is not passed
     # DEFAULT_LOG_PATH = Path(".local/swh/fuse/mount.log")
     with ExitStack() as stack:
         if not foreground:
             # TODO: temporary fix until swh.core has the proper logging utilities
             # Disable logging config before daemonizing, and reset it once
             # daemonized to be sure to not close file handlers
             logging.shutdown()
             # Stay in the current working directory when spawning daemon
             cwd = os.getcwd()
             stack.enter_context(DaemonContext(working_directory=cwd))
             logging.config.dictConfig(
                 {
                     "version": 1,
                     "handlers": {
                         "syslog": {
                             "class": "logging.handlers.SysLogHandler",
                             "address": "/dev/log",
                         },
                     },
                     "root": {"level": ctx.obj["log_level"], "handlers": ["syslog"]},
                 }
             )

         conf = ctx.obj["config"]
         asyncio.run(fuse.main(swhids, path, conf))


 @fuse.command()
 @click.argument(
     "path",
     required=True,
     metavar="PATH",
     type=click.Path(exists=True, dir_okay=True, file_okay=False),
 )
 @click.pass_context
 def umount(ctx, path):
     """Unmount a mounted virtual file system.

     Note: this is equivalent to ``fusermount -u PATH``, which can be used to
     unmount any FUSE-based virtual file system. See ``man fusermount3``.
     """

     import logging
     import subprocess

     try:
         subprocess.run(["fusermount", "-u", path], check=True)
     except subprocess.CalledProcessError as err:
         logging.error(
-            f"cannot unmount virtual file system: "
-            f"\"{' '.join(err.cmd)}\" returned exit status {err.returncode}"
+            "cannot unmount virtual file system: '%s' returned exit status %d",
+            " ".join(err.cmd),
+            err.returncode,
         )
         ctx.exit(1)


 @fuse.command()
 @click.pass_context
 def clean(ctx):
     """Clean on-disk cache(s)."""

     def rm_cache(conf, cache_name):
         try:
             conf["cache"][cache_name]["path"].unlink(missing_ok=True)
         except KeyError:
             pass

     conf = ctx.obj["config"]
     for cache_name in ["blob", "metadata", "history"]:
         rm_cache(conf, cache_name)
diff --git a/swh/fuse/fuse.py b/swh/fuse/fuse.py
index 5f2feba..8d84016 100644
--- a/swh/fuse/fuse.py
+++ b/swh/fuse/fuse.py
@@ -1,265 +1,265 @@
 # Copyright (C) 2020 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import asyncio
 import errno
 import logging
 import os
 from pathlib import Path
 import time
 from typing import Any, Dict, List

 import pyfuse3
 import pyfuse3_asyncio
 import requests

 from swh.fuse.cache import FuseCache
 from swh.fuse.fs.entry import FuseDirEntry, FuseEntry, FuseFileEntry, FuseSymlinkEntry
 from swh.fuse.fs.mountpoint import Root
 from swh.model.identifiers import CONTENT, REVISION, SWHID
 from swh.web.client.client import WebAPIClient


 class Fuse(pyfuse3.Operations):
     """ Software Heritage Filesystem in Userspace (FUSE). Locally mount parts
     of the archive and navigate it as a virtual file system. """

     def __init__(
         self, root_path: Path, cache: FuseCache, conf: Dict[str, Any],
     ):
         super(Fuse, self).__init__()

         self._next_inode: int = pyfuse3.ROOT_INODE
         self._inode2entry: Dict[int, FuseEntry] = {}

         self.root = Root(fuse=self)
         self.conf = conf

         self.time_ns: int = time.time_ns()  # start time, used as timestamp
         self.gid = os.getgid()
         self.uid = os.getuid()

         self.web_api = WebAPIClient(
             conf["web-api"]["url"], conf["web-api"]["auth-token"]
         )
         self.cache = cache

     def shutdown(self) -> None:
         pass

     def _alloc_inode(self, entry: FuseEntry) -> int:
         """ Return a unique inode integer for a given entry """

         inode = self._next_inode
         self._next_inode += 1
         self._inode2entry[inode] = entry

         # TODO add inode recycling with invocation to invalidate_inode when
         # the dicts get too big

         return inode

     def inode2entry(self, inode: int) -> FuseEntry:
         """ Return the entry matching a given inode """

         try:
             return self._inode2entry[inode]
         except KeyError:
             raise pyfuse3.FUSEError(errno.ENOENT)

     async def get_metadata(self, swhid: SWHID) -> Any:
         """ Retrieve metadata for a given SWHID using Software Heritage API """

         cache = await self.cache.metadata.get(swhid)
         if cache:
             return cache

         try:
             typify = False  # Get the raw JSON from the API
             # TODO: async web API
             loop = asyncio.get_event_loop()
             metadata = await loop.run_in_executor(
                 None, self.web_api.get, swhid, typify
             )
             await self.cache.metadata.set(swhid, metadata)
             # Retrieve it from cache so it is correctly typed
             return await self.cache.metadata.get(swhid)
         except requests.HTTPError as err:
-            logging.error(f"Cannot fetch metadata for object {swhid}: {err}")
+            logging.error("Cannot fetch metadata for object %s: %s", swhid, err)
             raise

     async def get_blob(self, swhid: SWHID) -> bytes:
         """ Retrieve the blob bytes for a given content SWHID using Software
         Heritage API """

         if swhid.object_type != CONTENT:
             raise pyfuse3.FUSEError(errno.EINVAL)

         # Make sure the metadata cache is also populated with the given SWHID
         await self.get_metadata(swhid)

         cache = await self.cache.blob.get(swhid)
         if cache:
             return cache

         try:
             loop = asyncio.get_event_loop()
             resp = await loop.run_in_executor(None, self.web_api.content_raw, swhid)
             blob = b"".join(list(resp))
             await self.cache.blob.set(swhid, blob)
             return blob
         except requests.HTTPError as err:
-            logging.error(f"Cannot fetch blob for object {swhid}: {err}")
+            logging.error("Cannot fetch blob for object %s: %s", swhid, err)
             raise

     async def get_history(self, swhid: SWHID) -> List[SWHID]:
         if swhid.object_type != REVISION:
             raise pyfuse3.FUSEError(errno.EINVAL)

         cache = await self.cache.history.get(swhid)
         if cache:
             return cache

         try:
             # Use the swh-graph API to retrieve the full history very fast
             call = f"graph/visit/edges/{swhid}?edges=rev:rev"
             loop = asyncio.get_event_loop()
             history = await loop.run_in_executor(None, self.web_api._call, call)
             await self.cache.history.set(history.text)
             # Retrieve it from cache so it is correctly typed
             return await self.cache.history.get(swhid)
         except requests.HTTPError as err:
-            logging.error(f"Cannot fetch history for object {swhid}: {err}")
+            logging.error("Cannot fetch history for object %s: %s", swhid, err)
             # Ignore exception since swh-graph does not necessarily contain the
             # most recent artifacts from the archive. Computing the full history
             # from the Web API is too computationally intensive so simply return
             # an empty list.
             return []

     async def get_attrs(self, entry: FuseEntry) -> pyfuse3.EntryAttributes:
         """ Return entry attributes """

         attrs = pyfuse3.EntryAttributes()
         attrs.st_size = 0
         attrs.st_atime_ns = self.time_ns
         attrs.st_ctime_ns = self.time_ns
         attrs.st_mtime_ns = self.time_ns
         attrs.st_gid = self.gid
         attrs.st_uid = self.uid
         attrs.st_ino = entry.inode
         attrs.st_mode = entry.mode
         attrs.st_size = await entry.size()
         return attrs

     async def getattr(
         self, inode: int, _ctx: pyfuse3.RequestContext
     ) -> pyfuse3.EntryAttributes:
         """ Get attributes for a given inode """

         entry = self.inode2entry(inode)
         return await self.get_attrs(entry)

     async def opendir(self, inode: int, _ctx: pyfuse3.RequestContext) -> int:
         """ Open a directory referred to by a given inode """

         # Re-use inode as directory handle
         return inode

     async def readdir(self, fh: int, offset: int, token: pyfuse3.ReaddirToken) -> None:
         """ Read entries in an open directory """

         # opendir() uses inode as directory handle
         inode = fh
         direntry = self.inode2entry(inode)
         assert isinstance(direntry, FuseDirEntry)

         next_id = offset + 1
         try:
             async for entry in direntry.get_entries(offset):
                 name = os.fsencode(entry.name)
                 attrs = await self.get_attrs(entry)
                 if not pyfuse3.readdir_reply(token, name, attrs, next_id):
                     break
                 next_id += 1
                 self._inode2entry[attrs.st_ino] = entry
         except Exception as err:
-            logging.exception(f"Cannot readdir: {err}")
+            logging.exception("Cannot readdir: %s", err)
             raise pyfuse3.FUSEError(errno.ENOENT)

     async def open(
         self, inode: int, _flags: int, _ctx: pyfuse3.RequestContext
     ) -> pyfuse3.FileInfo:
         """ Open an inode and return a unique file handle """

         # Re-use inode as file handle
         return pyfuse3.FileInfo(fh=inode, keep_cache=True)

     async def read(self, fh: int, offset: int, length: int) -> bytes:
         """ Read `length` bytes from file handle `fh` at position `offset` """

         # open() uses inode as file handle
         inode = fh
         entry = self.inode2entry(inode)
         assert isinstance(entry, FuseFileEntry)

         try:
             data = await entry.get_content()
             return data[offset : offset + length]
         except Exception as err:
-            logging.exception(f"Cannot read: {err}")
+            logging.exception("Cannot read: %s", err)
             raise pyfuse3.FUSEError(errno.ENOENT)

     async def lookup(
         self, parent_inode: int, name: str, _ctx: pyfuse3.RequestContext
     ) -> pyfuse3.EntryAttributes:
         """ Look up a directory entry by name and get its attributes """

         name = os.fsdecode(name)
         parent_entry = self.inode2entry(parent_inode)
         assert isinstance(parent_entry, FuseDirEntry)

         try:
             lookup_entry = await parent_entry.lookup(name)
             if lookup_entry:
                 return await self.get_attrs(lookup_entry)
             else:
                 raise ValueError(f"unknown name: {name}")
         except Exception as err:
-            logging.exception(f"Cannot lookup: {err}")
+            logging.exception("Cannot lookup: %s", err)
             raise pyfuse3.FUSEError(errno.ENOENT)

     async def readlink(self, inode: int, _ctx: pyfuse3.RequestContext) -> bytes:
         entry = self.inode2entry(inode)
         assert isinstance(entry, FuseSymlinkEntry)
         return os.fsencode(entry.get_target())


 async def main(swhids: List[SWHID], root_path: Path, conf: Dict[str, Any]) -> None:
     """ swh-fuse CLI entry-point """

     # Use pyfuse3 asyncio layer to match the rest of Software Heritage codebase
     pyfuse3_asyncio.enable()

     async with FuseCache(conf["cache"]) as cache:
         fs = Fuse(root_path, cache, conf)

         # Initially populate the cache
         for swhid in swhids:
             try:
                 await fs.get_metadata(swhid)
             except Exception as err:
-                logging.exception(f"Cannot prefetch object {swhid}: {err}")
+                logging.exception("Cannot prefetch object %s: %s", swhid, err)

         fuse_options = set(pyfuse3.default_options)
         fuse_options.add("fsname=swhfs")
         if logging.root.level <= logging.DEBUG:
             fuse_options.add("debug")

         try:
             pyfuse3.init(fs, root_path, fuse_options)
             await pyfuse3.main()
         except Exception as err:
-            logging.error(f"Error running FUSE: {err}")
+            logging.error("Error running FUSE: %s", err)
         finally:
             fs.shutdown()
             pyfuse3.close(unmount=True)
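To see how Fuse.get_history and HistoryCache fit together, the sketch below feeds HistoryCache.set an edge list shaped like the graph/visit/edges/<SWHID>?edges=rev:rev response used above, then reads the ancestors back through the recursive SQL query. The revision SWHIDs are made up, and the in-memory flag is the testing option handled by AbstractCache.__aenter__:

import asyncio

from swh.fuse.cache import HistoryCache
from swh.model.identifiers import parse_swhid

# Made-up revision SWHIDs (40 hex digits each).
REV_A = "swh:1:rev:" + "aa" * 20
REV_B = "swh:1:rev:" + "bb" * 20
REV_C = "swh:1:rev:" + "cc" * 20

# One "src dst" pair per line, the format consumed by HistoryCache.set().
edges = f"{REV_A} {REV_B}\n{REV_B} {REV_C}"

async def demo() -> None:
    async with HistoryCache({"in-memory": True}) as cache:
        await cache.set(edges)
        # The recursive CTE walks src -> dst edges from REV_A and drops
        # the root node itself, yielding only its ancestors.
        history = await cache.get(parse_swhid(REV_A))
        print([str(rev) for rev in history])  # ancestors, e.g. [REV_B, REV_C]

asyncio.run(demo())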