diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py index 5650ef7..6e28e5f 100644 --- a/swh/objstorage/api/client.py +++ b/swh/objstorage/api/client.py @@ -1,94 +1,89 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import RPCClient from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError, ObjStorageAPIError from swh.objstorage.objstorage import DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT class RemoteObjStorage: """Proxy to a remote object storage. This class allows to connect to an object storage server via http protocol. Attributes: url (string): The url of the server to connect. Must end with a '/' session: The session to send requests. """ def __init__(self, **kwargs): self._proxy = RPCClient( api_exception=ObjStorageAPIError, reraise_exceptions=[ObjNotFoundError, Error], **kwargs, ) def check_config(self, *, check_write): return self._proxy.post("check_config", {"check_write": check_write}) def __contains__(self, obj_id): return self._proxy.post("content/contains", {"obj_id": obj_id}) def add(self, content, obj_id=None, check_presence=True): return self._proxy.post( "content/add", {"content": content, "obj_id": obj_id, "check_presence": check_presence}, ) def add_batch(self, contents, check_presence=True): return self._proxy.post( "content/add/batch", {"contents": contents, "check_presence": check_presence,}, ) def restore(self, content, obj_id=None, *args, **kwargs): return self.add(content, obj_id, check_presence=False) def get(self, obj_id): return self._proxy.post("content/get", {"obj_id": obj_id}) def get_batch(self, obj_ids): return self._proxy.post("content/get/batch", {"obj_ids": obj_ids}) def check(self, obj_id): return self._proxy.post("content/check", {"obj_id": obj_id}) def delete(self, obj_id): # deletion permission are checked server-side return self._proxy.post("content/delete", {"obj_id": obj_id}) # Management methods def get_random(self, batch_size): return self._proxy.post("content/get/random", {"batch_size": batch_size}) # Streaming methods def add_stream(self, content_iter, obj_id, check_presence=True): - obj_id = hashutil.hash_to_hex(obj_id) - return self._proxy.post_stream( - "content/add_stream/{}".format(obj_id), - params={"check_presence": check_presence}, - data=content_iter, - ) + raise NotImplementedError def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): obj_id = hashutil.hash_to_hex(obj_id) return self._proxy.get_stream( "content/get_stream/{}".format(obj_id), chunk_size=chunk_size ) def __iter__(self): yield from self._proxy.get_stream("content") def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): params = {"limit": limit} if last_obj_id: params["last_obj_id"] = hashutil.hash_to_hex(last_obj_id) yield from self._proxy.get_stream("content", params=params) diff --git a/swh/objstorage/api/server.py b/swh/objstorage/api/server.py index a906c27..aa3f668 100644 --- a/swh/objstorage/api/server.py +++ b/swh/objstorage/api/server.py @@ -1,271 +1,235 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import json import os import aiohttp.web from swh.core.api.asynchronous import 
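# A minimal usage sketch of the RemoteObjStorage client defined above. The URL
# is hypothetical and an objstorage RPC server must be reachable there; note
# that add_stream() now raises NotImplementedError on the client side.
from swh.objstorage.api.client import RemoteObjStorage

storage = RemoteObjStorage(url="http://objstorage.example.org:5003/")
content = b"some raw object payload"
obj_id = storage.add(content)            # POST content/add
assert obj_id in storage                 # POST content/contains
assert storage.get(obj_id) == content    # POST content/get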
RPCServerApp, decode_request from swh.core.api.asynchronous import encode_data_server as encode_data -from swh.core.api.serializers import SWHJSONDecoder, msgpack_loads from swh.core.config import read as config_read from swh.core.statsd import statsd from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.factory import get_objstorage from swh.objstorage.objstorage import DEFAULT_LIMIT def timed(f): async def w(*a, **kw): with statsd.timed( "swh_objstorage_request_duration_seconds", tags={"endpoint": f.__name__} ): return await f(*a, **kw) return w @timed async def index(request): return aiohttp.web.Response(body="SWH Objstorage API server") @timed async def check_config(request): req = await decode_request(request) return encode_data(request.app["objstorage"].check_config(**req)) @timed async def contains(request): req = await decode_request(request) return encode_data(request.app["objstorage"].__contains__(**req)) @timed async def add_bytes(request): req = await decode_request(request) statsd.increment( "swh_objstorage_in_bytes_total", len(req["content"]), tags={"endpoint": "add_bytes"}, ) return encode_data(request.app["objstorage"].add(**req)) @timed async def add_batch(request): req = await decode_request(request) return encode_data(request.app["objstorage"].add_batch(**req)) @timed async def get_bytes(request): req = await decode_request(request) ret = request.app["objstorage"].get(**req) statsd.increment( "swh_objstorage_out_bytes_total", len(ret), tags={"endpoint": "get_bytes"} ) return encode_data(ret) @timed async def get_batch(request): req = await decode_request(request) return encode_data(request.app["objstorage"].get_batch(**req)) @timed async def check(request): req = await decode_request(request) return encode_data(request.app["objstorage"].check(**req)) @timed async def delete(request): req = await decode_request(request) return encode_data(request.app["objstorage"].delete(**req)) # Management methods @timed async def get_random_contents(request): req = await decode_request(request) return encode_data(request.app["objstorage"].get_random(**req)) # Streaming methods -@timed -async def add_stream(request): - hex_id = request.match_info["hex_id"] - obj_id = hashutil.hash_to_bytes(hex_id) - check_pres = request.query.get("check_presence", "").lower() == "true" - objstorage = request.app["objstorage"] - - if check_pres and obj_id in objstorage: - return encode_data(obj_id) - - # XXX this really should go in a decode_stream_request coroutine in - # swh.core, but since py35 does not support async generators, it cannot - # easily be made for now - content_type = request.headers.get("Content-Type") - if content_type == "application/x-msgpack": - decode = msgpack_loads - elif content_type == "application/json": - decode = lambda x: json.loads(x, cls=SWHJSONDecoder) # noqa - else: - raise ValueError("Wrong content type `%s` for API request" % content_type) - - buffer = b"" - with objstorage.chunk_writer(obj_id) as write: - while not request.content.at_eof(): - data, eot = await request.content.readchunk() - buffer += data - if eot: - write(decode(buffer)) - buffer = b"" - - return encode_data(obj_id) - - @timed async def get_stream(request): hex_id = request.match_info["hex_id"] obj_id = hashutil.hash_to_bytes(hex_id) response = aiohttp.web.StreamResponse() await response.prepare(request) for chunk in request.app["objstorage"].get_stream(obj_id, 2 << 20): await response.write(chunk) await response.write_eof() return response @timed 
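# The streaming endpoints (get_stream above, list_content just below) share one
# aiohttp pattern: prepare a StreamResponse, write chunks as they are produced,
# then close the stream. A self-contained sketch with a stand-in chunk source
# instead of a real objstorage:
import aiohttp.web

async def stream_numbers(request):
    response = aiohttp.web.StreamResponse()
    response.enable_chunked_encoding()
    await response.prepare(request)
    for i in range(10):
        await response.write(b"chunk-%d\n" % i)
    await response.write_eof()
    return response

app = aiohttp.web.Application()
app.router.add_route("GET", "/numbers", stream_numbers)
# aiohttp.web.run_app(app, port=8080)  # serve locally to try it out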
async def list_content(request): last_obj_id = request.query.get("last_obj_id") if last_obj_id: last_obj_id = bytes.fromhex(last_obj_id) limit = int(request.query.get("limit", DEFAULT_LIMIT)) response = aiohttp.web.StreamResponse() response.enable_chunked_encoding() await response.prepare(request) for obj_id in request.app["objstorage"].list_content(last_obj_id, limit=limit): await response.write(obj_id) await response.write_eof() return response def make_app(config): """Initialize the remote api application. """ client_max_size = config.get("client_max_size", 1024 * 1024 * 1024) app = RPCServerApp(client_max_size=client_max_size) app.client_exception_classes = (ObjNotFoundError, Error) # retro compatibility configuration settings app["config"] = config app["objstorage"] = get_objstorage(**config["objstorage"]) app.router.add_route("GET", "/", index) app.router.add_route("POST", "/check_config", check_config) app.router.add_route("POST", "/content/contains", contains) app.router.add_route("POST", "/content/add", add_bytes) app.router.add_route("POST", "/content/add/batch", add_batch) app.router.add_route("POST", "/content/get", get_bytes) app.router.add_route("POST", "/content/get/batch", get_batch) app.router.add_route("POST", "/content/get/random", get_random_contents) app.router.add_route("POST", "/content/check", check) app.router.add_route("POST", "/content/delete", delete) app.router.add_route("GET", "/content", list_content) - app.router.add_route("POST", "/content/add_stream/{hex_id}", add_stream) app.router.add_route("GET", "/content/get_stream/{hex_id}", get_stream) return app def load_and_check_config(config_file): """Check the minimal configuration is set to run the api or raise an error explanation. Args: config_file (str): Path to the configuration file to load Raises: Error if the setup is not as expected Returns: configuration as a dict """ if not config_file: raise EnvironmentError("Configuration file must be defined") if not os.path.exists(config_file): raise FileNotFoundError("Configuration file %s does not exist" % (config_file,)) cfg = config_read(config_file) return validate_config(cfg) def validate_config(cfg): """Check the minimal configuration is set to run the api or raise an explanatory error. Args: cfg (dict): Loaded configuration. Raises: Error if the setup is not as expected Returns: configuration as a dict """ if "objstorage" not in cfg: raise KeyError("Invalid configuration; missing objstorage config entry") missing_keys = [] vcfg = cfg["objstorage"] if "cls" not in vcfg: raise KeyError("Invalid configuration; missing cls config entry") cls = vcfg["cls"] if cls == "pathslicing": # Backwards-compatibility: either get the deprecated `args` from the # objstorage config, or use the full config itself to check for keys args = vcfg.get("args", vcfg) for key in ("root", "slicing"): v = args.get(key) if v is None: missing_keys.append(key) if missing_keys: raise KeyError( "Invalid configuration; missing %s config entry" % (", ".join(missing_keys),) ) return cfg def make_app_from_configfile(): """Load configuration and then build application to run """ config_file = os.environ.get("SWH_CONFIG_FILENAME") config = load_and_check_config(config_file) return make_app(config=config) if __name__ == "__main__": print("Deprecated. 
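# For reference, a minimal configuration dict that satisfies validate_config for
# the pathslicing backend and can be handed to make_app; the path and sizes are
# illustrative only.
config = {
    "objstorage": {
        "cls": "pathslicing",
        "root": "/srv/softwareheritage/objects",  # storage root on disk
        "slicing": "0:2/2:4/4:6",                 # directory fan-out
    },
    # optional; make_app falls back to 1 GiB when the key is absent
    "client_max_size": 1024 * 1024 * 1024,
}
# validate_config(config) returns the dict unchanged once the required "cls",
# "root" and "slicing" entries are present.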
Use swh-objstorage") diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py index 12fb14d..f0b571d 100644 --- a/swh/objstorage/backends/pathslicing.py +++ b/swh/objstorage/backends/pathslicing.py @@ -1,429 +1,445 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections.abc import Iterator from contextlib import contextmanager from itertools import islice import os import random import tempfile from typing import List from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.objstorage import ( DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT, ID_HASH_ALGO, ID_HASH_LENGTH, ObjStorage, compressors, compute_hash, decompressors, ) BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 class PathSlicer: """Helper class to compute a path based on a hash. Used to compute a directory path based on the object hash according to a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. For instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will have the following computed path: - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 Args: root (str): path to the root directory of the storage on the disk. slicing (str): the slicing configuration. """ def __init__(self, root: str, slicing: str): self.root = root # Make a list of tuples where each tuple contains the beginning # and the end of each slicing. try: self.bounds = [ slice(*(int(x) if x else None for x in sbounds.split(":"))) for sbounds in slicing.split("/") if sbounds ] except TypeError: raise ValueError( "Invalid slicing declaration; " "it should be a of the form ':[/:]..." ) def check_config(self): """Check the slicing configuration is valid. Raises: ValueError: if the slicing configuration is invalid. """ if len(self): max_char = max( max(bound.start or 0, bound.stop or 0) for bound in self.bounds ) if ID_HASH_LENGTH < max_char: raise ValueError( "Algorithm %s has too short hash for slicing to char %d" % (ID_HASH_ALGO, max_char) ) def get_directory(self, hex_obj_id: str) -> str: """ Compute the storage directory of an object. See also: PathSlicer::get_path Args: hex_obj_id: object id as hexlified string. Returns: Absolute path (including root) to the directory that contains the given object id. """ return os.path.join(self.root, *self.get_slices(hex_obj_id)) def get_path(self, hex_obj_id: str) -> str: """ Compute the full path to an object into the current storage. See also: PathSlicer::get_directory Args: hex_obj_id(str): object id as hexlified string. Returns: Absolute path (including root) to the object corresponding to the given object id. """ return os.path.join(self.get_directory(hex_obj_id), hex_obj_id) def get_slices(self, hex_obj_id: str) -> List[str]: """Compute the path elements for the given hash. Args: hex_obj_id(str): object id as hexlified string. Returns: Relative path to the actual object corresponding to the given id as a list. 
""" assert len(hex_obj_id) == ID_HASH_LENGTH return [hex_obj_id[bound] for bound in self.bounds] def __len__(self) -> int: """Number of slices of the slicer""" return len(self.bounds) class PathSlicingObjStorage(ObjStorage): """Implementation of the ObjStorage API based on the hash of the content. On disk, an object storage is a directory tree containing files named after their object IDs. An object ID is a checksum of its content, depending on the value of the ID_HASH_ALGO constant (see swh.model.hashutil for its meaning). To avoid directories that contain too many files, the object storage has a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will be stored in the given object storages : - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 The files in the storage are stored in gzipped compressed format. Args: root (str): path to the root directory of the storage on the disk. slicing (str): string that indicates the slicing to perform on the hash of the content to know the path where it should be stored (see the documentation of the PathSlicer class). """ def __init__(self, root, slicing, compression="gzip", **kwargs): super().__init__(**kwargs) self.root = root self.slicer = PathSlicer(root, slicing) self.use_fdatasync = hasattr(os, "fdatasync") self.compression = compression self.check_config(check_write=False) def check_config(self, *, check_write): """Check whether this object storage is properly configured""" self.slicer.check_config() if not os.path.isdir(self.root): raise ValueError( 'PathSlicingObjStorage root "%s" is not a directory' % self.root ) if check_write: if not os.access(self.root, os.W_OK): raise PermissionError( 'PathSlicingObjStorage root "%s" is not writable' % self.root ) if self.compression not in compressors: raise ValueError( 'Unknown compression algorithm "%s" for ' "PathSlicingObjStorage" % self.compression ) return True def __contains__(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return os.path.isfile(self.slicer.get_path(hex_obj_id)) def __iter__(self): """Iterate over the object identifiers currently available in the storage. Warning: with the current implementation of the object storage, this method will walk the filesystem to list objects, meaning that listing all objects will be very slow for large storages. You almost certainly don't want to use this method in production. Return: Iterator over object IDs """ def obj_iterator(): # XXX hackish: it does not verify that the depth of found files # matches the slicing depth of the storage for root, _dirs, files in os.walk(self.root): _dirs.sort() for f in sorted(files): yield bytes.fromhex(f) return obj_iterator() def __len__(self): """Compute the number of objects available in the storage. Warning: this currently uses `__iter__`, its warning about bad performances applies Return: number of objects contained in the storage """ return sum(1 for i in self) def add(self, content, obj_id=None, check_presence=True): if obj_id is None: obj_id = compute_hash(content) if check_presence and obj_id in self: # If the object is already present, return immediately. 
return obj_id hex_obj_id = hashutil.hash_to_hex(obj_id) if not isinstance(content, Iterator): content = [content] compressor = compressors[self.compression]() with self._write_obj_file(hex_obj_id) as f: for chunk in content: f.write(compressor.compress(chunk)) f.write(compressor.flush()) return obj_id def get(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) # Open the file and return its content as bytes hex_obj_id = hashutil.hash_to_hex(obj_id) d = decompressors[self.compression]() with open(self.slicer.get_path(hex_obj_id), "rb") as f: out = d.decompress(f.read()) if d.unused_data: raise Error("Corrupt object %s: trailing data found" % hex_obj_id,) return out def check(self, obj_id): try: data = self.get(obj_id) except OSError: hex_obj_id = hashutil.hash_to_hex(obj_id) raise Error("Corrupt object %s: not a proper compressed file" % hex_obj_id,) checksums = hashutil.MultiHash.from_data( data, hash_names=[ID_HASH_ALGO] ).digest() actual_obj_id = checksums[ID_HASH_ALGO] hex_obj_id = hashutil.hash_to_hex(obj_id) if hex_obj_id != hashutil.hash_to_hex(actual_obj_id): raise Error( "Corrupt object %s should have id %s" % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id)) ) def delete(self, obj_id): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) try: os.remove(self.slicer.get_path(hex_obj_id)) except FileNotFoundError: raise ObjNotFoundError(obj_id) return True # Management methods def get_random(self, batch_size): def get_random_content(self, batch_size): """ Get a batch of content inside a single directory. Returns: a tuple (batch size, batch). """ dirs = [] for level in range(len(self.slicer)): path = os.path.join(self.root, *dirs) dir_list = next(os.walk(path))[1] if "tmp" in dir_list: dir_list.remove("tmp") dirs.append(random.choice(dir_list)) path = os.path.join(self.root, *dirs) content_list = next(os.walk(path))[2] length = min(batch_size, len(content_list)) return ( length, map(hashutil.hash_to_bytes, random.sample(content_list, length)), ) while batch_size: length, it = get_random_content(self, batch_size) batch_size = batch_size - length yield from it # Streaming methods @contextmanager def chunk_writer(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) compressor = compressors[self.compression]() with self._write_obj_file(hex_obj_id) as f: yield lambda c: f.write(compressor.compress(c)) f.write(compressor.flush()) def add_stream(self, content_iter, obj_id, check_presence=True): + """Add a new object to the object storage using streaming. + + This function is identical to add() except it takes a generator that + yields the chunked content instead of the whole content at once. + + Args: + content (bytes): chunked generator that yields the object's raw + content to add in storage. + obj_id (bytes): object identifier + check_presence (bool): indicate if the presence of the + content should be verified before adding the file. + + Returns: + the id (bytes) of the object into the storage. 
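# check() above is plain content addressing: recompute the checksum of the
# stored bytes and compare it with the object id. The same verification with
# only the standard library, assuming the sha1-based hash that the docstring
# examples use:
import hashlib

def verify(obj_id: bytes, content: bytes) -> bool:
    # An object is intact iff its id equals the checksum of its content.
    return hashlib.sha1(content).digest() == obj_id

content = b"hello objstorage"
obj_id = hashlib.sha1(content).digest()
assert verify(obj_id, content)
assert not verify(obj_id, content + b"tampered")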
+ + """ if check_presence and obj_id in self: return obj_id with self.chunk_writer(obj_id) as writer: for chunk in content_iter: writer(chunk) return obj_id def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) decompressor = decompressors[self.compression]() with open(self.slicer.get_path(hex_obj_id), "rb") as f: while True: raw = f.read(chunk_size) if not raw: break r = decompressor.decompress(raw) if not r: continue yield r def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): if last_obj_id: it = self.iter_from(last_obj_id) else: it = iter(self) return islice(it, limit) def iter_from(self, obj_id, n_leaf=False): hex_obj_id = hashutil.hash_to_hex(obj_id) slices = self.slicer.get_slices(hex_obj_id) rlen = len(self.root.split("/")) i = 0 for root, dirs, files in os.walk(self.root): if not dirs: i += 1 level = len(root.split("/")) - rlen dirs.sort() if dirs and root == os.path.join(self.root, *slices[:level]): cslice = slices[level] for d in dirs[:]: if d < cslice: dirs.remove(d) for f in sorted(files): if f > hex_obj_id: yield bytes.fromhex(f) if n_leaf: yield i @contextmanager def _write_obj_file(self, hex_obj_id): """ Context manager for writing object files to the object storage. During writing, data are written to a temporary file, which is atomically renamed to the right file name after closing. Usage sample: with objstorage._write_obj_file(hex_obj_id): f.write(obj_data) Yields: a file-like object open for writing bytes. """ # Get the final paths and create the directory if absent. dir = self.slicer.get_directory(hex_obj_id) if not os.path.isdir(dir): os.makedirs(dir, DIR_MODE, exist_ok=True) path = os.path.join(dir, hex_obj_id) # Create a temporary file. (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir) # Open the file and yield it for writing. tmp_f = os.fdopen(tmp, "wb") yield tmp_f # Make sure the contents of the temporary file are written to disk tmp_f.flush() if self.use_fdatasync: os.fdatasync(tmp) else: os.fsync(tmp) # Then close the temporary file and move it to the right path. tmp_f.close() os.chmod(tmp_path, FILE_MODE) os.rename(tmp_path, path) diff --git a/swh/objstorage/backends/seaweedfs/http.py b/swh/objstorage/backends/seaweedfs/http.py index df027ec..5b869c1 100644 --- a/swh/objstorage/backends/seaweedfs/http.py +++ b/swh/objstorage/backends/seaweedfs/http.py @@ -1,101 +1,99 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging from typing import Iterator from urllib.parse import urljoin, urlparse import requests from swh.objstorage.objstorage import DEFAULT_LIMIT LOGGER = logging.getLogger(__name__) class HttpFiler: """Simple class that encapsulates access to a seaweedfs filer service. Objects are expected to be in a single directory. 
TODO: handle errors """ def __init__(self, url): if not url.endswith("/"): url = url + "/" self.url = url self.baseurl = urljoin(url, "/") self.basepath = urlparse(url).path self.session = requests.Session() self.session.headers["Accept"] = "application/json" self.batchsize = DEFAULT_LIMIT def build_url(self, path): assert path == self.basepath or path.startswith(self.basepath) return urljoin(self.baseurl, path) def get(self, remote_path): url = self.build_url(remote_path) LOGGER.debug("Get file %s", url) resp = self.session.get(url) resp.raise_for_status() return resp.content def exists(self, remote_path): url = self.build_url(remote_path) LOGGER.debug("Check file %s", url) return self.session.head(url).status_code == 200 def put(self, fp, remote_path): url = self.build_url(remote_path) LOGGER.debug("Put file %s", url) return self.session.post(url, files={"file": fp}) def delete(self, remote_path): url = self.build_url(remote_path) LOGGER.debug("Delete file %s", url) return self.session.delete(url) def iterfiles(self, last_file_name: str = "") -> Iterator[str]: """yield absolute file names Args: last_file_name: if given, starts from the file just after; must be basename. Yields: absolute file names """ for entry in self._iter_dir(last_file_name): fullpath = entry["FullPath"] if entry["Mode"] & 1 << 31: # it's a directory, recurse # see https://pkg.go.dev/io/fs#FileMode yield from self.iterfiles(fullpath) else: yield fullpath def _iter_dir(self, last_file_name: str = ""): params = {"limit": self.batchsize} if last_file_name: params["lastFileName"] = last_file_name LOGGER.debug("List directory %s", self.url) while True: rsp = self.session.get(self.url, params=params) if rsp.ok: dircontent = rsp.json() if dircontent["Entries"]: yield from dircontent["Entries"] if not dircontent["ShouldDisplayLoadMore"]: break params["lastFileName"] = dircontent["LastFileName"] else: - LOGGER.error( - 'Error listing "%s". [HTTP %d]', self.url, rsp.status_code - ) + LOGGER.error('Error listing "%s". [HTTP %d]', self.url, rsp.status_code) break
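# The directory listing in _iter_dir is cursor-style pagination: fetch a page
# of entries, remember the last file name, repeat until the filer reports there
# is nothing more. Reduced to its essentials, with the JSON field names taken
# from the responses handled above and a hypothetical filer URL:
import requests

def iter_entries(url, batchsize=1000):
    session = requests.Session()
    session.headers["Accept"] = "application/json"
    params = {"limit": batchsize}
    while True:
        rsp = session.get(url, params=params)
        rsp.raise_for_status()
        page = rsp.json()
        yield from (page["Entries"] or [])
        if not page["ShouldDisplayLoadMore"]:
            break
        # resume right after the last entry of the current page
        params["lastFileName"] = page["LastFileName"]

# for entry in iter_entries("http://filer.example.org:8888/swh/"):
#     print(entry["FullPath"])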