diff --git a/mypy.ini b/mypy.ini
index 256b044..a33c451 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,30 +1,33 @@
[mypy]
namespace_packages = True
warn_unused_ignores = True

# 3rd party libraries without stubs (yet)

[mypy-azure.*]
ignore_missing_imports = True

[mypy-libcloud.*]
ignore_missing_imports = True

+[mypy-msgpack.*]
+ignore_missing_imports = True
+
[mypy-pkg_resources.*]
ignore_missing_imports = True

[mypy-pytest.*]
ignore_missing_imports = True

[mypy-requests_toolbelt.*]
ignore_missing_imports = True

[mypy-psycopg2.*]
ignore_missing_imports = True

[mypy-swh.perfecthash.*]
ignore_missing_imports = True

[mypy-sh.*]
ignore_missing_imports = True
diff --git a/requirements.txt b/requirements.txt
index 96c2cb9..47cff8c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,10 @@
# Add here external Python modules dependencies, one per line. Module names
# should match https://pypi.python.org/pypi names. For the full spec or
# dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html
click
+msgpack
typing-extensions >= 3.7.4

# seaweedfs backend
requests
diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py
index 660f351..f8c4cc5 100644
--- a/swh/objstorage/api/client.py
+++ b/swh/objstorage/api/client.py
@@ -1,49 +1,55 @@
# Copyright (C) 2015-2022  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from typing import Iterator, Optional

+import msgpack
+
from swh.core.api import RPCClient
-from swh.core.utils import iter_chunks
from swh.model import hashutil
-from swh.objstorage.constants import DEFAULT_LIMIT, ID_DIGEST_LENGTH
+from swh.objstorage.constants import DEFAULT_LIMIT
from swh.objstorage.exc import Error, ObjNotFoundError, ObjStorageAPIError
-from swh.objstorage.interface import ObjId, ObjStorageInterface
+from swh.objstorage.interface import CompositeObjId, ObjId, ObjStorageInterface


class RemoteObjStorage(RPCClient):
    """Proxy to a remote object storage.

    This class allows connecting to an object storage server over HTTP.

    Attributes:
        url (string): The URL of the server to connect to. Must end with a '/'
        session: The session used to send requests.
""" api_exception = ObjStorageAPIError reraise_exceptions = [ObjNotFoundError, Error] backend_class = ObjStorageInterface def restore(self: ObjStorageInterface, content: bytes, obj_id: ObjId) -> None: return self.add(content, obj_id, check_presence=False) - def __iter__(self): + def __iter__(self) -> Iterator[CompositeObjId]: yield from self.list_content() def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT, - ) -> Iterator[ObjId]: + ) -> Iterator[CompositeObjId]: params = {"limit": limit} if last_obj_id: params["last_obj_id"] = hashutil.hash_to_hex(last_obj_id) - yield from iter_chunks( - self._get_stream("content", params=params), chunk_size=ID_DIGEST_LENGTH + response = self.raw_verb( + "get", + "content", + headers={"accept": "application/x-msgpack"}, + params=params, + stream=True, ) + yield from msgpack.Unpacker(response.raw, raw=True) diff --git a/swh/objstorage/api/server.py b/swh/objstorage/api/server.py index 02bd645..d715c30 100644 --- a/swh/objstorage/api/server.py +++ b/swh/objstorage/api/server.py @@ -1,186 +1,190 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import contextlib import functools import logging import os +from typing import Iterator from flask import request +import msgpack from swh.core.api import RPCServerApp from swh.core.api import encode_data_server as encode_data from swh.core.api import error_handler from swh.core.config import read as config_read from swh.core.statsd import statsd from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.factory import get_objstorage as get_swhobjstorage from swh.objstorage.interface import ObjStorageInterface from swh.objstorage.objstorage import DEFAULT_LIMIT def timed(f): @functools.wraps(f) def w(*a, **kw): with statsd.timed( "swh_objstorage_request_duration_seconds", tags={"endpoint": f.__name__} ): return f(*a, **kw) return w @contextlib.contextmanager def timed_context(f_name): with statsd.timed( "swh_objstorage_request_duration_seconds", tags={"endpoint": f_name} ): yield def get_objstorage(): global objstorage if objstorage is None: objstorage = get_swhobjstorage(**app.config["objstorage"]) return objstorage class ObjStorageServerApp(RPCServerApp): client_exception_classes = (ObjNotFoundError, Error) method_decorators = [timed] def pre_add(self, kw): """Called before the 'add' method.""" statsd.increment( "swh_objstorage_in_bytes_total", len(kw["content"]), tags={"endpoint": "add_bytes"}, ) def post_get(self, ret, kw): """Called after the 'get' method.""" statsd.increment( "swh_objstorage_out_bytes_total", len(ret), tags={"endpoint": "get_bytes"} ) app = ObjStorageServerApp( __name__, backend_class=ObjStorageInterface, backend_factory=get_objstorage, ) objstorage = None @app.errorhandler(Error) def argument_error_handler(exception): return error_handler(exception, encode_data, status_code=400) @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) @app.route("/") @timed def index(): return "SWH Objstorage API server" @app.route("/content") def list_content(): last_obj_id = request.args.get("last_obj_id") if last_obj_id: last_obj_id = bytes.fromhex(last_obj_id) limit = int(request.args.get("limit", DEFAULT_LIMIT)) - def generate(): + def generate() -> Iterator[bytes]: with timed_context("list_content"): - 
-            yield from get_objstorage().list_content(last_obj_id, limit=limit)
+            packer = msgpack.Packer(use_bin_type=True)
+            for obj in get_objstorage().list_content(last_obj_id, limit=limit):
+                yield packer.pack(obj)

    return app.response_class(generate())


api_cfg = None


def load_and_check_config(config_file):
    """Check that the minimal configuration is set to run the api, or raise an
    explanatory error.

    Args:
        config_file (str): Path to the configuration file to load

    Raises:
        Error if the setup is not as expected

    Returns:
        configuration as a dict

    """
    if not config_file:
        raise EnvironmentError("Configuration file must be defined")

    if not os.path.exists(config_file):
        raise FileNotFoundError("Configuration file %s does not exist" % (config_file,))

    cfg = config_read(config_file)
    return validate_config(cfg)


def validate_config(cfg):
    """Check that the minimal configuration is set to run the api, or raise an
    explanatory error.

    Args:
        cfg (dict): Loaded configuration.

    Raises:
        Error if the setup is not as expected

    Returns:
        configuration as a dict

    """
    if "objstorage" not in cfg:
        raise KeyError("Invalid configuration; missing objstorage config entry")

    missing_keys = []
    vcfg = cfg["objstorage"]
    if "cls" not in vcfg:
        raise KeyError("Invalid configuration; missing cls config entry")

    cls = vcfg["cls"]
    if cls == "pathslicing":
        for key in ("root", "slicing"):
            v = vcfg.get(key)
            if v is None:
                missing_keys.append(key)

        if missing_keys:
            raise KeyError(
                "Invalid configuration; missing %s config entry"
                % (", ".join(missing_keys),)
            )

    return cfg


def make_app_from_configfile():
    """Load configuration and then build application to run"""
    global api_cfg
    if not api_cfg:
        config_path = os.environ.get("SWH_CONFIG_FILENAME")
        api_cfg = load_and_check_config(config_path)
        app.config.update(api_cfg)
    handler = logging.StreamHandler()
    app.logger.addHandler(handler)
    return app


if __name__ == "__main__":
    print("Deprecated. Use swh-objstorage")
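The endpoint above replaces fixed-width id chunks with a stream of self-delimiting msgpack messages: the server packs each object id as one message, and the client lazily unpacks them from the raw response. A standalone sketch of that framing (not part of the patch; a plain `io.BytesIO` stands in for the HTTP stream):

```python
import io

import msgpack

# Server side: pack each object id dict as an independent msgpack message.
ids = [{"sha1": b"\x01" * 20}, {"sha1": b"\x02" * 20}]
packer = msgpack.Packer(use_bin_type=True)
payload = b"".join(packer.pack(obj) for obj in ids)

# Client side: feed the byte stream to an Unpacker and iterate lazily.
# raw=True (as in client.py above) decodes map keys as bytes, so the
# consumer sees {b"sha1": ...} rather than {"sha1": ...}.
unpacked = list(msgpack.Unpacker(io.BytesIO(payload), raw=True))
assert unpacked == [{b"sha1": b"\x01" * 20}, {b"sha1": b"\x02" * 20}]
```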
diff --git a/swh/objstorage/backends/azure.py b/swh/objstorage/backends/azure.py
index d63c847..23a2b12 100644
--- a/swh/objstorage/backends/azure.py
+++ b/swh/objstorage/backends/azure.py
@@ -1,410 +1,410 @@
# Copyright (C) 2016-2021  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import asyncio
import contextlib
import datetime
from itertools import product
import string
from typing import Dict, Iterator, List, Optional, Union
import warnings

from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from azure.storage.blob import (
    ContainerClient,
    ContainerSasPermissions,
    generate_container_sas,
)
from azure.storage.blob.aio import ContainerClient as AsyncContainerClient

from swh.model import hashutil
from swh.objstorage.exc import Error, ObjNotFoundError
-from swh.objstorage.interface import ObjId
+from swh.objstorage.interface import CompositeObjId, ObjId
from swh.objstorage.objstorage import (
    ObjStorage,
    compressors,
    compute_hash,
    decompressors,
)
from swh.objstorage.utils import call_async


def get_container_url(
    account_name: str,
    account_key: str,
    container_name: str,
    access_policy: str = "read_only",
    expiry: datetime.timedelta = datetime.timedelta(days=365),
    **kwargs,
) -> str:
    """Get the full url, for the given container on the given account, with a
    Shared Access Signature granting the specified access policy.

    Args:
        account_name: name of the storage account for which to generate the URL
        account_key: shared account key of the storage account used to generate the SAS
        container_name: name of the container for which to grant access in the
            storage account
        access_policy: one of ``read_only``, ``append_only``, ``full``
        expiry: the interval in the future with which the signature will expire

    Returns:
        the full URL of the container, with the shared access signature.
    """
    access_policies = {
        "read_only": ContainerSasPermissions(
            read=True, list=True, delete=False, write=False
        ),
        "append_only": ContainerSasPermissions(
            read=True, list=True, delete=False, write=True
        ),
        "full": ContainerSasPermissions(read=True, list=True, delete=True, write=True),
    }

    current_time = datetime.datetime.utcnow()

    signature = generate_container_sas(
        account_name,
        container_name,
        account_key=account_key,
        permission=access_policies[access_policy],
        start=current_time + datetime.timedelta(minutes=-1),
        expiry=current_time + expiry,
    )

    return f"https://{account_name}.blob.core.windows.net/{container_name}?{signature}"


class AzureCloudObjStorage(ObjStorage):
    """ObjStorage backend for Azure blob storage accounts.

    Args:
        container_url: the URL of the container in which the objects are stored.
        account_name: (deprecated) the name of the storage account under which
            objects are stored
        api_secret_key: (deprecated) the shared account key
        container_name: (deprecated) the name of the container under which
            objects are stored
        compression: the compression algorithm used to compress objects in storage

    Notes:
        The container url should contain the credentials via a "Shared Access
        Signature". The :func:`get_container_url` helper can be used to generate
        such a URL from the account's access keys. The ``account_name``,
        ``api_secret_key`` and ``container_name`` arguments are deprecated.
    """

    def __init__(
        self,
        container_url: Optional[str] = None,
        account_name: Optional[str] = None,
        api_secret_key: Optional[str] = None,
        container_name: Optional[str] = None,
        compression="gzip",
        **kwargs,
    ):
        if container_url is None:
            if account_name is None or api_secret_key is None or container_name is None:
                raise ValueError(
                    "AzureCloudObjStorage must have a container_url or all three "
                    "account_name, api_secret_key and container_name"
                )
            else:
                warnings.warn(
                    "The Azure objstorage account secret key parameters are "
                    "deprecated, please use container URLs instead.",
                    DeprecationWarning,
                )
                container_url = get_container_url(
                    account_name=account_name,
                    account_key=api_secret_key,
                    container_name=container_name,
                    access_policy="full",
                )

        super().__init__(**kwargs)
        self.container_url = container_url
        self.compression = compression

    def get_container_client(self, hex_obj_id):
        """Get the container client for the container that contains the object
        with internal id hex_obj_id.

        This is used to allow the PrefixedAzureCloudObjStorage to dispatch the
        client according to the prefix of the object id.
        """
        return ContainerClient.from_container_url(self.container_url)

    @contextlib.asynccontextmanager
    async def get_async_container_clients(self):
        """Returns a collection of container clients, to be passed to
        ``get_async_blob_client``.
        Each container may not be used in more than one asyncio loop."""
        client = AsyncContainerClient.from_container_url(self.container_url)
        async with client:
            yield {"": client}

    def get_blob_client(self, hex_obj_id):
        """Get the azure blob client for the given hex obj id"""
        container_client = self.get_container_client(hex_obj_id)

        return container_client.get_blob_client(blob=hex_obj_id)

    def get_async_blob_client(self, hex_obj_id, container_clients):
        """Get the azure blob client for the given hex obj id and a collection
        yielded by ``get_async_container_clients``."""

        return container_clients[""].get_blob_client(blob=hex_obj_id)

    def get_all_container_clients(self):
        """Get all active block_blob_services"""
        yield self.get_container_client("")

    def _internal_id(self, obj_id):
        """Internal id is the hex version in objstorage."""
        return hashutil.hash_to_hex(obj_id)

    def check_config(self, *, check_write):
        """Check the configuration for this object storage"""
        for container_client in self.get_all_container_clients():
            props = container_client.get_container_properties()

            # FIXME: check_write is ignored here
            if not props:
                return False

        return True

-    def __contains__(self, obj_id):
+    def __contains__(self, obj_id: ObjId) -> bool:
        """Does the storage contain the obj_id."""
        hex_obj_id = self._internal_id(obj_id)
        client = self.get_blob_client(hex_obj_id)
        try:
            client.get_blob_properties()
        except ResourceNotFoundError:
            return False
        else:
            return True

-    def __iter__(self):
+    def __iter__(self) -> Iterator[CompositeObjId]:
        """Iterate over the objects present in the storage."""
        for client in self.get_all_container_clients():
            for obj in client.list_blobs():
                yield hashutil.hash_to_bytes(obj.name)

    def __len__(self):
        """Compute the number of objects in the current object storage.

        Returns:
            number of objects contained in the storage.
        """
        return sum(1 for i in self)

    def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
        """Add an obj in storage if it's not there already."""
        if check_presence and obj_id in self:
            return

        hex_obj_id = self._internal_id(obj_id)

        # Send the compressed content
        compressor = compressors[self.compression]()
        data = compressor.compress(content)
        data += compressor.flush()

        client = self.get_blob_client(hex_obj_id)
        try:
            client.upload_blob(data=data, length=len(data))
        except ResourceExistsError:
            # There's a race condition between check_presence and upload_blob,
            # which we can't get rid of, as the azure api doesn't allow atomic
            # replaces or renaming a blob. As the restore operation explicitly
            # removes the blob, it should be safe to just ignore the error.
            pass

    def restore(self, content: bytes, obj_id: ObjId) -> None:
        """Restore a content."""
        if obj_id in self:
            self.delete(obj_id)

        return self.add(content, obj_id, check_presence=False)

    def get(self, obj_id: ObjId) -> bytes:
        """Retrieve the blob's content if found."""
        return call_async(self._get_async, obj_id)

    async def _get_async(self, obj_id, container_clients=None):
        """Coroutine implementing ``get(obj_id)`` using azure-storage-blob's
        asynchronous implementation.
        While ``get(obj_id)`` does not need asynchronicity, this is useful to
        ``get_batch(obj_ids)``, as it can run multiple ``_get_async`` tasks
        concurrently."""
        if container_clients is None:
            # If the container_clients argument is not passed, create a new
            # collection of container_clients and restart the function with it.
            async with self.get_async_container_clients() as container_clients:
                return await self._get_async(obj_id, container_clients)

        hex_obj_id = self._internal_id(obj_id)
        client = self.get_async_blob_client(hex_obj_id, container_clients)

        try:
            download = await client.download_blob()
        except ResourceNotFoundError:
            raise ObjNotFoundError(obj_id) from None
        else:
            data = await download.content_as_bytes()

        decompressor = decompressors[self.compression]()
        ret = decompressor.decompress(data)
        if decompressor.unused_data:
            raise Error("Corrupt object %s: trailing data found" % hex_obj_id)

        return ret

    async def _get_async_or_none(self, obj_id, container_clients):
        """Like ``_get_async(obj_id)``, but returns None instead of raising
        ObjNotFoundError. Used by ``get_batch`` so other blobs can be returned
        even if one is missing."""
        try:
            return await self._get_async(obj_id, container_clients)
        except ObjNotFoundError:
            return None

    async def _get_batch_async(self, obj_ids):
        async with self.get_async_container_clients() as container_clients:
            return await asyncio.gather(
                *[
                    self._get_async_or_none(obj_id, container_clients)
                    for obj_id in obj_ids
                ]
            )

    def get_batch(self, obj_ids: List[ObjId]) -> Iterator[Optional[bytes]]:
        """Retrieve objects' raw content in bulk from storage, concurrently."""
        return call_async(self._get_batch_async, obj_ids)

    def check(self, obj_id: ObjId) -> None:
        """Check the content integrity."""
        obj_content = self.get(obj_id)
        content_obj_id = compute_hash(obj_content)
        if content_obj_id != obj_id:
            raise Error(obj_id)

    def delete(self, obj_id: ObjId):
        """Delete an object."""
        super().delete(obj_id)  # Check delete permission
        hex_obj_id = self._internal_id(obj_id)
        client = self.get_blob_client(hex_obj_id)
        try:
            client.delete_blob()
        except ResourceNotFoundError:
            raise ObjNotFoundError(obj_id) from None

        return True


class PrefixedAzureCloudObjStorage(AzureCloudObjStorage):
    """ObjStorage with azure capabilities, striped by prefix.
    accounts is a dict containing entries of the form:
        <prefix>: <container_url_for_prefix>
    """

    def __init__(
        self,
        accounts: Dict[str, Union[str, Dict[str, str]]],
        compression="gzip",
        **kwargs,
    ):
        # shortcut AzureCloudObjStorage __init__
        ObjStorage.__init__(self, **kwargs)

        self.compression = compression

        # Definition sanity check
        prefix_lengths = set(len(prefix) for prefix in accounts)
        if not len(prefix_lengths) == 1:
            raise ValueError(
                "Inconsistent prefixes, found lengths %s"
                % ", ".join(str(lst) for lst in sorted(prefix_lengths))
            )

        self.prefix_len = prefix_lengths.pop()

        expected_prefixes = set(
            "".join(letters)
            for letters in product(
                set(string.hexdigits.lower()), repeat=self.prefix_len
            )
        )
        missing_prefixes = expected_prefixes - set(accounts)
        if missing_prefixes:
            raise ValueError(
                "Missing prefixes %s" % ", ".join(sorted(missing_prefixes))
            )

        do_warning = False

        self.container_urls = {}
        for prefix, container_url in accounts.items():
            if isinstance(container_url, dict):
                do_warning = True
                container_url = get_container_url(
                    account_name=container_url["account_name"],
                    account_key=container_url["api_secret_key"],
                    container_name=container_url["container_name"],
                    access_policy="full",
                )
            self.container_urls[prefix] = container_url

        if do_warning:
            warnings.warn(
                "The Azure objstorage account secret key parameters are "
                "deprecated, please use container URLs instead.",
                DeprecationWarning,
            )

    def get_container_client(self, hex_obj_id):
        """Get the block_blob_service and container that contains the object
        with internal id hex_obj_id.
        """
        prefix = hex_obj_id[: self.prefix_len]
        return ContainerClient.from_container_url(self.container_urls[prefix])

    @contextlib.asynccontextmanager
    async def get_async_container_clients(self):
        # This is equivalent to:
        # client1 = AsyncContainerClient.from_container_url(url1)
        # ...
        # client16 = AsyncContainerClient.from_container_url(url16)
        # async with client1, ..., client16:
        #     yield {prefix1: client1, ..., prefix16: client16}
        clients = {
            prefix: AsyncContainerClient.from_container_url(url)
            for (prefix, url) in self.container_urls.items()
        }
        async with contextlib.AsyncExitStack() as stack:
            for client in clients.values():
                await stack.enter_async_context(client)
            yield clients

    def get_async_blob_client(self, hex_obj_id, container_clients):
        """Get the azure blob client for the given hex obj id and a collection
        yielded by ``get_async_container_clients``."""

        prefix = hex_obj_id[: self.prefix_len]
        return container_clients[prefix].get_blob_client(blob=hex_obj_id)

    def get_all_container_clients(self):
        """Get all active container clients"""
        # iterate on items() to sort blob services;
        # needed to be able to paginate in the list_content() method
        yield from (
            self.get_container_client(prefix) for prefix in sorted(self.container_urls)
        )
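A hedged usage sketch for the Azure backend above; the account, key and container names are placeholders, not real credentials:

```python
import datetime

# Generate a container URL carrying a SAS token, then hand it to the
# objstorage; "append_only" grants read + list + write but no delete.
url = get_container_url(
    account_name="swhstorage",            # placeholder account
    account_key="<base64-account-key>",   # placeholder shared key
    container_name="contents",
    access_policy="append_only",
    expiry=datetime.timedelta(days=7),    # SAS expires after a week
)
storage = AzureCloudObjStorage(container_url=url, compression="gzip")
```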
""" randomizer = Randomizer() if filesize: gen = repeat(filesize) else: gen = gen_sizes() if total: gen = islice(gen, total) for objsize in gen: yield randomizer.read(objsize) class RandomGeneratorObjStorage(ObjStorage): """A stupid read-only storage that generates blobs for testing purpose.""" def __init__(self, filesize=None, total=None, **kwargs): super().__init__() if filesize: filesize = int(filesize) self.filesize = filesize if total: total = int(total) self.total = total self._content_generator = None @property def content_generator(self): if self._content_generator is None: self._content_generator = gen_random_content(self.total, self.filesize) return self._content_generator def check_config(self, *, check_write): return True def __contains__(self, obj_id, *args, **kwargs): return False - def __iter__(self): + def __iter__(self) -> Iterator[CompositeObjId]: i = 1 while True: - j = yield (b"%d" % i) + j = yield {ID_HASH_ALGO: b"%d" % i} if self.total and i >= self.total: logger.debug("DONE") break if j is not None: i = j else: i += 1 def get(self, obj_id, *args, **kwargs): return next(self.content_generator) def add(self, content, obj_id, check_presence=True, *args, **kwargs): pass def check(self, obj_id, *args, **kwargs): return True def delete(self, obj_id, *args, **kwargs): return True def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT, - ) -> Iterator[ObjId]: - it = iter(self) + ) -> Iterator[CompositeObjId]: + if isinstance(last_obj_id, dict): + last_obj_id = last_obj_id[ID_HASH_ALGO] + it = cast(Generator[CompositeObjId, int, None], iter(self)) if last_obj_id: next(it) it.send(int(last_obj_id)) return islice(it, limit) diff --git a/swh/objstorage/backends/http.py b/swh/objstorage/backends/http.py index 35542fa..1900e11 100644 --- a/swh/objstorage/backends/http.py +++ b/swh/objstorage/backends/http.py @@ -1,98 +1,98 @@ # Copyright (C) 2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging from typing import Iterator, Optional from urllib.parse import urljoin import requests from swh.model import hashutil from swh.objstorage import exc -from swh.objstorage.interface import ObjId +from swh.objstorage.interface import CompositeObjId, ObjId from swh.objstorage.objstorage import ( DEFAULT_LIMIT, ObjStorage, compute_hash, decompressors, ) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.ERROR) class HTTPReadOnlyObjStorage(ObjStorage): """Simple ObjStorage retrieving objects from an HTTP server. 
diff --git a/swh/objstorage/backends/http.py b/swh/objstorage/backends/http.py
index 35542fa..1900e11 100644
--- a/swh/objstorage/backends/http.py
+++ b/swh/objstorage/backends/http.py
@@ -1,98 +1,98 @@
# Copyright (C) 2021  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import logging
from typing import Iterator, Optional
from urllib.parse import urljoin

import requests

from swh.model import hashutil
from swh.objstorage import exc
-from swh.objstorage.interface import ObjId
+from swh.objstorage.interface import CompositeObjId, ObjId
from swh.objstorage.objstorage import (
    DEFAULT_LIMIT,
    ObjStorage,
    compute_hash,
    decompressors,
)

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.ERROR)


class HTTPReadOnlyObjStorage(ObjStorage):
    """Simple ObjStorage retrieving objects from an HTTP server.

    For example, it can be used to retrieve objects from S3:

        objstorage:
          cls: http
          url: https://softwareheritage.s3.amazonaws.com/content/
    """

    def __init__(self, url=None, compression=None, **kwargs):
        super().__init__(**kwargs)
        self.session = requests.sessions.Session()
        self.root_path = url
        if not self.root_path.endswith("/"):
            self.root_path += "/"
        self.compression = compression

    def check_config(self, *, check_write):
        """Check the configuration for this object storage"""
        return True

-    def __contains__(self, obj_id):
+    def __contains__(self, obj_id: ObjId) -> bool:
        resp = self.session.head(self._path(obj_id))
        return resp.status_code == 200

-    def __iter__(self):
+    def __iter__(self) -> Iterator[CompositeObjId]:
        raise exc.NonIterableObjStorage("__iter__")

    def __len__(self):
        raise exc.NonIterableObjStorage("__len__")

    def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
        raise exc.ReadOnlyObjStorage("add")

    def delete(self, obj_id: ObjId):
        raise exc.ReadOnlyObjStorage("delete")

    def restore(self, content: bytes, obj_id: ObjId) -> None:
        raise exc.ReadOnlyObjStorage("restore")

    def list_content(
        self,
        last_obj_id: Optional[ObjId] = None,
        limit: int = DEFAULT_LIMIT,
-    ) -> Iterator[ObjId]:
+    ) -> Iterator[CompositeObjId]:
        raise exc.NonIterableObjStorage("list_content")

    def get(self, obj_id: ObjId) -> bytes:
        try:
            resp = self.session.get(self._path(obj_id))
            resp.raise_for_status()
        except Exception:
            raise exc.ObjNotFoundError(obj_id)
        ret: bytes = resp.content
        if self.compression:
            d = decompressors[self.compression]()
            ret = d.decompress(ret)
            if d.unused_data:
                hex_obj_id = hashutil.hash_to_hex(obj_id)
                raise exc.Error("Corrupt object %s: trailing data found" % hex_obj_id)
        return ret

    def check(self, obj_id: ObjId) -> None:
        # Check the content integrity
        obj_content = self.get(obj_id)
        content_obj_id = compute_hash(obj_content)
        if content_obj_id != obj_id:
            raise exc.Error(obj_id)

    def _path(self, obj_id):
        return urljoin(self.root_path, hashutil.hash_to_hex(obj_id))
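A sketch of instantiating this read-only backend through the factory, assuming `get_objstorage` forwards the same keys as the YAML configuration in the docstring (as its use in server.py suggests):

```python
from swh.objstorage.factory import get_objstorage

storage = get_objstorage(
    cls="http",
    url="https://softwareheritage.s3.amazonaws.com/content/",
)

obj_id = bytes.fromhex("34973274ccef6ab4dfaaf86599792fa9c3fe4689")
if obj_id in storage:              # issues a HEAD request
    content = storage.get(obj_id)  # issues a GET request
```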
""" def __init__(self, **args): super().__init__() self.state = {} def check_config(self, *, check_write): return True - def __contains__(self, obj_id): + def __contains__(self, obj_id: ObjId) -> bool: return obj_id in self.state - def __iter__(self): + def __iter__(self) -> Iterator[CompositeObjId]: return iter(sorted(self.state)) def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: if check_presence and obj_id in self: return self.state[obj_id] = content def get(self, obj_id: ObjId) -> bytes: if obj_id not in self: raise ObjNotFoundError(obj_id) return self.state[obj_id] def check(self, obj_id: ObjId) -> None: if obj_id not in self: raise ObjNotFoundError(obj_id) if compute_hash(self.state[obj_id]) != obj_id: - raise Error("Corrupt object %s" % obj_id.hex()) + raise Error("Corrupt object %s" % objid_to_default_hex(obj_id)) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.state.pop(obj_id) return True diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py index 2d8b5ef..3441778 100644 --- a/swh/objstorage/backends/libcloud.py +++ b/swh/objstorage/backends/libcloud.py @@ -1,251 +1,250 @@ # Copyright (C) 2016-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from collections import OrderedDict -from collections.abc import Iterator -from typing import Optional +from typing import Iterator, Optional from urllib.parse import urlencode from libcloud.storage import providers import libcloud.storage.drivers.s3 from libcloud.storage.types import ObjectDoesNotExistError, Provider from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError -from swh.objstorage.interface import ObjId +from swh.objstorage.interface import CompositeObjId, ObjId from swh.objstorage.objstorage import ( ObjStorage, compressors, compute_hash, decompressors, ) def patch_libcloud_s3_urlencode(): """Patches libcloud's S3 backend to properly sign queries. Recent versions of libcloud are not affected (they use signature V4), but 1.5.0 (the one in Debian 9) is.""" def s3_urlencode(params): """Like urllib.parse.urlencode, but sorts the parameters first. This is required to properly compute the request signature, see https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement """ # noqa return urlencode(OrderedDict(sorted(params.items()))) libcloud.storage.drivers.s3.urlencode = s3_urlencode patch_libcloud_s3_urlencode() class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta): """Abstract ObjStorage that connect to a cloud using Libcloud Implementations of this class must redefine the _get_provider method to make it return a driver provider (i.e. object that supports `get_driver` method) which return a LibCloud driver (see https://libcloud.readthedocs.io/en/latest/storage/api.html). 
diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py
index 2d8b5ef..3441778 100644
--- a/swh/objstorage/backends/libcloud.py
+++ b/swh/objstorage/backends/libcloud.py
@@ -1,251 +1,250 @@
# Copyright (C) 2016-2017  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import abc
from collections import OrderedDict
-from collections.abc import Iterator
-from typing import Optional
+from typing import Iterator, Optional
from urllib.parse import urlencode

from libcloud.storage import providers
import libcloud.storage.drivers.s3
from libcloud.storage.types import ObjectDoesNotExistError, Provider

from swh.model import hashutil
from swh.objstorage.exc import Error, ObjNotFoundError
-from swh.objstorage.interface import ObjId
+from swh.objstorage.interface import CompositeObjId, ObjId
from swh.objstorage.objstorage import (
    ObjStorage,
    compressors,
    compute_hash,
    decompressors,
)


def patch_libcloud_s3_urlencode():
    """Patches libcloud's S3 backend to properly sign queries.

    Recent versions of libcloud are not affected (they use signature V4), but
    1.5.0 (the one in Debian 9) is."""

    def s3_urlencode(params):
        """Like urllib.parse.urlencode, but sorts the parameters first.
        This is required to properly compute the request signature, see
        https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement
        """  # noqa
        return urlencode(OrderedDict(sorted(params.items())))

    libcloud.storage.drivers.s3.urlencode = s3_urlencode


patch_libcloud_s3_urlencode()


class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta):
    """Abstract ObjStorage that connects to a cloud using Libcloud

    Implementations of this class must redefine the _get_provider
    method to make it return a driver provider (i.e. object that
    supports the `get_driver` method) which returns a LibCloud driver
    (see https://libcloud.readthedocs.io/en/latest/storage/api.html).

    Args:
        container_name: Name of the base container
        path_prefix: prefix to prepend to object paths in the container,
            separated with a slash
        compression: compression algorithm to use for objects
        kwargs: extra arguments are passed through to the LibCloud driver
    """

    def __init__(
        self,
        container_name: str,
        compression: str = "gzip",
        path_prefix: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.driver = self._get_driver(**kwargs)
        self.container_name = container_name
        self.container = self.driver.get_container(container_name=container_name)
        self.compression = compression
        self.path_prefix = None
        if path_prefix:
            self.path_prefix = path_prefix.rstrip("/") + "/"

    def _get_driver(self, **kwargs):
        """Initialize a driver to communicate with the cloud

        Kwargs: arguments passed to the StorageDriver class, typically
          key: key to connect to the API.
          secret: secret key for authentication.
          secure: (bool) support HTTPS
          host: (str)
          port: (int)
          api_version: (str)
          region: (str)

        Returns:
            a Libcloud driver to a cloud storage.
        """
        # Get the driver class from its description.
        cls = providers.get_driver(self._get_provider())
        # Initialize the driver.
        return cls(**kwargs)

    @abc.abstractmethod
    def _get_provider(self):
        """Get a libcloud driver provider

        This method must be overridden by subclasses to specify which
        of the native libcloud drivers the current storage should
        connect to. Alternatively, a provider for a custom driver may be
        returned, in which case the provider will have to support the
        `get_driver` method.
        """
        raise NotImplementedError(
            "%s must implement `_get_provider` method" % type(self)
        )

    def check_config(self, *, check_write):
        """Check the configuration for this object storage"""
        # FIXME: hopefully this blew up during instantiation
        return True

-    def __contains__(self, obj_id):
+    def __contains__(self, obj_id: ObjId) -> bool:
        try:
            self._get_object(obj_id)
        except ObjNotFoundError:
            return False
        else:
            return True

-    def __iter__(self):
+    def __iter__(self) -> Iterator[CompositeObjId]:
        """Iterate over the objects present in the storage

        Warning: Iteration over the contents of a cloud-based object storage
        may have bad efficiency: due to the very high amount of objects in it
        and the fact that it is remote, getting all the contents of the
        current object storage may result in a lot of network requests.

        You almost certainly don't want to use this method in production.
        """
        for obj in self.driver.iterate_container_objects(self.container):
            name = obj.name
            if self.path_prefix and not name.startswith(self.path_prefix):
                continue
            if self.path_prefix:
                name = name[len(self.path_prefix) :]
            yield hashutil.hash_to_bytes(name)

    def __len__(self):
        """Compute the number of objects in the current object storage.

        Warning: this currently uses `__iter__`; its warning about bad
        performance applies.

        Returns:
            number of objects contained in the storage.
""" return sum(1 for i in self) def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: if check_presence and obj_id in self: return self._put_object(content, obj_id) def restore(self, content: bytes, obj_id: ObjId) -> None: return self.add(content, obj_id, check_presence=False) def get(self, obj_id: ObjId) -> bytes: obj = b"".join(self._get_object(obj_id).as_stream()) d = decompressors[self.compression]() ret = d.decompress(obj) if d.unused_data: hex_obj_id = hashutil.hash_to_hex(obj_id) raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id: ObjId) -> None: # Check that the file exists, as _get_object raises ObjNotFoundError self._get_object(obj_id) # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission obj = self._get_object(obj_id) return self.driver.delete_object(obj) def _object_path(self, obj_id): """Get the full path to an object""" hex_obj_id = hashutil.hash_to_hex(obj_id) if self.path_prefix: return self.path_prefix + hex_obj_id else: return hex_obj_id def _get_object(self, obj_id): """Get a Libcloud wrapper for an object pointer. This wrapper does not retrieve the content of the object directly. """ object_path = self._object_path(obj_id) try: return self.driver.get_object(self.container_name, object_path) except ObjectDoesNotExistError: raise ObjNotFoundError(obj_id) def _compressor(self, data): comp = compressors[self.compression]() for chunk in data: cchunk = comp.compress(chunk) if cchunk: yield cchunk trail = comp.flush() if trail: yield trail def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. 
""" object_path = self._object_path(obj_id) if not isinstance(content, Iterator): content = (content,) self.driver.upload_object_via_stream( self._compressor(content), self.container, object_path ) class AwsCloudObjStorage(CloudObjStorage): """Amazon's S3 Cloud-based object storage""" def _get_provider(self): return Provider.S3 class OpenStackCloudObjStorage(CloudObjStorage): """OpenStack Swift Cloud based object storage""" def _get_provider(self): return Provider.OPENSTACK_SWIFT diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py index c839e39..f8f19d9 100644 --- a/swh/objstorage/backends/pathslicing.py +++ b/swh/objstorage/backends/pathslicing.py @@ -1,368 +1,373 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from contextlib import contextmanager from itertools import islice import os import tempfile from typing import Iterator, List, Optional from swh.model import hashutil from swh.objstorage.constants import DEFAULT_LIMIT, ID_HASH_ALGO, ID_HEXDIGEST_LENGTH from swh.objstorage.exc import Error, ObjNotFoundError -from swh.objstorage.interface import ObjId -from swh.objstorage.objstorage import ObjStorage, compressors, decompressors +from swh.objstorage.interface import CompositeObjId, ObjId +from swh.objstorage.objstorage import ( + ObjStorage, + compressors, + decompressors, + objid_to_default_hex, +) BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 class PathSlicer: """Helper class to compute a path based on a hash. Used to compute a directory path based on the object hash according to a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. For instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will have the following computed path: - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 Args: root (str): path to the root directory of the storage on the disk. slicing (str): the slicing configuration. """ def __init__(self, root: str, slicing: str): self.root = root # Make a list of tuples where each tuple contains the beginning # and the end of each slicing. try: self.bounds = [ slice(*(int(x) if x else None for x in sbounds.split(":"))) for sbounds in slicing.split("/") if sbounds ] except TypeError: raise ValueError( "Invalid slicing declaration; " "it should be a of the form ':[/:]..." ) def check_config(self): """Check the slicing configuration is valid. Raises: ValueError: if the slicing configuration is invalid. """ if len(self): max_char = max( max(bound.start or 0, bound.stop or 0) for bound in self.bounds ) if ID_HEXDIGEST_LENGTH < max_char: raise ValueError( "Algorithm %s has too short hash for slicing to char %d" % (ID_HASH_ALGO, max_char) ) def get_directory(self, hex_obj_id: str) -> str: """Compute the storage directory of an object. See also: PathSlicer::get_path Args: hex_obj_id: object id as hexlified string. Returns: Absolute path (including root) to the directory that contains the given object id. """ return os.path.join(self.root, *self.get_slices(hex_obj_id)) def get_path(self, hex_obj_id: str) -> str: """Compute the full path to an object into the current storage. See also: PathSlicer::get_directory Args: hex_obj_id(str): object id as hexlified string. 
        Returns:
            Absolute path (including root) to the object corresponding
            to the given object id.
        """
        return os.path.join(self.get_directory(hex_obj_id), hex_obj_id)

    def get_slices(self, hex_obj_id: str) -> List[str]:
        """Compute the path elements for the given hash.

        Args:
            hex_obj_id(str): object id as hexlified string.

        Returns:
            Relative path to the actual object corresponding to the given id
            as a list.
        """
        assert len(hex_obj_id) == ID_HEXDIGEST_LENGTH
        return [hex_obj_id[bound] for bound in self.bounds]

    def __len__(self) -> int:
        """Number of slices of the slicer"""
        return len(self.bounds)


class PathSlicingObjStorage(ObjStorage):
    """Implementation of the ObjStorage API based on the hash of the content.

    On disk, an object storage is a directory tree containing files
    named after their object IDs. An object ID is a checksum of its
    content, depending on the value of the ID_HASH_ALGO constant (see
    swh.model.hashutil for its meaning).

    To avoid directories that contain too many files, the object storage has
    a given slicing. Each slicing corresponds to a directory that is named
    according to the hash of its content.

    So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689
    will be stored in the given object storages:

    - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
    - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689

    The files in the storage are stored in gzipped compressed format.

    Args:
        root (str): path to the root directory of the storage on
            the disk.
        slicing (str): string that indicates the slicing to perform
            on the hash of the content to know the path where it should
            be stored (see the documentation of the PathSlicer class).

    """

    def __init__(self, root, slicing, compression="gzip", **kwargs):
        super().__init__(**kwargs)
        self.root = root
        self.slicer = PathSlicer(root, slicing)

        self.use_fdatasync = hasattr(os, "fdatasync")
        self.compression = compression

        self.check_config(check_write=False)

    def check_config(self, *, check_write):
        """Check whether this object storage is properly configured"""

        self.slicer.check_config()

        if not os.path.isdir(self.root):
            raise ValueError(
                'PathSlicingObjStorage root "%s" is not a directory' % self.root
            )

        if check_write:
            if not os.access(self.root, os.W_OK):
                raise PermissionError(
                    'PathSlicingObjStorage root "%s" is not writable' % self.root
                )

        if self.compression not in compressors:
            raise ValueError(
                'Unknown compression algorithm "%s" for '
                "PathSlicingObjStorage" % self.compression
            )

        return True

    def __contains__(self, obj_id: ObjId) -> bool:
-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        hex_obj_id = objid_to_default_hex(obj_id)
        return os.path.isfile(self.slicer.get_path(hex_obj_id))

-    def __iter__(self) -> Iterator[bytes]:
+    def __iter__(self) -> Iterator[CompositeObjId]:
        """Iterate over the object identifiers currently available in the
        storage.

        Warning: with the current implementation of the object storage, this
        method will walk the filesystem to list objects, meaning that listing
        all objects will be very slow for large storages. You almost certainly
        don't want to use this method in production.

        Return:
            Iterator over object IDs
        """

        def obj_iterator():
            # XXX hackish: it does not verify that the depth of found files
            # matches the slicing depth of the storage
            for root, _dirs, files in os.walk(self.root):
                _dirs.sort()
                for f in sorted(files):
                    yield bytes.fromhex(f)

        return obj_iterator()

    def __len__(self) -> int:
        """Compute the number of objects available in the storage.
        Warning: this currently uses `__iter__`; its warning about bad
        performance applies.

        Return:
            number of objects contained in the storage
        """
        return sum(1 for i in self)

    def add(
        self,
        content: bytes,
        obj_id: ObjId,
        check_presence: bool = True,
    ) -> None:
        if check_presence and obj_id in self:
            # If the object is already present, return immediately.
            return

-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        hex_obj_id = objid_to_default_hex(obj_id)
        compressor = compressors[self.compression]()
        with self._write_obj_file(hex_obj_id) as f:
            f.write(compressor.compress(content))
            f.write(compressor.flush())

    def get(self, obj_id: ObjId) -> bytes:
        if obj_id not in self:
            raise ObjNotFoundError(obj_id)

        # Open the file and return its content as bytes
-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        hex_obj_id = objid_to_default_hex(obj_id)
        d = decompressors[self.compression]()
        with open(self.slicer.get_path(hex_obj_id), "rb") as f:
            out = d.decompress(f.read())
        if d.unused_data:
            raise Error(
                "Corrupt object %s: trailing data found" % hex_obj_id,
            )

        return out

    def check(self, obj_id: ObjId) -> None:
        try:
            data = self.get(obj_id)
        except OSError:
-            hex_obj_id = hashutil.hash_to_hex(obj_id)
+            hex_obj_id = objid_to_default_hex(obj_id)
            raise Error(
                "Corrupt object %s: not a proper compressed file" % hex_obj_id,
            )

        checksums = hashutil.MultiHash.from_data(
            data, hash_names=[ID_HASH_ALGO]
        ).digest()

-        actual_obj_id = checksums[ID_HASH_ALGO]
-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        actual_obj_sha1 = checksums[ID_HASH_ALGO]
+        hex_obj_id = objid_to_default_hex(obj_id)

-        if hex_obj_id != hashutil.hash_to_hex(actual_obj_id):
+        if hex_obj_id != hashutil.hash_to_hex(actual_obj_sha1):
            raise Error(
                "Corrupt object %s should have id %s"
-                % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id))
+                % (objid_to_default_hex(obj_id), hashutil.hash_to_hex(actual_obj_sha1))
            )

    def delete(self, obj_id: ObjId):
        super().delete(obj_id)  # Check delete permission
        if obj_id not in self:
            raise ObjNotFoundError(obj_id)

-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        hex_obj_id = objid_to_default_hex(obj_id)
        try:
            os.remove(self.slicer.get_path(hex_obj_id))
        except FileNotFoundError:
            raise ObjNotFoundError(obj_id)

        return True

    # Streaming methods

    @contextmanager
    def chunk_writer(self, obj_id):
-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        hex_obj_id = objid_to_default_hex(obj_id)
        compressor = compressors[self.compression]()
        with self._write_obj_file(hex_obj_id) as f:
            yield lambda c: f.write(compressor.compress(c))
            f.write(compressor.flush())

    def list_content(
        self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT
-    ) -> Iterator[ObjId]:
+    ) -> Iterator[CompositeObjId]:
        if last_obj_id:
            it = self.iter_from(last_obj_id)
        else:
            it = iter(self)
        return islice(it, limit)

    def iter_from(self, obj_id, n_leaf=False):
-        hex_obj_id = hashutil.hash_to_hex(obj_id)
+        hex_obj_id = objid_to_default_hex(obj_id)
        slices = self.slicer.get_slices(hex_obj_id)
        rlen = len(self.root.split("/"))

        i = 0
        for root, dirs, files in os.walk(self.root):
            if not dirs:
                i += 1
            level = len(root.split("/")) - rlen
            dirs.sort()
            if dirs and root == os.path.join(self.root, *slices[:level]):
                cslice = slices[level]
                for d in dirs[:]:
                    if d < cslice:
                        dirs.remove(d)
            for f in sorted(files):
                if f > hex_obj_id:
                    yield bytes.fromhex(f)
        if n_leaf:
            yield i

    @contextmanager
    def _write_obj_file(self, hex_obj_id):
        """Context manager for writing object files to the object storage.

        During writing, data is written to a temporary file, which is
        atomically renamed to the final file name after closing.

        Usage sample:

            with objstorage._write_obj_file(hex_obj_id) as f:
                f.write(obj_data)

        Yields:
            a file-like object open for writing bytes.
        """
        # Get the final paths and create the directory if absent.
        dir = self.slicer.get_directory(hex_obj_id)
        if not os.path.isdir(dir):
            os.makedirs(dir, DIR_MODE, exist_ok=True)
        path = os.path.join(dir, hex_obj_id)

        # Create a temporary file.
        (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir)

        # Open the file and yield it for writing.
        tmp_f = os.fdopen(tmp, "wb")
        yield tmp_f

        # Make sure the contents of the temporary file are written to disk
        tmp_f.flush()
        if self.use_fdatasync:
            os.fdatasync(tmp)
        else:
            os.fsync(tmp)

        # Then close the temporary file and move it to the right path.
        tmp_f.close()
        os.chmod(tmp_path, FILE_MODE)
        os.rename(tmp_path, path)
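A quick illustration of the slicing logic above ("/srv/objects" is an arbitrary example root):

```python
slicer = PathSlicer("/srv/objects", "0:2/2:4/4:6")

hex_obj_id = "34973274ccef6ab4dfaaf86599792fa9c3fe4689"
assert slicer.get_slices(hex_obj_id) == ["34", "97", "32"]
assert slicer.get_path(hex_obj_id) == (
    "/srv/objects/34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689"
)
```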
diff --git a/swh/objstorage/backends/seaweedfs/objstorage.py b/swh/objstorage/backends/seaweedfs/objstorage.py
index 09f45ca..b87ae9f 100644
--- a/swh/objstorage/backends/seaweedfs/objstorage.py
+++ b/swh/objstorage/backends/seaweedfs/objstorage.py
@@ -1,156 +1,157 @@
# Copyright (C) 2019-2021  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import io
from itertools import islice
import logging
import os
from typing import Iterator, Optional

from swh.model import hashutil
from swh.objstorage.exc import Error, ObjNotFoundError
-from swh.objstorage.interface import ObjId
+from swh.objstorage.interface import CompositeObjId, ObjId
from swh.objstorage.objstorage import (
    DEFAULT_LIMIT,
    ObjStorage,
    compressors,
    compute_hash,
    decompressors,
+    objid_to_default_hex,
)

from .http import HttpFiler

LOGGER = logging.getLogger(__name__)


class SeaweedFilerObjStorage(ObjStorage):
    """ObjStorage with seaweedfs abilities, using the Filer API.

    https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API
    """

    def __init__(self, url, compression=None, **kwargs):
        super().__init__(**kwargs)
        self.wf = HttpFiler(url)
        self.compression = compression

    def check_config(self, *, check_write):
        """Check the configuration for this object storage"""
        # FIXME: hopefully this blew up during instantiation
        return True

-    def __contains__(self, obj_id):
+    def __contains__(self, obj_id: ObjId) -> bool:
        return self.wf.exists(self._path(obj_id))

-    def __iter__(self):
+    def __iter__(self) -> Iterator[CompositeObjId]:
        """Iterate over the objects present in the storage

        Warning: Iteration over the contents of a cloud-based object storage
        may have bad efficiency: due to the very high amount of objects in it
        and the fact that it is remote, getting all the contents of the
        current object storage may result in a lot of network requests.

        You almost certainly don't want to use this method in production.
        """
        obj_id = last_obj_id = None
        while True:
            for obj_id in self.list_content(last_obj_id=last_obj_id):
                yield obj_id
            if last_obj_id == obj_id:
                break
            last_obj_id = obj_id

    def __len__(self):
        """Compute the number of objects in the current object storage.

        Warning: this currently uses `__iter__`; its warning about bad
        performance applies.

        Returns:
            number of objects contained in the storage.
""" return sum(1 for i in self) def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: if check_presence and obj_id in self: return def compressor(data): comp = compressors[self.compression]() yield comp.compress(data) yield comp.flush() assert isinstance( content, bytes ), "list of content chunks is not supported anymore" self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id)) def restore(self, content: bytes, obj_id: ObjId) -> None: return self.add(content, obj_id, check_presence=False) def get(self, obj_id: ObjId) -> bytes: try: obj = self.wf.get(self._path(obj_id)) except Exception: raise ObjNotFoundError(obj_id) d = decompressors[self.compression]() ret = d.decompress(obj) if d.unused_data: hex_obj_id = hashutil.hash_to_hex(obj_id) raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id: ObjId) -> None: # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.wf.delete(self._path(obj_id)) return True def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT, - ) -> Iterator[ObjId]: + ) -> Iterator[CompositeObjId]: if last_obj_id: - objid = hashutil.hash_to_hex(last_obj_id) + objid = objid_to_default_hex(last_obj_id) lastfilename = objid else: lastfilename = None for fname in islice(self.wf.iterfiles(last_file_name=lastfilename), limit): bytehex = fname.rsplit("/", 1)[-1] yield hashutil.bytehex_to_hash(bytehex.encode()) # internal methods def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. """ def compressor(data): comp = compressors[self.compression]() for chunk in data: yield comp.compress(chunk) yield comp.flush() if isinstance(content, bytes): content = [content] self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id)) def _path(self, obj_id): return os.path.join(self.wf.basepath, hashutil.hash_to_hex(obj_id)) diff --git a/swh/objstorage/interface.py b/swh/objstorage/interface.py index 49099a1..aa9860c 100644 --- a/swh/objstorage/interface.py +++ b/swh/objstorage/interface.py @@ -1,191 +1,200 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from typing import Dict, Iterator, List, Optional +from typing import Dict, Iterator, List, Optional, Union -from typing_extensions import Protocol, runtime_checkable +from typing_extensions import Protocol, TypedDict, runtime_checkable from swh.core.api import remote_api_endpoint from swh.objstorage.constants import DEFAULT_LIMIT -ObjId = bytes -"""Type of object ids, which should be a sha1 hash.""" + +class CompositeObjId(TypedDict, total=False): + sha1: bytes + sha1_git: bytes + sha256: bytes + blake2s256: bytes + + +ObjId = Union[bytes, CompositeObjId] +"""Type of object ids, which should be ``{hash: value for hash in SUPPORTED_HASHES}``; +but single sha1 hashes are supported for legacy clients""" @runtime_checkable class ObjStorageInterface(Protocol): """High-level API to manipulate the Software Heritage object storage. 
    Conceptually, the object storage offers the following methods:

    - check_config()  check if the object storage is properly configured
    - __contains__()  check if an object is present, by object id
    - add()           add a new object, returning an object id
    - restore()       same as add() but erases already existing content
    - get()           retrieve the content of an object, by object id
    - check()         check the integrity of an object, by object id
    - delete()        remove an object

    Each implementation of this interface can have a different behavior and
    its own way to store the contents.
    """

    @remote_api_endpoint("check_config")
    def check_config(self, *, check_write):
        """Check whether the object storage is properly configured.

        Args:
            check_write (bool): if True, check if writes to the object storage
                can succeed.

        Returns:
            True if the configuration check worked, an exception if it didn't.
        """
        ...

    @remote_api_endpoint("content/contains")
    def __contains__(self, obj_id: ObjId) -> bool:
        """Indicate if the given object is present in the storage.

        Args:
            obj_id: object identifier.

        Returns:
            True if and only if the object is present in the current object
            storage.
        """
        ...

    @remote_api_endpoint("content/add")
    def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None:
        """Add a new object to the object storage.

        Args:
            content: object's raw content to add in storage.
            obj_id: checksum of [bytes] using [ID_HASH_ALGO]
                algorithm. It is trusted to match the bytes.
            check_presence (bool): indicate if the presence of the
                content should be verified before adding the file.

        Returns:
            the id (bytes) of the object into the storage.
        """
        ...

    @remote_api_endpoint("content/add/batch")
    def add_batch(self, contents, check_presence=True) -> Dict:
        """Add a batch of new objects to the object storage.

        Args:
            contents: mapping from obj_id to object contents

        Returns:
            the summary of objects added to the storage (count of objects,
            count of bytes)
        """
        ...

    def restore(self, content: bytes, obj_id: ObjId) -> None:
        """Restore a content that has been corrupted.

        This function is identical to add but does not check if
        the object id is already in the file system.
        The default implementation provided by the current class is
        suitable for most cases.

        Args:
            content: object's raw content to add in storage
            obj_id: dict of hashes of the content (or only the sha1, for
                legacy clients)
        """
        ...

    @remote_api_endpoint("content/get")
    def get(self, obj_id: ObjId) -> bytes:
        """Retrieve the content of a given object.

        Args:
            obj_id: object id.

        Returns:
            the content of the requested object as bytes.

        Raises:
            ObjNotFoundError: if the requested object is missing.
        """
        ...

    @remote_api_endpoint("content/get/batch")
    def get_batch(self, obj_ids: List[ObjId]) -> Iterator[Optional[bytes]]:
        """Retrieve objects' raw content in bulk from storage.

        Note: This function does have a default implementation in ObjStorage
        that is suitable for most cases.

        For object storages that need to do the minimal number of requests
        possible (ex: remote object storages), that method can be overridden
        to perform a more efficient operation.

        Args:
            obj_ids: list of object ids.

        Returns:
            list of resulting contents, or None if the content could not be
            retrieved. Do not raise any exception as a failure for one
            content must not cancel the whole request.
        """
        ...

    @remote_api_endpoint("content/check")
    def check(self, obj_id: ObjId) -> None:
        """Perform an integrity check for a given object.

        Verify that the file object is in place and that the content matches
        the object id.

        Args:
            obj_id: object identifier.

        Raises:
            ObjNotFoundError: if the requested object is missing.
            Error: if the requested object is corrupted.
        """
        ...

    @remote_api_endpoint("content/delete")
    def delete(self, obj_id: ObjId):
        """Delete an object.

        Args:
            obj_id: object identifier.

        Raises:
            ObjNotFoundError: if the requested object is missing.
        """
        ...

-    def __iter__(self) -> Iterator[ObjId]:
+    def __iter__(self) -> Iterator[CompositeObjId]:
        ...

    def list_content(
        self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT
-    ) -> Iterator[ObjId]:
+    ) -> Iterator[CompositeObjId]:
        """Generates known object ids.

        Args:
            last_obj_id: object id from which to iterate (excluded).
            limit (int): max number of object ids to generate.

        Generates:
            obj_id: object ids.
        """
        ...
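A sketch of the two ObjId shapes the interface now accepts; the `normalize` helper is hypothetical, shown only to illustrate how a backend might widen legacy ids:

```python
from swh.objstorage.interface import CompositeObjId, ObjId

legacy_id: ObjId = bytes.fromhex("34973274ccef6ab4dfaaf86599792fa9c3fe4689")

# total=False: any subset of the supported hashes is a valid dict.
composite_id: CompositeObjId = {"sha1": legacy_id}


def normalize(obj_id: ObjId) -> CompositeObjId:
    """Hypothetical helper: widen a legacy bytes id into the dict form."""
    if isinstance(obj_id, bytes):
        return {"sha1": obj_id}
    return obj_id
```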
@remote_api_endpoint("content/check") def check(self, obj_id: ObjId) -> None: """Perform an integrity check for a given object. Verify that the file object is in place and that the content matches the object id. Args: obj_id: object identifier. Raises: ObjNotFoundError: if the requested object is missing. Error: if the requested object is corrupted. """ ... @remote_api_endpoint("content/delete") def delete(self, obj_id: ObjId): """Delete an object. Args: obj_id: object identifier. Raises: ObjNotFoundError: if the requested object is missing. """ ... - def __iter__(self) -> Iterator[ObjId]: + def __iter__(self) -> Iterator[CompositeObjId]: ... def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT - ) -> Iterator[ObjId]: + ) -> Iterator[CompositeObjId]: """Generates known object ids. Args: last_obj_id: object id from which to start iterating (excluded). limit (int): max number of object ids to generate. Generates: obj_id: object ids. """ ...
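To show how the last_obj_id/limit pair is meant to be used, a minimal pagination sketch (assumed: storage implements the interface above; composite ids yielded by one page are valid ObjId inputs for the next):

def iter_all_ids(storage, page_size=1000):
    # Drain every known object id in fixed-size pages, resuming each page
    # from the last id of the previous one (which is excluded).
    last = None
    while True:
        page = list(storage.list_content(last_obj_id=last, limit=page_size))
        if not page:
            return
        yield from page
        last = page[-1]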
""" return self.storage.__len__() def add(self, content, obj_id, check_presence=True, *args, **kwargs): return self.storage.add(content, obj_id, check_presence, *args, **kwargs) def restore(self, content, obj_id, *args, **kwargs): return self.storage.restore(content, obj_id, *args, **kwargs) def get(self, obj_id, *args, **kwargs): return self.storage.get(obj_id, *args, **kwargs) def check(self, obj_id, *args, **kwargs): return self.storage.check(obj_id, *args, **kwargs) def delete(self, obj_id, *args, **kwargs): return self.storage.delete(obj_id, *args, **kwargs) diff --git a/swh/objstorage/multiplexer/filter/id_filter.py b/swh/objstorage/multiplexer/filter/id_filter.py index bd30580..5955a2e 100644 --- a/swh/objstorage/multiplexer/filter/id_filter.py +++ b/swh/objstorage/multiplexer/filter/id_filter.py @@ -1,79 +1,81 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import re +from typing import Iterator from swh.model import hashutil from swh.objstorage.exc import ObjNotFoundError +from swh.objstorage.interface import CompositeObjId from swh.objstorage.multiplexer.filter.filter import ObjStorageFilter class IdObjStorageFilter(ObjStorageFilter, metaclass=abc.ABCMeta): """Filter that only allow operations if the object id match a requirement. Even for read operations, check before if the id match the requirements. This may prevent for unnecessary disk access. """ @abc.abstractmethod def is_valid(self, obj_id): """Indicates if the given id is valid.""" raise NotImplementedError( "Implementations of an IdObjStorageFilter " 'must have a "is_valid" method' ) def __contains__(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.__contains__(*args, obj_id=obj_id, **kwargs) return False def __len__(self): return sum(1 for i in [id for id in self.storage if self.is_valid(id)]) - def __iter__(self): + def __iter__(self) -> Iterator[CompositeObjId]: yield from filter(lambda id: self.is_valid(id), iter(self.storage)) def add(self, content, obj_id, check_presence=True, *args, **kwargs): if self.is_valid(obj_id): return self.storage.add(content, *args, obj_id=obj_id, **kwargs) def restore(self, content, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.restore(content, *args, obj_id=obj_id, **kwargs) def get(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.get(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def check(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.check(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) class RegexIdObjStorageFilter(IdObjStorageFilter): """Filter that allow operations if the content's id as hex match a regex.""" def __init__(self, storage, regex): super().__init__(storage) self.regex = re.compile(regex) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.regex.match(hex_obj_id) is not None class PrefixIdObjStorageFilter(IdObjStorageFilter): """Filter that allow operations if the hexlified id have a given prefix.""" def __init__(self, storage, prefix): super().__init__(storage) self.prefix = str(prefix) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return str(hex_obj_id).startswith(self.prefix) diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py 
diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py index 74fc670..eafe1cb 100644 --- a/swh/objstorage/multiplexer/multiplexer_objstorage.py +++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py @@ -1,308 +1,308 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import queue import threading -from typing import Dict +from typing import Dict, Iterator from swh.objstorage.exc import ObjNotFoundError -from swh.objstorage.interface import ObjId +from swh.objstorage.interface import CompositeObjId, ObjId from swh.objstorage.objstorage import ObjStorage class ObjStorageThread(threading.Thread): def __init__(self, storage): super().__init__(daemon=True) self.storage = storage self.commands = queue.Queue() def run(self): while True: try: mailbox, command, args, kwargs = self.commands.get(True, 0.05) except queue.Empty: continue try: ret = getattr(self.storage, command)(*args, **kwargs) except Exception as exc: self.queue_result(mailbox, "exception", exc) else: self.queue_result(mailbox, "result", ret) def queue_command(self, command, *args, mailbox=None, **kwargs): """Enqueue a new command to be processed by the thread. Args: command (str): one of the method names for the underlying storage. mailbox (queue.Queue): explicit mailbox if the calling thread wants to override it. args, kwargs: arguments for the command. Returns: queue.Queue: The mailbox you can read the response from """ if not mailbox: mailbox = queue.Queue() self.commands.put((mailbox, command, args, kwargs)) return mailbox def queue_result(self, mailbox, result_type, result): """Enqueue a new result in the mailbox. This also provides a reference to the storage, which can be useful when an exceptional condition arises. Args: mailbox (queue.Queue): the mailbox to which we need to enqueue the result result_type (str): one of 'result', 'exception' result: the result to pass back to the calling thread """ mailbox.put( { "type": result_type, "result": result, } ) @staticmethod def get_result_from_mailbox(mailbox, *args, **kwargs): """Unpack the result from the mailbox. Args: mailbox (queue.Queue): A mailbox to unpack a result from args: positional arguments to :func:`mailbox.get` kwargs: keyword arguments to :func:`mailbox.get` Returns: the next result unpacked from the queue Raises: either the exception we got back from the underlying storage, or :exc:`queue.Empty` if :func:`mailbox.get` raises that. """ result = mailbox.get(*args, **kwargs) if result["type"] == "exception": raise result["result"] from None else: return result["result"] @staticmethod def collect_results(mailbox, num_results): """Collect num_results from the mailbox""" collected = 0 ret = [] while collected < num_results: try: ret.append( ObjStorageThread.get_result_from_mailbox(mailbox, True, 0.05) ) except queue.Empty: continue collected += 1 return ret def __getattr__(self, attr): def call(*args, **kwargs): mailbox = self.queue_command(attr, *args, **kwargs) return self.get_result_from_mailbox(mailbox) return call def __contains__(self, *args, **kwargs): mailbox = self.queue_command("__contains__", *args, **kwargs) return self.get_result_from_mailbox(mailbox)
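The command/mailbox pattern used by ObjStorageThread, reduced to a self-contained, hedged sketch with no swh dependencies (names are illustrative):

import queue
import threading

def worker(commands: "queue.Queue") -> None:
    # Callers enqueue (mailbox, fn, args) and block on their private
    # mailbox; results and exceptions travel back through the same queue.
    while True:
        mailbox, fn, args = commands.get()
        try:
            mailbox.put(("result", fn(*args)))
        except Exception as exc:
            mailbox.put(("exception", exc))

commands: "queue.Queue" = queue.Queue()
threading.Thread(target=worker, args=(commands,), daemon=True).start()

mailbox: "queue.Queue" = queue.Queue()
commands.put((mailbox, sum, ([1, 2, 3],)))
kind, value = mailbox.get()
assert (kind, value) == ("result", 6)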
class MultiplexerObjStorage(ObjStorage): """Implementation of ObjStorage that distributes between multiple storages. The multiplexer object storage allows an input to be demultiplexed among multiple storages that will or will not accept it by themselves (see .filter package). As the ids can be different, no pre-computed ids should be submitted. Also, there are no guarantees that the returned ids can be used directly in the storages that the multiplexer manages. Use case examples follow. Example 1:: storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1', '0:2/2:4/4:6')) storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5') storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using 'storage', all the new contents will only be added to the v2 storage, while they will be retrievable from both. Example 2:: storage_v1 = filter.id_regex( PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'), r'[^012].*' ) storage_v2 = filter.id_regex( PathSlicingObjStorage('/dir2', '0:1/0:5'), r'[012].*' ) storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using this storage, the contents with a sha1 starting with 0, 1 or 2 will be redirected (read AND write) to storage_v2, while the others will be redirected to storage_v1. If a content starting with 0, 1 or 2 is present in storage_v1, it would be ignored anyway. """ def __init__(self, storages, **kwargs): super().__init__(**kwargs) self.storages = storages self.storage_threads = [ObjStorageThread(storage) for storage in storages] for thread in self.storage_threads: thread.start() def wrap_call(self, threads, call, *args, **kwargs): threads = list(threads) mailbox = queue.Queue() for thread in threads: thread.queue_command(call, *args, mailbox=mailbox, **kwargs) return ObjStorageThread.collect_results(mailbox, len(threads)) def get_read_threads(self, obj_id=None): yield from self.storage_threads def get_write_threads(self, obj_id=None): yield from self.storage_threads def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ return all( self.wrap_call( self.storage_threads, "check_config", check_write=check_write ) ) - def __contains__(self, obj_id): + def __contains__(self, obj_id: ObjId) -> bool: """Indicate if the given object is present in the storage. Args: obj_id: object identifier. Returns: True if and only if the object is present in the current object storage. """ for storage in self.get_read_threads(obj_id): if obj_id in storage: return True return False - def __iter__(self): + def __iter__(self) -> Iterator[CompositeObjId]: def obj_iterator(): for storage in self.storages: yield from storage return obj_iterator()
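A condensed, hedged restatement of the multiplexer's read/write semantics (illustrative function names; error handling simplified to the shape used by the get() and add() methods below):

def fan_out_add(write_storages, content, obj_id):
    # A write succeeds only if every write storage accepted it; any
    # exception fails the whole add.
    for storage in write_storages:
        storage.add(content, obj_id)

def first_hit_get(read_storages, obj_id, not_found_exc):
    # Reads try storages in order and return the first hit.
    for storage in read_storages:
        try:
            return storage.get(obj_id)
        except not_found_exc:
            continue
    raise not_found_exc(obj_id)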
""" self.wrap_call( self.get_write_threads(obj_id), "add", content, obj_id=obj_id, check_presence=check_presence, ) def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage.""" write_threads = list(self.get_write_threads()) results = self.wrap_call( write_threads, "add_batch", contents, check_presence=check_presence, ) summed = {"object:add": 0, "object:add:bytes": 0} for result in results: summed["object:add"] += result["object:add"] summed["object:add:bytes"] += result["object:add:bytes"] return { "object:add": summed["object:add"] // len(results), "object:add:bytes": summed["object:add:bytes"] // len(results), } def restore(self, content: bytes, obj_id: ObjId) -> None: return self.wrap_call( self.get_write_threads(obj_id), "restore", content, obj_id=obj_id, ).pop() def get(self, obj_id: ObjId) -> bytes: for storage in self.get_read_threads(obj_id): try: return storage.get(obj_id) except ObjNotFoundError: continue # If no storage contains this content, raise the error raise ObjNotFoundError(obj_id) def check(self, obj_id: ObjId) -> None: nb_present = 0 for storage in self.get_read_threads(obj_id): try: storage.check(obj_id) except ObjNotFoundError: continue else: nb_present += 1 # If there is an Error because of a corrupted file, then let it pass. # Raise the ObjNotFoundError only if the content couldn't be found in # all the storages. if nb_present == 0: raise ObjNotFoundError(obj_id) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission return all(self.wrap_call(self.get_write_threads(obj_id), "delete", obj_id)) diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index c3e446d..f2d59cf 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,132 +1,144 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import bz2 from itertools import dropwhile, islice import lzma from typing import Callable, Dict, Iterator, List, Optional import zlib from swh.model import hashutil from .constants import DEFAULT_LIMIT, ID_HASH_ALGO from .exc import ObjNotFoundError -from .interface import ObjId, ObjStorageInterface +from .interface import CompositeObjId, ObjId, ObjStorageInterface + + +def objid_to_default_hex(obj_id: ObjId) -> str: + """Converts SHA1 hashes and multi-hashes to the hexadecimal representation + of the SHA1.""" + if isinstance(obj_id, bytes): + return hashutil.hash_to_hex(obj_id) + elif isinstance(obj_id, str): + return obj_id + else: + return hashutil.hash_to_hex(obj_id[ID_HASH_ALGO]) def compute_hash(content, algo=ID_HASH_ALGO): """Compute the content's hash. Args: content (bytes): The raw content to hash hash_name (str): Hash's name (default to ID_HASH_ALGO) Returns: The ID_HASH_ALGO for the content """ return ( hashutil.MultiHash.from_data( content, hash_names=[algo], ) .digest() .get(algo) ) class NullCompressor: def compress(self, data): return data def flush(self): return b"" class NullDecompressor: def decompress(self, data: bytes) -> bytes: return data @property def unused_data(self) -> bytes: return b"" class _CompressorProtocol: def compress(self, data: bytes) -> bytes: ... def flush(self) -> bytes: ... class _DecompressorProtocol: def decompress(self, data: bytes) -> bytes: ... 
decompressors: Dict[str, Callable[[], _DecompressorProtocol]] = { "bz2": bz2.BZ2Decompressor, # type: ignore "lzma": lzma.LZMADecompressor, # type: ignore "gzip": lambda: zlib.decompressobj(wbits=31), # type: ignore "zlib": zlib.decompressobj, # type: ignore "none": NullDecompressor, # type: ignore } compressors: Dict[str, Callable[[], _CompressorProtocol]] = { "bz2": bz2.BZ2Compressor, # type: ignore "lzma": lzma.LZMACompressor, # type: ignore "gzip": lambda: zlib.compressobj(wbits=31), # type: ignore "zlib": zlib.compressobj, # type: ignore "none": NullCompressor, # type: ignore } class ObjStorage(metaclass=abc.ABCMeta): def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed self.allow_delete = allow_delete def add_batch(self: ObjStorageInterface, contents, check_presence=True) -> Dict: summary = {"object:add": 0, "object:add:bytes": 0} for obj_id, content in contents.items(): if check_presence and obj_id in self: continue self.add(content, obj_id, check_presence=False) summary["object:add"] += 1 summary["object:add:bytes"] += len(content) return summary def restore(self: ObjStorageInterface, content: bytes, obj_id: ObjId) -> None: # check_presence to false will erase the potential previous content. self.add(content, obj_id, check_presence=False) def get_batch( self: ObjStorageInterface, obj_ids: List[ObjId] ) -> Iterator[Optional[bytes]]: for obj_id in obj_ids: try: yield self.get(obj_id) except ObjNotFoundError: yield None @abc.abstractmethod def delete(self, obj_id: ObjId): if not self.allow_delete: raise PermissionError("Delete is not allowed.") def list_content( self: ObjStorageInterface, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT, - ) -> Iterator[ObjId]: + ) -> Iterator[CompositeObjId]: it = iter(self) - if last_obj_id is not None: - it = dropwhile(last_obj_id.__ge__, it) + if last_obj_id: + last_obj_id_hex = objid_to_default_hex(last_obj_id) + it = dropwhile(lambda x: objid_to_default_hex(x) <= last_obj_id_hex, it) return islice(it, limit)
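The skip-then-slice strategy of the default list_content above, as a standalone hedged sketch over plain hex strings (helper name hypothetical):

from itertools import dropwhile, islice

def paginate(ids, last_hex=None, limit=10):
    # Skip everything up to and including last_hex, then take `limit` items,
    # mirroring the dropwhile/islice combination used by list_content.
    it = iter(ids)
    if last_hex is not None:
        it = dropwhile(lambda h: h <= last_hex, it)
    return list(islice(it, limit))

assert paginate(["aa", "ab", "ac", "ad"], last_hex="ab", limit=2) == ["ac", "ad"]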
diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py index b40a262..438553c 100644 --- a/swh/objstorage/tests/objstorage_testing.py +++ b/swh/objstorage/tests/objstorage_testing.py @@ -1,213 +1,216 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import inspect from swh.objstorage import exc from swh.objstorage.interface import ObjStorageInterface from swh.objstorage.objstorage import compute_hash class ObjStorageTestFixture: def test_types(self): """Checks all methods of ObjStorageInterface are implemented by this backend, and that they have the same signature.""" # Create an instance of the protocol (which cannot be instantiated # directly, so this creates a subclass, then instantiates it) interface = type("_", (ObjStorageInterface,), {})() assert "get_batch" in dir(interface) missing_methods = [] for meth_name in dir(interface): - if meth_name.startswith("_"): + if meth_name.startswith("_") and meth_name not in ( + "__iter__", + "__contains__", + ): continue interface_meth = getattr(interface, meth_name) concrete_meth = getattr(self.storage, meth_name) expected_signature = inspect.signature(interface_meth) actual_signature = inspect.signature(concrete_meth) assert expected_signature == actual_signature, meth_name assert missing_methods == [] # If all the assertions above succeed, then this one should too. # But there's no harm in double-checking. # And we could replace the assertions above by this one, but unlike # the assertions above, it doesn't explain what is missing. assert isinstance(self.storage, ObjStorageInterface) def hash_content(self, content): obj_id = compute_hash(content) return content, obj_id def assertContentMatch(self, obj_id, expected_content): # noqa content = self.storage.get(obj_id) self.assertEqual(content, expected_content) def test_check_config(self): self.assertTrue(self.storage.check_config(check_write=False)) self.assertTrue(self.storage.check_config(check_write=True)) def test_contains(self): content_p, obj_id_p = self.hash_content(b"contains_present") content_m, obj_id_m = self.hash_content(b"contains_missing") self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_add_get_w_id(self): content, obj_id = self.hash_content(b"add_get_w_id") self.storage.add(content, obj_id=obj_id) self.assertContentMatch(obj_id, content) def test_add_twice(self): content, obj_id = self.hash_content(b"add_twice") self.storage.add(content, obj_id=obj_id) self.assertContentMatch(obj_id, content) self.storage.add(content, obj_id=obj_id, check_presence=False) self.assertContentMatch(obj_id, content) def test_add_big(self): content, obj_id = self.hash_content(b"add_big" * 1024 * 1024) self.storage.add(content, obj_id=obj_id) self.assertContentMatch(obj_id, content) def test_add_get_batch(self): content1, obj_id1 = self.hash_content(b"add_get_batch_1") content2, obj_id2 = self.hash_content(b"add_get_batch_2") self.storage.add(content1, obj_id1) self.storage.add(content2, obj_id2) cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2]) self.assertEqual(cr1, content1) self.assertEqual(cr2, content2) def test_get_batch_unexisting_content(self): content, obj_id = self.hash_content(b"get_batch_unexisting_content") result = list(self.storage.get_batch([obj_id])) self.assertTrue(len(result) == 1) self.assertIsNone(result[0]) def test_restore_content(self): self.storage.allow_delete = True valid_content, valid_obj_id = self.hash_content(b"restore_content") invalid_content = b"unexpected content" self.storage.add(invalid_content, valid_obj_id) with self.assertRaises(exc.Error): self.storage.check(valid_obj_id) self.storage.restore(valid_content, valid_obj_id) self.assertContentMatch(valid_obj_id, valid_content) def test_get_missing(self): content, obj_id = self.hash_content(b"get_missing") with self.assertRaises(exc.ObjNotFoundError) as e: self.storage.get(obj_id) self.assertIn(obj_id, e.exception.args) def test_check_missing(self): content, obj_id = self.hash_content(b"check_missing") with self.assertRaises(exc.Error): self.storage.check(obj_id) def test_check_present(self): content, obj_id = self.hash_content(b"check_present") self.storage.add(content, obj_id) try: self.storage.check(obj_id) except exc.Error: self.fail("Integrity check failed") def test_delete_missing(self): self.storage.allow_delete = True content, obj_id = self.hash_content(b"missing_content_to_delete") with self.assertRaises(exc.Error): self.storage.delete(obj_id) def test_delete_present(self): self.storage.allow_delete = True content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) self.assertTrue(self.storage.delete(obj_id)) with self.assertRaises(exc.Error):
self.storage.get(obj_id) def test_delete_not_allowed(self): self.storage.allow_delete = False content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.storage.delete(obj_id) def test_delete_not_allowed_by_default(self): content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.assertTrue(self.storage.delete(obj_id)) def test_add_batch(self): contents = {} expected_content_add = 0 expected_content_add_bytes = 0 for i in range(50): content = b"Test content %02d" % i content, obj_id = self.hash_content(content) contents[obj_id] = content expected_content_add_bytes += len(content) expected_content_add += 1 ret = self.storage.add_batch(contents) self.assertEqual( ret, { "object:add": expected_content_add, "object:add:bytes": expected_content_add_bytes, }, ) for obj_id in contents: self.assertIn(obj_id, self.storage) def test_content_iterator(self): sto_obj_ids = iter(self.storage) sto_obj_ids = list(sto_obj_ids) self.assertFalse(sto_obj_ids) obj_ids = set() for i in range(100): content, obj_id = self.hash_content(b"content %d" % i) self.storage.add(content, obj_id=obj_id) obj_ids.add(obj_id) sto_obj_ids = set(self.storage) self.assertEqual(sto_obj_ids, obj_ids) def test_list_content(self): all_ids = [] for i in range(1200): content = b"example %d" % i obj_id = compute_hash(content) self.storage.add(content, obj_id) all_ids.append(obj_id) all_ids.sort() ids = list(self.storage.list_content()) self.assertEqual(len(ids), 1200) self.assertEqual(ids[0], all_ids[0]) self.assertEqual(ids[100], all_ids[100]) self.assertEqual(ids[999], all_ids[999]) ids = list(self.storage.list_content(limit=10)) self.assertEqual(len(ids), 10) self.assertEqual(ids[0], all_ids[0]) self.assertEqual(ids[9], all_ids[9]) ids = list(self.storage.list_content(last_obj_id=all_ids[999], limit=100)) self.assertEqual(len(ids), 100) self.assertEqual(ids[0], all_ids[1000]) self.assertEqual(ids[9], all_ids[1009])
diff --git a/swh/objstorage/tests/test_objstorage_random_generator.py b/swh/objstorage/tests/test_objstorage_random_generator.py index 83f79b4..9349572 100644 --- a/swh/objstorage/tests/test_objstorage_random_generator.py +++ b/swh/objstorage/tests/test_objstorage_random_generator.py @@ -1,39 +1,41 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections.abc import Iterator from swh.objstorage.factory import get_objstorage def test_random_generator_objstorage(): sto = get_objstorage("random") assert sto blobs = [sto.get(None) for i in range(100)] lengths = [len(x) for x in blobs] assert max(lengths) <= 55056238 def test_random_generator_objstorage_list_content(): sto = get_objstorage("random", total=100) assert isinstance(sto.list_content(), Iterator) - assert list(sto.list_content()) == [b"%d" % i for i in range(1, 101)] + assert list(sto.list_content()) == [{"sha1": b"%d" % i} for i in range(1, 101)] - assert list(sto.list_content(limit=10)) == [b"%d" % i for i in range(1, 11)] + assert list(sto.list_content(limit=10)) == [ + {"sha1": b"%d" % i} for i in range(1, 11) + ] assert list(sto.list_content(last_obj_id=b"10", limit=10)) == [ - b"%d" % i for i in range(11, 21) + {"sha1": b"%d" % i} for i in range(11, 21) ]
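For callers migrating to the dict-shaped ids asserted above, a hedged normalization helper (hypothetical name, not part of the patch):

from typing import Dict, Union

def as_composite(obj_id: Union[bytes, Dict[str, bytes]]) -> Dict[str, bytes]:
    # Normalize whatever list_content() yields to the composite dict form.
    if isinstance(obj_id, bytes):
        return {"sha1": obj_id}
    return obj_id

assert as_composite(b"\x00" * 20) == {"sha1": b"\x00" * 20}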
def test_random_generator_objstorage_total(): sto = get_objstorage("random", total=5) assert len([x for x in sto]) == 5 def test_random_generator_objstorage_size(): sto = get_objstorage("random", filesize=10) for i in range(10): assert len(sto.get(None)) == 10