diff --git a/swh/objstorage/backends/azure.py b/swh/objstorage/backends/azure.py
index 1cab6a2..ca4b786 100644
--- a/swh/objstorage/backends/azure.py
+++ b/swh/objstorage/backends/azure.py
@@ -1,419 +1,415 @@
 # Copyright (C) 2016-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import asyncio
 import contextlib
 import datetime
 from itertools import product
 import string
 from typing import Dict, Optional, Union
 import warnings

 from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
 from azure.storage.blob import (
     ContainerClient,
     ContainerSasPermissions,
     generate_container_sas,
 )
 from azure.storage.blob.aio import ContainerClient as AsyncContainerClient

 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
 )
 from swh.objstorage.utils import call_async


 def get_container_url(
     account_name: str,
     account_key: str,
     container_name: str,
     access_policy: str = "read_only",
     expiry: datetime.timedelta = datetime.timedelta(days=365),
     **kwargs,
 ) -> str:
     """Get the full url, for the given container on the given account, with a
     Shared Access Signature granting the specified access policy.

     Args:
       account_name: name of the storage account for which to generate the URL
       account_key: shared account key of the storage account used to generate the SAS
       container_name: name of the container for which to grant access in the storage
         account
       access_policy: one of ``read_only``, ``append_only``, ``full``
       expiry: the interval in the future with which the signature will expire

     Returns:
       the full URL of the container, with the shared access signature.
     """

     access_policies = {
         "read_only": ContainerSasPermissions(
             read=True, list=True, delete=False, write=False
         ),
         "append_only": ContainerSasPermissions(
             read=True, list=True, delete=False, write=True
         ),
         "full": ContainerSasPermissions(read=True, list=True, delete=True, write=True),
     }

     current_time = datetime.datetime.utcnow()

     signature = generate_container_sas(
         account_name,
         container_name,
         account_key=account_key,
         permission=access_policies[access_policy],
         start=current_time + datetime.timedelta(minutes=-1),
         expiry=current_time + expiry,
     )

     return f"https://{account_name}.blob.core.windows.net/{container_name}?{signature}"


 class AzureCloudObjStorage(ObjStorage):
     """ObjStorage backend for Azure blob storage accounts.

     Args:
       container_url: the URL of the container in which the objects are stored.
       account_name: (deprecated) the name of the storage account under which
         objects are stored
       api_secret_key: (deprecated) the shared account key
       container_name: (deprecated) the name of the container under which
         objects are stored
       compression: the compression algorithm used to compress objects in storage

     Notes:
       The container url should contain the credentials via a "Shared Access
       Signature". The :func:`get_container_url` helper can be used to generate
       such a URL from the account's access keys. The ``account_name``,
       ``api_secret_key`` and ``container_name`` arguments are deprecated.
""" def __init__( self, container_url: Optional[str] = None, account_name: Optional[str] = None, api_secret_key: Optional[str] = None, container_name: Optional[str] = None, compression="gzip", **kwargs, ): if container_url is None: if account_name is None or api_secret_key is None or container_name is None: raise ValueError( "AzureCloudObjStorage must have a container_url or all three " "account_name, api_secret_key and container_name" ) else: warnings.warn( "The Azure objstorage account secret key parameters are " "deprecated, please use container URLs instead.", DeprecationWarning, ) container_url = get_container_url( account_name=account_name, account_key=api_secret_key, container_name=container_name, access_policy="full", ) super().__init__(**kwargs) self.container_url = container_url self.compression = compression def get_container_client(self, hex_obj_id): """Get the container client for the container that contains the object with internal id hex_obj_id This is used to allow the PrefixedAzureCloudObjStorage to dispatch the client according to the prefix of the object id. """ return ContainerClient.from_container_url(self.container_url) @contextlib.asynccontextmanager async def get_async_container_clients(self): """Returns a collection of container clients, to be passed to ``get_async_blob_client``. Each container may not be used in more than one asyncio loop.""" client = AsyncContainerClient.from_container_url(self.container_url) async with client: yield {"": client} def get_blob_client(self, hex_obj_id): """Get the azure blob client for the given hex obj id""" container_client = self.get_container_client(hex_obj_id) return container_client.get_blob_client(blob=hex_obj_id) def get_async_blob_client(self, hex_obj_id, container_clients): """Get the azure blob client for the given hex obj id and a collection yielded by ``get_async_container_clients``.""" return container_clients[""].get_blob_client(blob=hex_obj_id) def get_all_container_clients(self): """Get all active block_blob_services""" yield self.get_container_client("") def _internal_id(self, obj_id): """Internal id is the hex version in objstorage.""" return hashutil.hash_to_hex(obj_id) def check_config(self, *, check_write): """Check the configuration for this object storage""" for container_client in self.get_all_container_clients(): props = container_client.get_container_properties() # FIXME: check_write is ignored here if not props: return False return True def __contains__(self, obj_id): """Does the storage contains the obj_id.""" hex_obj_id = self._internal_id(obj_id) client = self.get_blob_client(hex_obj_id) try: client.get_blob_properties() except ResourceNotFoundError: return False else: return True def __iter__(self): """Iterate over the objects present in the storage.""" for client in self.get_all_container_clients(): for obj in client.list_blobs(): yield hashutil.hash_to_bytes(obj.name) def __len__(self): """Compute the number of objects in the current object storage. Returns: number of objects contained in the storage. """ return sum(1 for i in self) - def add(self, content, obj_id=None, check_presence=True): + def add(self, content, obj_id, check_presence=True): """Add an obj in storage if it's not there already.""" - if obj_id is None: - # Checksum is missing, compute it on the fly. 
-            obj_id = compute_hash(content)
-
         if check_presence and obj_id in self:
             return obj_id

         hex_obj_id = self._internal_id(obj_id)

         # Send the compressed content
         compressor = compressors[self.compression]()
         data = compressor.compress(content)
         data += compressor.flush()

         client = self.get_blob_client(hex_obj_id)
         try:
             client.upload_blob(data=data, length=len(data))
         except ResourceExistsError:
             # There's a race condition between check_presence and upload_blob,
             # that we can't get rid of as the azure api doesn't allow atomic
             # replaces or renaming a blob. As the restore operation explicitly
             # removes the blob, it should be safe to just ignore the error.
             pass

         return obj_id

     def restore(self, content, obj_id=None):
         """Restore a content."""
         if obj_id is None:
             # Checksum is missing, compute it on the fly.
             obj_id = compute_hash(content)

         if obj_id in self:
             self.delete(obj_id)

         return self.add(content, obj_id, check_presence=False)

     def get(self, obj_id):
         """retrieve blob's content if found."""
         return call_async(self._get_async, obj_id)

     async def _get_async(self, obj_id, container_clients=None):
         """Coroutine implementing ``get(obj_id)`` using azure-storage-blob's
         asynchronous implementation.
         While ``get(obj_id)`` does not need asynchronicity, this is useful to
         ``get_batch(obj_ids)``, as it can run multiple ``_get_async`` tasks
         concurrently."""
         if container_clients is None:
             # If the container_clients argument is not passed, create a new
             # collection of container_clients and restart the function with it.
             async with self.get_async_container_clients() as container_clients:
                 return await self._get_async(obj_id, container_clients)

         hex_obj_id = self._internal_id(obj_id)
         client = self.get_async_blob_client(hex_obj_id, container_clients)

         try:
             download = await client.download_blob()
         except ResourceNotFoundError:
             raise ObjNotFoundError(obj_id) from None
         else:
             data = await download.content_as_bytes()

         decompressor = decompressors[self.compression]()
         ret = decompressor.decompress(data)
         if decompressor.unused_data:
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id)

         return ret

     async def _get_async_or_none(self, obj_id, container_clients):
         """Like ``get_async(obj_id)``, but returns None instead of raising
         ResourceNotFoundError. Used by ``get_batch`` so other blobs can be
         returned even if one is missing."""
         try:
             return await self._get_async(obj_id, container_clients)
         except ObjNotFoundError:
             return None

     async def _get_batch_async(self, obj_ids):
         async with self.get_async_container_clients() as container_clients:
             return await asyncio.gather(
                 *[
                     self._get_async_or_none(obj_id, container_clients)
                     for obj_id in obj_ids
                 ]
             )

     def get_batch(self, obj_ids):
         """Retrieve objects' raw content in bulk from storage, concurrently."""
         return call_async(self._get_batch_async, obj_ids)

     def check(self, obj_id):
         """Check the content integrity."""
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)

     def delete(self, obj_id):
         """Delete an object."""
         super().delete(obj_id)  # Check delete permission
         hex_obj_id = self._internal_id(obj_id)
         client = self.get_blob_client(hex_obj_id)
         try:
             client.delete_blob()
         except ResourceNotFoundError:
             raise ObjNotFoundError(obj_id) from None

         return True


 class PrefixedAzureCloudObjStorage(AzureCloudObjStorage):
     """ObjStorage with azure capabilities, striped by prefix.
     accounts is a dict containing entries of the form:
         <prefix>: <container_url_for_prefix>
     """

     def __init__(
         self,
         accounts: Dict[str, Union[str, Dict[str, str]]],
         compression="gzip",
         **kwargs,
     ):
         # shortcut AzureCloudObjStorage __init__
         ObjStorage.__init__(self, **kwargs)

         self.compression = compression

         # Definition sanity check
         prefix_lengths = set(len(prefix) for prefix in accounts)
         if not len(prefix_lengths) == 1:
             raise ValueError(
                 "Inconsistent prefixes, found lengths %s"
                 % ", ".join(str(lst) for lst in sorted(prefix_lengths))
             )

         self.prefix_len = prefix_lengths.pop()

         expected_prefixes = set(
             "".join(letters)
             for letters in product(
                 set(string.hexdigits.lower()), repeat=self.prefix_len
             )
         )
         missing_prefixes = expected_prefixes - set(accounts)
         if missing_prefixes:
             raise ValueError(
                 "Missing prefixes %s" % ", ".join(sorted(missing_prefixes))
             )

         do_warning = False

         self.container_urls = {}
         for prefix, container_url in accounts.items():
             if isinstance(container_url, dict):
                 do_warning = True
                 container_url = get_container_url(
                     account_name=container_url["account_name"],
                     account_key=container_url["api_secret_key"],
                     container_name=container_url["container_name"],
                     access_policy="full",
                 )
             self.container_urls[prefix] = container_url

         if do_warning:
             warnings.warn(
                 "The Azure objstorage account secret key parameters are "
                 "deprecated, please use container URLs instead.",
                 DeprecationWarning,
             )

     def get_container_client(self, hex_obj_id):
         """Get the container client for the container that contains the object
         with internal id hex_obj_id
         """
         prefix = hex_obj_id[: self.prefix_len]
         return ContainerClient.from_container_url(self.container_urls[prefix])

     @contextlib.asynccontextmanager
     async def get_async_container_clients(self):
         # This is equivalent to:
         # client1 = AsyncContainerClient.from_container_url(url1)
         # ...
         # client16 = AsyncContainerClient.from_container_url(url16)
         # async with client1, ..., client16:
         #     yield {prefix1: client1, ..., prefix16: client16}
         clients = {
             prefix: AsyncContainerClient.from_container_url(url)
             for (prefix, url) in self.container_urls.items()
         }
         async with contextlib.AsyncExitStack() as stack:
             for client in clients.values():
                 await stack.enter_async_context(client)
             yield clients

     def get_async_blob_client(self, hex_obj_id, container_clients):
         """Get the azure blob client for the given hex obj id and a collection
         yielded by ``get_async_container_clients``."""

         prefix = hex_obj_id[: self.prefix_len]
         return container_clients[prefix].get_blob_client(blob=hex_obj_id)

     def get_all_container_clients(self):
         """Get all active container clients"""
         # iterate on items() to sort blob services;
         # needed to be able to paginate in the list_content() method
         yield from (
             self.get_container_client(prefix) for prefix in sorted(self.container_urls)
         )
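
Illustrative note on the Azure backend above (not part of the patch): a minimal
usage sketch. The account name, key and container below are placeholders; with
this series, obj_id is computed by the caller.

    from swh.objstorage.backends.azure import AzureCloudObjStorage, get_container_url
    from swh.objstorage.objstorage import compute_hash

    # Placeholder credentials; a real deployment uses its own account/key.
    url = get_container_url(
        account_name="myaccount",
        account_key="bXktYmFzZTY0LWtleQ==",
        container_name="contents",
        access_policy="append_only",
    )
    objstorage = AzureCloudObjStorage(container_url=url)

    content = b"foo"
    # obj_id is now a required argument: callers hash the content themselves.
    objstorage.add(content, obj_id=compute_hash(content))
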
""" randomizer = Randomizer() if filesize: gen = repeat(filesize) else: gen = gen_sizes() if total: gen = islice(gen, total) for objsize in gen: yield randomizer.read(objsize) class RandomGeneratorObjStorage(ObjStorage): """A stupid read-only storage that generates blobs for testing purpose.""" def __init__(self, filesize=None, total=None, **kwargs): super().__init__() if filesize: filesize = int(filesize) self.filesize = filesize if total: total = int(total) self.total = total self._content_generator = None @property def content_generator(self): if self._content_generator is None: self._content_generator = gen_random_content(self.total, self.filesize) return self._content_generator def check_config(self, *, check_write): return True def __contains__(self, obj_id, *args, **kwargs): return False def __iter__(self): i = 1 while True: j = yield (b"%d" % i) if self.total and i >= self.total: logger.debug("DONE") break if j is not None: i = j else: i += 1 def get(self, obj_id, *args, **kwargs): return next(self.content_generator) - def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): + def add(self, content, obj_id, check_presence=True, *args, **kwargs): pass def check(self, obj_id, *args, **kwargs): return True def delete(self, obj_id, *args, **kwargs): return True def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): it = iter(self) if last_obj_id: next(it) it.send(int(last_obj_id)) return islice(it, limit) diff --git a/swh/objstorage/backends/http.py b/swh/objstorage/backends/http.py index b90f2f3..8bef36d 100644 --- a/swh/objstorage/backends/http.py +++ b/swh/objstorage/backends/http.py @@ -1,92 +1,92 @@ # Copyright (C) 2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging from urllib.parse import urljoin import requests from swh.model import hashutil from swh.objstorage import exc from swh.objstorage.objstorage import ( DEFAULT_LIMIT, ObjStorage, compute_hash, decompressors, ) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.ERROR) class HTTPReadOnlyObjStorage(ObjStorage): """Simple ObjStorage retrieving objects from an HTTP server. 
diff --git a/swh/objstorage/backends/http.py b/swh/objstorage/backends/http.py
index b90f2f3..8bef36d 100644
--- a/swh/objstorage/backends/http.py
+++ b/swh/objstorage/backends/http.py
@@ -1,92 +1,92 @@
 # Copyright (C) 2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import logging
 from urllib.parse import urljoin

 import requests

 from swh.model import hashutil
 from swh.objstorage import exc
 from swh.objstorage.objstorage import (
     DEFAULT_LIMIT,
     ObjStorage,
     compute_hash,
     decompressors,
 )

 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.ERROR)


 class HTTPReadOnlyObjStorage(ObjStorage):
     """Simple ObjStorage retrieving objects from an HTTP server.

     For example, can be used to retrieve objects from S3:

       objstorage:
         cls: http
         url: https://softwareheritage.s3.amazonaws.com/content/
     """

     def __init__(self, url=None, compression=None, **kwargs):
         super().__init__(**kwargs)
         self.session = requests.sessions.Session()
         self.root_path = url
         if not self.root_path.endswith("/"):
             self.root_path += "/"
         self.compression = compression

     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         return True

     def __contains__(self, obj_id):
         resp = self.session.head(self._path(obj_id))
         return resp.status_code == 200

     def __iter__(self):
         raise exc.NonIterableObjStorage("__iter__")

     def __len__(self):
         raise exc.NonIterableObjStorage("__len__")

-    def add(self, content, obj_id=None, check_presence=True):
+    def add(self, content, obj_id, check_presence=True):
         raise exc.ReadOnlyObjStorage("add")

     def delete(self, obj_id):
         raise exc.ReadOnlyObjStorage("delete")

     def restore(self, content, obj_id=None):
         raise exc.ReadOnlyObjStorage("restore")

     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         raise exc.NonIterableObjStorage("__len__")

     def get(self, obj_id):
         try:
             resp = self.session.get(self._path(obj_id))
             resp.raise_for_status()
         except Exception:
             raise exc.ObjNotFoundError(obj_id)

         ret: bytes = resp.content
         if self.compression:
             d = decompressors[self.compression]()
             ret = d.decompress(ret)
             if d.unused_data:
                 hex_obj_id = hashutil.hash_to_hex(obj_id)
                 raise exc.Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret

     def check(self, obj_id):
         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise exc.Error(obj_id)

     def _path(self, obj_id):
         return urljoin(self.root_path, hashutil.hash_to_hex(obj_id))
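
A configuration sketch (not part of the patch) matching the docstring above;
the object id is arbitrary and the public mirror may or may not hold it.

    from swh.objstorage.factory import get_objstorage

    # Same settings as the YAML example in the docstring.
    objstorage = get_objstorage(
        cls="http",
        url="https://softwareheritage.s3.amazonaws.com/content/",
    )
    obj_id = bytes.fromhex("34973274ccef6ab4dfaaf86599792fa9c3fe4689")
    if obj_id in objstorage:
        data = objstorage.get(obj_id)
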
""" def __init__(self, **args): super().__init__() self.state = {} def check_config(self, *, check_write): return True def __contains__(self, obj_id): return obj_id in self.state def __iter__(self): return iter(sorted(self.state)) - def add(self, content, obj_id=None, check_presence=True): - if obj_id is None: - obj_id = compute_hash(content) - + def add(self, content, obj_id, check_presence=True): if check_presence and obj_id in self: return obj_id self.state[obj_id] = content return obj_id def get(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) return self.state[obj_id] def check(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) if compute_hash(self.state[obj_id]) != obj_id: raise Error("Corrupt object %s" % obj_id) return True def delete(self, obj_id): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.state.pop(obj_id) return True diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py index a908d8d..34ea172 100644 --- a/swh/objstorage/backends/libcloud.py +++ b/swh/objstorage/backends/libcloud.py @@ -1,255 +1,251 @@ # Copyright (C) 2016-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from collections import OrderedDict from collections.abc import Iterator from typing import Optional from urllib.parse import urlencode from libcloud.storage import providers import libcloud.storage.drivers.s3 from libcloud.storage.types import ObjectDoesNotExistError, Provider from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.objstorage import ( ObjStorage, compressors, compute_hash, decompressors, ) def patch_libcloud_s3_urlencode(): """Patches libcloud's S3 backend to properly sign queries. Recent versions of libcloud are not affected (they use signature V4), but 1.5.0 (the one in Debian 9) is.""" def s3_urlencode(params): """Like urllib.parse.urlencode, but sorts the parameters first. This is required to properly compute the request signature, see https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement """ # noqa return urlencode(OrderedDict(sorted(params.items()))) libcloud.storage.drivers.s3.urlencode = s3_urlencode patch_libcloud_s3_urlencode() class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta): """Abstract ObjStorage that connect to a cloud using Libcloud Implementations of this class must redefine the _get_provider method to make it return a driver provider (i.e. object that supports `get_driver` method) which return a LibCloud driver (see https://libcloud.readthedocs.io/en/latest/storage/api.html). 
diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py
index a908d8d..34ea172 100644
--- a/swh/objstorage/backends/libcloud.py
+++ b/swh/objstorage/backends/libcloud.py
@@ -1,255 +1,251 @@
 # Copyright (C) 2016-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import abc
 from collections import OrderedDict
 from collections.abc import Iterator
 from typing import Optional
 from urllib.parse import urlencode

 from libcloud.storage import providers
 import libcloud.storage.drivers.s3
 from libcloud.storage.types import ObjectDoesNotExistError, Provider

 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
 )


 def patch_libcloud_s3_urlencode():
     """Patches libcloud's S3 backend to properly sign queries.

     Recent versions of libcloud are not affected (they use signature V4),
     but 1.5.0 (the one in Debian 9) is."""

     def s3_urlencode(params):
         """Like urllib.parse.urlencode, but sorts the parameters first.
         This is required to properly compute the request signature, see
         https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement
         """  # noqa
         return urlencode(OrderedDict(sorted(params.items())))

     libcloud.storage.drivers.s3.urlencode = s3_urlencode


 patch_libcloud_s3_urlencode()


 class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta):
     """Abstract ObjStorage that connects to a cloud using Libcloud

     Implementations of this class must redefine the _get_provider
     method to make it return a driver provider (i.e. object that
     supports the `get_driver` method) which returns a LibCloud driver
     (see https://libcloud.readthedocs.io/en/latest/storage/api.html).

     Args:
       container_name: Name of the base container
       path_prefix: prefix to prepend to object paths in the container,
                    separated with a slash
       compression: compression algorithm to use for objects
       kwargs: extra arguments are passed through to the LibCloud driver
     """

     def __init__(
         self,
         container_name: str,
         compression: Optional[str] = None,
         path_prefix: Optional[str] = None,
         **kwargs,
     ):
         super().__init__(**kwargs)
         self.driver = self._get_driver(**kwargs)
         self.container_name = container_name
         self.container = self.driver.get_container(container_name=container_name)
         self.compression = compression
         self.path_prefix = None
         if path_prefix:
             self.path_prefix = path_prefix.rstrip("/") + "/"

     def _get_driver(self, **kwargs):
         """Initialize a driver to communicate with the cloud

         Kwargs: arguments passed to the StorageDriver class, typically
           key: key to connect to the API.
           secret: secret key for authentication.
           secure: (bool) support HTTPS
           host: (str)
           port: (int)
           api_version: (str)
           region: (str)

         Returns:
             a Libcloud driver to a cloud storage.

         """
         # Get the driver class from its description.
         cls = providers.get_driver(self._get_provider())
         # Initialize the driver.
         return cls(**kwargs)

     @abc.abstractmethod
     def _get_provider(self):
         """Get a libcloud driver provider

         This method must be overridden by subclasses to specify which
         of the native libcloud drivers the current storage should
         connect to. Alternatively, a provider for a custom driver may
         be returned, in which case the provider will have to support
         the `get_driver` method.

         """
         raise NotImplementedError(
             "%s must implement `get_provider` method" % type(self)
         )

     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         # FIXME: hopefully this blew up during instantiation
         return True

     def __contains__(self, obj_id):
         try:
             self._get_object(obj_id)
         except ObjNotFoundError:
             return False
         else:
             return True

     def __iter__(self):
         """Iterate over the objects present in the storage

         Warning: Iteration over the contents of a cloud-based object storage
         may have bad efficiency: due to the very high number of objects in it
         and the fact that it is remote, getting all the contents of the
         current object storage may result in a lot of network requests.

         You almost certainly don't want to use this method in production.
         """
         for obj in self.driver.iterate_container_objects(self.container):
             name = obj.name

             if self.path_prefix and not name.startswith(self.path_prefix):
                 continue

             if self.path_prefix:
                 name = name[len(self.path_prefix) :]

             yield hashutil.hash_to_bytes(name)

     def __len__(self):
         """Compute the number of objects in the current object storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performance applies.

         Returns:
             number of objects contained in the storage.

         """
         return sum(1 for i in self)

-    def add(self, content, obj_id=None, check_presence=True):
-        if obj_id is None:
-            # Checksum is missing, compute it on the fly.
-            obj_id = compute_hash(content)
-
+    def add(self, content, obj_id, check_presence=True):
         if check_presence and obj_id in self:
             return obj_id

         self._put_object(content, obj_id)
         return obj_id

     def restore(self, content, obj_id=None):
         return self.add(content, obj_id, check_presence=False)

     def get(self, obj_id):
         obj = b"".join(self._get_object(obj_id).as_stream())
         d = decompressors[self.compression]()
         ret = d.decompress(obj)
         if d.unused_data:
             hex_obj_id = hashutil.hash_to_hex(obj_id)
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret

     def check(self, obj_id):
         # Check that the file exists, as _get_object raises ObjNotFoundError
         self._get_object(obj_id)

         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         obj = self._get_object(obj_id)
         return self.driver.delete_object(obj)

     def _object_path(self, obj_id):
         """Get the full path to an object"""
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         if self.path_prefix:
             return self.path_prefix + hex_obj_id
         else:
             return hex_obj_id

     def _get_object(self, obj_id):
         """Get a Libcloud wrapper for an object pointer.

         This wrapper does not retrieve the content of the object
         directly.

         """
         object_path = self._object_path(obj_id)

         try:
             return self.driver.get_object(self.container_name, object_path)
         except ObjectDoesNotExistError:
             raise ObjNotFoundError(obj_id)

     def _compressor(self, data):
         comp = compressors[self.compression]()
         for chunk in data:
             cchunk = comp.compress(chunk)
             if cchunk:
                 yield cchunk

         trail = comp.flush()
         if trail:
             yield trail

     def _put_object(self, content, obj_id):
         """Create an object in the cloud storage.

         Created object will contain the content and be referenced by
         the given id.

         """
         object_path = self._object_path(obj_id)

         if not isinstance(content, Iterator):
             content = (content,)

         self.driver.upload_object_via_stream(
             self._compressor(content), self.container, object_path
         )


 class AwsCloudObjStorage(CloudObjStorage):
     """Amazon's S3 Cloud-based object storage"""

     def _get_provider(self):
         return Provider.S3


 class OpenStackCloudObjStorage(CloudObjStorage):
     """OpenStack Swift Cloud based object storage"""

     def _get_provider(self):
         return Provider.OPENSTACK_SWIFT
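
Instantiation sketch for the S3 flavour above (not part of the patch); the
bucket name and credentials are placeholders, and extra keyword arguments are
forwarded verbatim to the libcloud driver.

    from swh.objstorage.backends.libcloud import AwsCloudObjStorage

    objstorage = AwsCloudObjStorage(
        container_name="my-bucket",   # placeholder bucket
        compression="gzip",
        path_prefix="content",
        key="AKIA-PLACEHOLDER",       # libcloud driver credentials
        secret="placeholder-secret",
        region="us-east-1",
    )
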
""" def check_config(self, *, check_write): return True def __contains__(self, obj_id, *args, **kwargs): return False - def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): + def add(self, content, obj_id, check_presence=True, *args, **kwargs): pass def get(self, obj_id, *args, **kwargs): return None def check(self, obj_id, *args, **kwargs): pass def delete(self, obj_id, *args, **kwargs): pass diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py index df1ac04..be2c301 100644 --- a/swh/objstorage/backends/pathslicing.py +++ b/swh/objstorage/backends/pathslicing.py @@ -1,406 +1,403 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections.abc import Iterator from contextlib import contextmanager from itertools import islice import os import random import tempfile from typing import List from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.objstorage import ( DEFAULT_LIMIT, ID_HASH_ALGO, ID_HEXDIGEST_LENGTH, ObjStorage, compressors, - compute_hash, decompressors, ) BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 class PathSlicer: """Helper class to compute a path based on a hash. Used to compute a directory path based on the object hash according to a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. For instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will have the following computed path: - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 Args: root (str): path to the root directory of the storage on the disk. slicing (str): the slicing configuration. """ def __init__(self, root: str, slicing: str): self.root = root # Make a list of tuples where each tuple contains the beginning # and the end of each slicing. try: self.bounds = [ slice(*(int(x) if x else None for x in sbounds.split(":"))) for sbounds in slicing.split("/") if sbounds ] except TypeError: raise ValueError( "Invalid slicing declaration; " "it should be a of the form ':[/:]..." ) def check_config(self): """Check the slicing configuration is valid. Raises: ValueError: if the slicing configuration is invalid. """ if len(self): max_char = max( max(bound.start or 0, bound.stop or 0) for bound in self.bounds ) if ID_HEXDIGEST_LENGTH < max_char: raise ValueError( "Algorithm %s has too short hash for slicing to char %d" % (ID_HASH_ALGO, max_char) ) def get_directory(self, hex_obj_id: str) -> str: """Compute the storage directory of an object. See also: PathSlicer::get_path Args: hex_obj_id: object id as hexlified string. Returns: Absolute path (including root) to the directory that contains the given object id. """ return os.path.join(self.root, *self.get_slices(hex_obj_id)) def get_path(self, hex_obj_id: str) -> str: """Compute the full path to an object into the current storage. See also: PathSlicer::get_directory Args: hex_obj_id(str): object id as hexlified string. Returns: Absolute path (including root) to the object corresponding to the given object id. """ return os.path.join(self.get_directory(hex_obj_id), hex_obj_id) def get_slices(self, hex_obj_id: str) -> List[str]: """Compute the path elements for the given hash. 
         Args:
             hex_obj_id(str): object id as hexlified string.

         Returns:
             Relative path to the actual object corresponding to the given
             id as a list.
         """
         assert len(hex_obj_id) == ID_HEXDIGEST_LENGTH
         return [hex_obj_id[bound] for bound in self.bounds]

     def __len__(self) -> int:
         """Number of slices of the slicer"""
         return len(self.bounds)


 class PathSlicingObjStorage(ObjStorage):
     """Implementation of the ObjStorage API based on the hash of the content.

     On disk, an object storage is a directory tree containing files
     named after their object IDs. An object ID is a checksum of its
     content, depending on the value of the ID_HASH_ALGO constant (see
     swh.model.hashutil for its meaning).

     To avoid directories that contain too many files, the object storage has
     a given slicing. Each slicing corresponds to a directory that is named
     according to the hash of its content.

     So for instance a file with SHA1
     34973274ccef6ab4dfaaf86599792fa9c3fe4689 will be stored in the given
     object storages:

     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/    : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689

     The files in the storage are stored in gzipped compressed format.

     Args:
         root (str): path to the root directory of the storage on
             the disk.
         slicing (str): string that indicates the slicing to perform
             on the hash of the content to know the path where it should
             be stored (see the documentation of the PathSlicer class).

     """

     def __init__(self, root, slicing, compression="gzip", **kwargs):
         super().__init__(**kwargs)
         self.root = root
         self.slicer = PathSlicer(root, slicing)

         self.use_fdatasync = hasattr(os, "fdatasync")
         self.compression = compression

         self.check_config(check_write=False)

     def check_config(self, *, check_write):
         """Check whether this object storage is properly configured"""

         self.slicer.check_config()

         if not os.path.isdir(self.root):
             raise ValueError(
                 'PathSlicingObjStorage root "%s" is not a directory' % self.root
             )

         if check_write:
             if not os.access(self.root, os.W_OK):
                 raise PermissionError(
                     'PathSlicingObjStorage root "%s" is not writable' % self.root
                 )

         if self.compression not in compressors:
             raise ValueError(
                 'Unknown compression algorithm "%s" for '
                 "PathSlicingObjStorage" % self.compression
             )

         return True

     def __contains__(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return os.path.isfile(self.slicer.get_path(hex_obj_id))

     def __iter__(self):
         """Iterate over the object identifiers currently available in the
         storage.

         Warning: with the current implementation of the object storage,
         this method will walk the filesystem to list objects, meaning
         that listing all objects will be very slow for large storages.
         You almost certainly don't want to use this method in production.

         Return:
             Iterator over object IDs

         """

         def obj_iterator():
             # XXX hackish: it does not verify that the depth of found files
             # matches the slicing depth of the storage
             for root, _dirs, files in os.walk(self.root):
                 _dirs.sort()
                 for f in sorted(files):
                     yield bytes.fromhex(f)

         return obj_iterator()

     def __len__(self):
         """Compute the number of objects available in the storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performance applies.

         Return:
             number of objects contained in the storage

         """
         return sum(1 for i in self)

-    def add(self, content, obj_id=None, check_presence=True):
-        if obj_id is None:
-            obj_id = compute_hash(content)
+    def add(self, content, obj_id, check_presence=True):
         if check_presence and obj_id in self:
             # If the object is already present, return immediately.
             return obj_id

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         if not isinstance(content, Iterator):
             content = [content]
         compressor = compressors[self.compression]()
         with self._write_obj_file(hex_obj_id) as f:
             for chunk in content:
                 f.write(compressor.compress(chunk))
             f.write(compressor.flush())

         return obj_id

     def get(self, obj_id):
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         # Open the file and return its content as bytes
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         d = decompressors[self.compression]()
         with open(self.slicer.get_path(hex_obj_id), "rb") as f:
             out = d.decompress(f.read())
         if d.unused_data:
             raise Error(
                 "Corrupt object %s: trailing data found" % hex_obj_id,
             )

         return out

     def check(self, obj_id):
         try:
             data = self.get(obj_id)
         except OSError:
             hex_obj_id = hashutil.hash_to_hex(obj_id)
             raise Error(
                 "Corrupt object %s: not a proper compressed file" % hex_obj_id,
             )

         checksums = hashutil.MultiHash.from_data(
             data, hash_names=[ID_HASH_ALGO]
         ).digest()

         actual_obj_id = checksums[ID_HASH_ALGO]
         hex_obj_id = hashutil.hash_to_hex(obj_id)

         if hex_obj_id != hashutil.hash_to_hex(actual_obj_id):
             raise Error(
                 "Corrupt object %s should have id %s"
                 % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id))
             )

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         try:
             os.remove(self.slicer.get_path(hex_obj_id))
         except FileNotFoundError:
             raise ObjNotFoundError(obj_id)
         return True

     # Management methods

     def get_random(self, batch_size):
         def get_random_content(self, batch_size):
             """Get a batch of content inside a single directory.

             Returns:
                 a tuple (batch size, batch).

             """
             dirs = []
             for level in range(len(self.slicer)):
                 path = os.path.join(self.root, *dirs)
                 dir_list = next(os.walk(path))[1]
                 if "tmp" in dir_list:
                     dir_list.remove("tmp")
                 dirs.append(random.choice(dir_list))

             path = os.path.join(self.root, *dirs)
             content_list = next(os.walk(path))[2]
             length = min(batch_size, len(content_list))
             return (
                 length,
                 map(hashutil.hash_to_bytes, random.sample(content_list, length)),
             )

         while batch_size:
             length, it = get_random_content(self, batch_size)
             batch_size = batch_size - length
             yield from it

     # Streaming methods

     @contextmanager
     def chunk_writer(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         compressor = compressors[self.compression]()
         with self._write_obj_file(hex_obj_id) as f:
             yield lambda c: f.write(compressor.compress(c))
             f.write(compressor.flush())

     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         if last_obj_id:
             it = self.iter_from(last_obj_id)
         else:
             it = iter(self)
         return islice(it, limit)

     def iter_from(self, obj_id, n_leaf=False):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         slices = self.slicer.get_slices(hex_obj_id)
         rlen = len(self.root.split("/"))

         i = 0
         for root, dirs, files in os.walk(self.root):
             if not dirs:
                 i += 1
             level = len(root.split("/")) - rlen
             dirs.sort()
             if dirs and root == os.path.join(self.root, *slices[:level]):
                 cslice = slices[level]
                 for d in dirs[:]:
                     if d < cslice:
                         dirs.remove(d)
             for f in sorted(files):
                 if f > hex_obj_id:
                     yield bytes.fromhex(f)
         if n_leaf:
             yield i

     @contextmanager
     def _write_obj_file(self, hex_obj_id):
         """Context manager for writing object files to the object storage.

         During writing, data are written to a temporary file, which is
         atomically renamed to the right file name after closing.

         Usage sample:
             with objstorage._write_obj_file(hex_obj_id):
                 f.write(obj_data)

         Yields:
             a file-like object open for writing bytes.
""" # Get the final paths and create the directory if absent. dir = self.slicer.get_directory(hex_obj_id) if not os.path.isdir(dir): os.makedirs(dir, DIR_MODE, exist_ok=True) path = os.path.join(dir, hex_obj_id) # Create a temporary file. (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir) # Open the file and yield it for writing. tmp_f = os.fdopen(tmp, "wb") yield tmp_f # Make sure the contents of the temporary file are written to disk tmp_f.flush() if self.use_fdatasync: os.fdatasync(tmp) else: os.fsync(tmp) # Then close the temporary file and move it to the right path. tmp_f.close() os.chmod(tmp_path, FILE_MODE) os.rename(tmp_path, path) diff --git a/swh/objstorage/backends/seaweedfs/objstorage.py b/swh/objstorage/backends/seaweedfs/objstorage.py index 24e14e2..68480b4 100644 --- a/swh/objstorage/backends/seaweedfs/objstorage.py +++ b/swh/objstorage/backends/seaweedfs/objstorage.py @@ -1,156 +1,152 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import io from itertools import islice import logging import os from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.objstorage import ( DEFAULT_LIMIT, ObjStorage, compressors, compute_hash, decompressors, ) from .http import HttpFiler LOGGER = logging.getLogger(__name__) class SeaweedFilerObjStorage(ObjStorage): """ObjStorage with seaweedfs abilities, using the Filer API. https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API """ def __init__(self, url, compression=None, **kwargs): super().__init__(**kwargs) self.wf = HttpFiler(url) self.compression = compression def check_config(self, *, check_write): """Check the configuration for this object storage""" # FIXME: hopefully this blew up during instantiation return True def __contains__(self, obj_id): return self.wf.exists(self._path(obj_id)) def __iter__(self): """Iterate over the objects present in the storage Warning: Iteration over the contents of a cloud-based object storage may have bad efficiency: due to the very high amount of objects in it and the fact that it is remote, get all the contents of the current object storage may result in a lot of network requests. You almost certainly don't want to use this method in production. """ obj_id = last_obj_id = None while True: for obj_id in self.list_content(last_obj_id=last_obj_id): yield obj_id if last_obj_id == obj_id: break last_obj_id = obj_id def __len__(self): """Compute the number of objects in the current object storage. Warning: this currently uses `__iter__`, its warning about bad performance applies. Returns: number of objects contained in the storage. """ return sum(1 for i in self) - def add(self, content, obj_id=None, check_presence=True): - if obj_id is None: - # Checksum is missing, compute it on the fly. - obj_id = compute_hash(content) - + def add(self, content, obj_id, check_presence=True): if check_presence and obj_id in self: return obj_id def compressor(data): comp = compressors[self.compression]() for chunk in data: yield comp.compress(chunk) yield comp.flush() if isinstance(content, bytes): content = [content] # XXX should handle streaming correctly... 
diff --git a/swh/objstorage/backends/seaweedfs/objstorage.py b/swh/objstorage/backends/seaweedfs/objstorage.py
index 24e14e2..68480b4 100644
--- a/swh/objstorage/backends/seaweedfs/objstorage.py
+++ b/swh/objstorage/backends/seaweedfs/objstorage.py
@@ -1,156 +1,152 @@
 # Copyright (C) 2019-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import io
 from itertools import islice
 import logging
 import os

 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.objstorage import (
     DEFAULT_LIMIT,
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
 )

 from .http import HttpFiler

 LOGGER = logging.getLogger(__name__)


 class SeaweedFilerObjStorage(ObjStorage):
     """ObjStorage with seaweedfs abilities, using the Filer API.

     https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API
     """

     def __init__(self, url, compression=None, **kwargs):
         super().__init__(**kwargs)
         self.wf = HttpFiler(url)
         self.compression = compression

     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         # FIXME: hopefully this blew up during instantiation
         return True

     def __contains__(self, obj_id):
         return self.wf.exists(self._path(obj_id))

     def __iter__(self):
         """Iterate over the objects present in the storage

         Warning: Iteration over the contents of a cloud-based object storage
         may have bad efficiency: due to the very high number of objects in it
         and the fact that it is remote, getting all the contents of the
         current object storage may result in a lot of network requests.

         You almost certainly don't want to use this method in production.
         """
         obj_id = last_obj_id = None
         while True:
             for obj_id in self.list_content(last_obj_id=last_obj_id):
                 yield obj_id
             if last_obj_id == obj_id:
                 break
             last_obj_id = obj_id

     def __len__(self):
         """Compute the number of objects in the current object storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performance applies.

         Returns:
             number of objects contained in the storage.

         """
         return sum(1 for i in self)

-    def add(self, content, obj_id=None, check_presence=True):
-        if obj_id is None:
-            # Checksum is missing, compute it on the fly.
-            obj_id = compute_hash(content)
-
+    def add(self, content, obj_id, check_presence=True):
         if check_presence and obj_id in self:
             return obj_id

         def compressor(data):
             comp = compressors[self.compression]()
             for chunk in data:
                 yield comp.compress(chunk)
             yield comp.flush()

         if isinstance(content, bytes):
             content = [content]

         # XXX should handle streaming correctly...
         self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id))
         return obj_id

     def restore(self, content, obj_id=None):
         return self.add(content, obj_id, check_presence=False)

     def get(self, obj_id):
         try:
             obj = self.wf.get(self._path(obj_id))
         except Exception:
             raise ObjNotFoundError(obj_id)

         d = decompressors[self.compression]()
         ret = d.decompress(obj)
         if d.unused_data:
             hex_obj_id = hashutil.hash_to_hex(obj_id)
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id)
         return ret

     def check(self, obj_id):
         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
         self.wf.delete(self._path(obj_id))
         return True

     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         if last_obj_id:
             objid = hashutil.hash_to_hex(last_obj_id)
             lastfilename = objid
         else:
             lastfilename = None
         for fname in islice(self.wf.iterfiles(last_file_name=lastfilename), limit):
             bytehex = fname.rsplit("/", 1)[-1]
             yield hashutil.bytehex_to_hash(bytehex.encode())

     # internal methods

     def _put_object(self, content, obj_id):
         """Create an object in the cloud storage.

         Created object will contain the content and be referenced by the
         given id.

         """

         def compressor(data):
             comp = compressors[self.compression]()
             for chunk in data:
                 yield comp.compress(chunk)
             yield comp.flush()

         if isinstance(content, bytes):
             content = [content]

         self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id))

     def _path(self, obj_id):
         return os.path.join(self.wf.basepath, hashutil.hash_to_hex(obj_id))
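
Usage sketch (not part of the patch) for the Filer backend above; the URL
points at a hypothetical local seaweedfs filer.

    from swh.objstorage.backends.seaweedfs.objstorage import SeaweedFilerObjStorage
    from swh.objstorage.objstorage import compute_hash

    objstorage = SeaweedFilerObjStorage(
        url="http://127.0.0.1:8888/swh/",  # hypothetical filer endpoint
        compression="gzip",
    )
    content = b"blob"
    objstorage.add(content, obj_id=compute_hash(content))
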
diff --git a/swh/objstorage/backends/winery/objstorage.py b/swh/objstorage/backends/winery/objstorage.py
index ade9a2d..86219ba 100644
--- a/swh/objstorage/backends/winery/objstorage.py
+++ b/swh/objstorage/backends/winery/objstorage.py
@@ -1,192 +1,176 @@
 # Copyright (C) 2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import logging
 from multiprocessing import Process

-from swh.model import hashutil
 from swh.objstorage import exc
 from swh.objstorage.objstorage import ObjStorage

 from .roshard import ROShard
 from .rwshard import RWShard
 from .sharedbase import SharedBase
 from .stats import Stats

 logger = logging.getLogger(__name__)


-def compute_hash(content):
-    algo = "sha256"
-    return (
-        hashutil.MultiHash.from_data(
-            content,
-            hash_names=[algo],
-        )
-        .digest()
-        .get(algo)
-    )
-
-
 class WineryObjStorage(ObjStorage):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         if kwargs.get("readonly"):
             self.winery = WineryReader(**kwargs)
         else:
             self.winery = WineryWriter(**kwargs)

     def uninit(self):
         self.winery.uninit()

     def get(self, obj_id):
         return self.winery.get(obj_id)

     def check_config(self, *, check_write):
         return True

     def __contains__(self, obj_id):
         return obj_id in self.winery

-    def add(self, content, obj_id=None, check_presence=True):
+    def add(self, content, obj_id, check_presence=True):
         return self.winery.add(content, obj_id, check_presence)

     def check(self, obj_id):
         return self.winery.check(obj_id)

     def delete(self, obj_id):
         raise PermissionError("Delete is not allowed.")


 class WineryBase:
     def __init__(self, **kwargs):
         self.args = kwargs
         self.init()

     def init(self):
         self.base = SharedBase(**self.args)

     def uninit(self):
         self.base.uninit()

     def __contains__(self, obj_id):
         return self.base.contains(obj_id)


 class WineryReader(WineryBase):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.shards = {}

     def roshard(self, name):
         if name not in self.shards:
             shard = ROShard(name, **self.args)
             shard.load()
             self.shards[name] = shard
         return self.shards[name]

     def get(self, obj_id):
         shard_info = self.base.get(obj_id)
         if shard_info is None:
             raise exc.ObjNotFoundError(obj_id)
         name, readonly = shard_info
         if readonly:
             shard = self.roshard(name)
             content = shard.get(obj_id)
             del shard
         else:
             shard = RWShard(name, **self.args)
             content = shard.get(obj_id)
         if content is None:
             raise exc.ObjNotFoundError(obj_id)
         return content


 def pack(shard, **kwargs):
     return Packer(shard, **kwargs).run()


 class Packer:
     def __init__(self, shard, **kwargs):
         self.stats = Stats(kwargs.get("output_dir"))
         self.args = kwargs
         self.shard = shard
         self.init()

     def init(self):
         self.rw = RWShard(self.shard, **self.args)
         self.ro = ROShard(self.shard, **self.args)

     def uninit(self):
         del self.ro
         self.rw.uninit()

     def run(self):
         self.ro.create(self.rw.count())
         for obj_id, content in self.rw.all():
             self.ro.add(content, obj_id)
             if self.stats.stats_active:
                 self.stats.stats_read(obj_id, content)
                 self.stats.stats_write(obj_id, content)
         self.ro.save()
         base = SharedBase(**self.args)
         base.shard_packing_ends(self.shard)
         base.uninit()
         self.rw.uninit()
         self.rw.drop()
         return True


 class WineryWriter(WineryReader):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.packers = []
         self.init()

     def init(self):
         super().init()
         self.shard = RWShard(self.base.whoami, **self.args)

     def uninit(self):
         self.shard.uninit()
         super().uninit()

-    def add(self, content, obj_id=None, check_presence=True):
-        if obj_id is None:
-            obj_id = compute_hash(content)
-
+    def add(self, content, obj_id, check_presence=True):
         if check_presence and obj_id in self:
             return obj_id

         shard = self.base.add_phase_1(obj_id)
         if shard != self.base.id:
             # this object is the responsibility of another shard
             return obj_id

         self.shard.add(obj_id, content)
         self.base.add_phase_2(obj_id)

         if self.shard.is_full():
             self.pack()

         return obj_id

     def check(self, obj_id):
         # load all shards with packing == True and not locked (i.e. the
         # packer was interrupted for whatever reason) and run pack for
         # each of them
         pass

     def pack(self):
         self.base.shard_packing_starts()
         p = Process(target=pack, args=(self.shard.name,), kwargs=self.args)
         self.uninit()
         p.start()
         self.packers.append(p)
         self.init()

     def __del__(self):
         for p in self.packers:
             p.kill()
             p.join()
""" from swh.objstorage.api.server import app, validate_config if "log_level" in ctx.obj: logging.getLogger("werkzeug").setLevel(ctx.obj["log_level"]) validate_config(ctx.obj["config"]) app.config.update(ctx.obj["config"]) app.run(host, port=int(port), debug=bool(debug)) @objstorage_cli_group.command("import") @click.argument("directory", required=True, nargs=-1) @click.pass_context def import_directories(ctx, directory): """Import a local directory in an existing objstorage.""" from swh.objstorage.factory import get_objstorage + from swh.objstorage.objstorage import compute_hash objstorage = get_objstorage(**ctx.obj["config"]["objstorage"]) nobj = 0 volume = 0 t0 = time.time() for dirname in directory: for root, _dirs, files in os.walk(dirname): for name in files: path = os.path.join(root, name) with open(path, "rb") as f: - objstorage.add(f.read()) - volume += os.stat(path).st_size - nobj += 1 + content = f.read() + objstorage.add(content, obj_id=compute_hash(content)) + volume += os.stat(path).st_size + nobj += 1 click.echo( "Imported %d files for a volume of %s bytes in %d seconds" % (nobj, volume, time.time() - t0) ) @objstorage_cli_group.command("fsck") @click.pass_context def fsck(ctx): """Check the objstorage is not corrupted.""" from swh.objstorage.factory import get_objstorage objstorage = get_objstorage(**ctx.obj["config"]["objstorage"]) for obj_id in objstorage: try: objstorage.check(obj_id) except objstorage.Error as err: logging.error(err) def main(): return cli(auto_envvar_prefix="SWH_OBJSTORAGE") if __name__ == "__main__": main() diff --git a/swh/objstorage/interface.py b/swh/objstorage/interface.py index 49ef9c4..865fe04 100644 --- a/swh/objstorage/interface.py +++ b/swh/objstorage/interface.py @@ -1,216 +1,214 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict from typing_extensions import Protocol, runtime_checkable from swh.core.api import remote_api_endpoint from swh.objstorage.objstorage import DEFAULT_LIMIT @runtime_checkable class ObjStorageInterface(Protocol): """High-level API to manipulate the Software Heritage object storage. Conceptually, the object storage offers the following methods: - check_config() check if the object storage is properly configured - __contains__() check if an object is present, by object id - add() add a new object, returning an object id - restore() same as add() but erase an already existed content - get() retrieve the content of an object, by object id - check() check the integrity of an object, by object id - delete() remove an object And some management methods: - get_random() get random object id of existing contents (used for the content integrity checker). Each implementation of this interface can have a different behavior and its own way to store the contents. """ @remote_api_endpoint("check_config") def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ ... @remote_api_endpoint("content/contains") def __contains__(self, obj_id): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. 
diff --git a/swh/objstorage/interface.py b/swh/objstorage/interface.py
index 49ef9c4..865fe04 100644
--- a/swh/objstorage/interface.py
+++ b/swh/objstorage/interface.py
@@ -1,216 +1,214 @@
 # Copyright (C) 2015-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from typing import Dict

 from typing_extensions import Protocol, runtime_checkable

 from swh.core.api import remote_api_endpoint
 from swh.objstorage.objstorage import DEFAULT_LIMIT


 @runtime_checkable
 class ObjStorageInterface(Protocol):
     """High-level API to manipulate the Software Heritage object storage.

     Conceptually, the object storage offers the following methods:

     - check_config()  check if the object storage is properly configured
     - __contains__()  check if an object is present, by object id
     - add()           add a new object, returning an object id
     - restore()       same as add() but erases already existing content
     - get()           retrieve the content of an object, by object id
     - check()         check the integrity of an object, by object id
     - delete()        remove an object

     And some management methods:

     - get_random()    get random object ids of existing contents (used for
                       the content integrity checker).

     Each implementation of this interface can have a different behavior and
     its own way to store the contents.
     """

     @remote_api_endpoint("check_config")
     def check_config(self, *, check_write):
         """Check whether the object storage is properly configured.

         Args:
             check_write (bool): if True, check if writes to the object
                 storage can succeed.

         Returns:
             True if the configuration check worked, an exception if it
             didn't.
         """
         ...

     @remote_api_endpoint("content/contains")
     def __contains__(self, obj_id):
         """Indicate if the given object is present in the storage.

         Args:
             obj_id (bytes): object identifier.
Args: last_obj_id (bytes): object id from which to start iterating (excluded). limit (int): max number of object ids to generate. Generates: obj_id (bytes): object ids. """ ... diff --git a/swh/objstorage/multiplexer/filter/filter.py b/swh/objstorage/multiplexer/filter/filter.py index 35ac628..dc13a2e 100644 --- a/swh/objstorage/multiplexer/filter/filter.py +++ b/swh/objstorage/multiplexer/filter/filter.py @@ -1,77 +1,77 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.objstorage.objstorage import ObjStorage class ObjStorageFilter(ObjStorage): """Base implementation of a filter that allows or rejects operations on an ObjStorage. This class copies the API of ...objstorage in order to filter its inputs. If the operation is allowed, it returns the result of this operation applied to the destination implementation. Otherwise, it just returns without performing any operation. This class is an abstract base class for a classic read/write storage. Filters can inherit from it and only redefine some methods in order to change behavior. """ def __init__(self, storage): self.storage = storage def check_config(self, *, check_write): """Check the object storage for proper configuration. Args: check_write: check whether writes to the objstorage will succeed Returns: True if the storage is properly configured """ return self.storage.check_config(check_write=check_write) def __contains__(self, *args, **kwargs): return self.storage.__contains__(*args, **kwargs) def __iter__(self): """Iterates over the content of each storage Warning: The `__iter__` methods frequently have bad performance. You almost certainly don't want to use this method in production as the wrapped storage may cause performance issues. """ return self.storage.__iter__() def __len__(self): """Compute the number of objects in the current object storage. Warning: the performance issue in `__iter__` also applies here. Returns: number of objects contained in the storage.
""" return self.storage.__len__() - def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): + def add(self, content, obj_id, check_presence=True, *args, **kwargs): return self.storage.add(content, obj_id, check_presence, *args, **kwargs) def restore(self, content, obj_id=None, *args, **kwargs): return self.storage.restore(content, obj_id, *args, **kwargs) def get(self, obj_id, *args, **kwargs): return self.storage.get(obj_id, *args, **kwargs) def check(self, obj_id, *args, **kwargs): return self.storage.check(obj_id, *args, **kwargs) def delete(self, obj_id, *args, **kwargs): return self.storage.delete(obj_id, *args, **kwargs) def get_random(self, batch_size, *args, **kwargs): return self.storage.get_random(batch_size, *args, **kwargs) diff --git a/swh/objstorage/multiplexer/filter/id_filter.py b/swh/objstorage/multiplexer/filter/id_filter.py index 5557b3e..9619a32 100644 --- a/swh/objstorage/multiplexer/filter/id_filter.py +++ b/swh/objstorage/multiplexer/filter/id_filter.py @@ -1,89 +1,87 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import re from swh.model import hashutil from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.multiplexer.filter.filter import ObjStorageFilter from swh.objstorage.objstorage import compute_hash class IdObjStorageFilter(ObjStorageFilter, metaclass=abc.ABCMeta): """Filter that only allow operations if the object id match a requirement. Even for read operations, check before if the id match the requirements. This may prevent for unnecessary disk access. """ @abc.abstractmethod def is_valid(self, obj_id): """Indicates if the given id is valid.""" raise NotImplementedError( "Implementations of an IdObjStorageFilter " 'must have a "is_valid" method' ) def __contains__(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.__contains__(*args, obj_id=obj_id, **kwargs) return False def __len__(self): return sum(1 for i in [id for id in self.storage if self.is_valid(id)]) def __iter__(self): yield from filter(lambda id: self.is_valid(id), iter(self.storage)) - def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): - if obj_id is None: - obj_id = compute_hash(content) + def add(self, content, obj_id, check_presence=True, *args, **kwargs): if self.is_valid(obj_id): return self.storage.add(content, *args, obj_id=obj_id, **kwargs) def restore(self, content, obj_id=None, *args, **kwargs): if obj_id is None: obj_id = compute_hash(content) if self.is_valid(obj_id): return self.storage.restore(content, *args, obj_id=obj_id, **kwargs) def get(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.get(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def check(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.check(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def get_random(self, *args, **kwargs): yield from filter( lambda id: self.is_valid(id), self.storage.get_random(*args, **kwargs) ) class RegexIdObjStorageFilter(IdObjStorageFilter): """Filter that allow operations if the content's id as hex match a regex.""" def __init__(self, storage, regex): super().__init__(storage) self.regex = re.compile(regex) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return 
self.regex.match(hex_obj_id) is not None class PrefixIdObjStorageFilter(IdObjStorageFilter): """Filter that allows operations if the hexlified id has a given prefix.""" def __init__(self, storage, prefix): super().__init__(storage) self.prefix = str(prefix) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return str(hex_obj_id).startswith(self.prefix) diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py index 73b2bd5..d6929f8 100644 --- a/swh/objstorage/multiplexer/multiplexer_objstorage.py +++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py @@ -1,330 +1,330 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import queue import random import threading from typing import Dict from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.objstorage import ObjStorage class ObjStorageThread(threading.Thread): def __init__(self, storage): super().__init__(daemon=True) self.storage = storage self.commands = queue.Queue() def run(self): while True: try: mailbox, command, args, kwargs = self.commands.get(True, 0.05) except queue.Empty: continue try: ret = getattr(self.storage, command)(*args, **kwargs) except Exception as exc: self.queue_result(mailbox, "exception", exc) else: self.queue_result(mailbox, "result", ret) def queue_command(self, command, *args, mailbox=None, **kwargs): """Enqueue a new command to be processed by the thread. Args: command (str): one of the method names for the underlying storage. mailbox (queue.Queue): explicit mailbox if the calling thread wants to override it. args, kwargs: arguments for the command. Returns: queue.Queue: The mailbox you can read the response from """ if not mailbox: mailbox = queue.Queue() self.commands.put((mailbox, command, args, kwargs)) return mailbox def queue_result(self, mailbox, result_type, result): """Enqueue a new result in the mailbox. This also provides a reference to the storage, which can be useful when an exceptional condition arises. Args: mailbox (queue.Queue): the mailbox to which we need to enqueue the result result_type (str): one of 'result', 'exception' result: the result to pass back to the calling thread """ mailbox.put( { "type": result_type, "result": result, } ) @staticmethod def get_result_from_mailbox(mailbox, *args, **kwargs): """Unpack the result from the mailbox. Args: mailbox (queue.Queue): A mailbox to unpack a result from args: positional arguments to :func:`mailbox.get` kwargs: keyword arguments to :func:`mailbox.get` Returns: the next result unpacked from the queue Raises: either the exception we got back from the underlying storage, or :exc:`queue.Empty` if :func:`mailbox.get` raises that.
""" result = mailbox.get(*args, **kwargs) if result["type"] == "exception": raise result["result"] from None else: return result["result"] @staticmethod def collect_results(mailbox, num_results): """Collect num_results from the mailbox""" collected = 0 ret = [] while collected < num_results: try: ret.append( ObjStorageThread.get_result_from_mailbox(mailbox, True, 0.05) ) except queue.Empty: continue collected += 1 return ret def __getattr__(self, attr): def call(*args, **kwargs): mailbox = self.queue_command(attr, *args, **kwargs) return self.get_result_from_mailbox(mailbox) return call def __contains__(self, *args, **kwargs): mailbox = self.queue_command("__contains__", *args, **kwargs) return self.get_result_from_mailbox(mailbox) class MultiplexerObjStorage(ObjStorage): """Implementation of ObjStorage that distributes between multiple storages. The multiplexer object storage allows an input to be demultiplexed among multiple storages that will or will not accept it by themselves (see .filter package). As the ids can be different, no pre-computed ids should be submitted. Also, there are no guarantees that the returned ids can be used directly into the storages that the multiplexer manage. Use case examples follow. Example 1:: storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1', '0:2/2:4/4:6')) storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5') storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using 'storage', all the new contents will only be added to the v2 storage, while it will be retrievable from both. Example 2:: storage_v1 = filter.id_regex( PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'), r'[^012].*' ) storage_v2 = filter.if_regex( PathSlicingObjStorage('/dir2', '0:1/0:5'), r'[012]/*' ) storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using this storage, the contents with a sha1 starting with 0, 1 or 2 will be redirected (read AND write) to the storage_v2, while the others will be redirected to the storage_v1. If a content starting with 0, 1 or 2 is present in the storage_v1, it would be ignored anyway. """ def __init__(self, storages, **kwargs): super().__init__(**kwargs) self.storages = storages self.storage_threads = [ObjStorageThread(storage) for storage in storages] for thread in self.storage_threads: thread.start() def wrap_call(self, threads, call, *args, **kwargs): threads = list(threads) mailbox = queue.Queue() for thread in threads: thread.queue_command(call, *args, mailbox=mailbox, **kwargs) return ObjStorageThread.collect_results(mailbox, len(threads)) def get_read_threads(self, obj_id=None): yield from self.storage_threads def get_write_threads(self, obj_id=None): yield from self.storage_threads def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ return all( self.wrap_call( self.storage_threads, "check_config", check_write=check_write ) ) def __contains__(self, obj_id): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True if and only if the object is present in the current object storage. 
""" for storage in self.get_read_threads(obj_id): if obj_id in storage: return True return False def __iter__(self): def obj_iterator(): for storage in self.storages: yield from storage return obj_iterator() - def add(self, content, obj_id=None, check_presence=True): + def add(self, content, obj_id, check_presence=True): """Add a new object to the object storage. If the adding step works in all the storages that accept this content, this is a success. Otherwise, the full adding step is an error even if it succeed in some of the storages. Args: content: content of the object to be added to the storage. obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence: indicate if the presence of the content should be verified before adding the file. Returns: an id of the object into the storage. As the write-storages are always readable as well, any id will be valid to retrieve a content. """ results = self.wrap_call( self.get_write_threads(obj_id), "add", content, obj_id=obj_id, check_presence=check_presence, ) for result in results: if not result: continue return result def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage.""" write_threads = list(self.get_write_threads()) results = self.wrap_call( write_threads, "add_batch", contents, check_presence=check_presence, ) summed = {"object:add": 0, "object:add:bytes": 0} for result in results: summed["object:add"] += result["object:add"] summed["object:add:bytes"] += result["object:add:bytes"] return { "object:add": summed["object:add"] // len(results), "object:add:bytes": summed["object:add:bytes"] // len(results), } def restore(self, content, obj_id=None): return self.wrap_call( self.get_write_threads(obj_id), "restore", content, obj_id=obj_id, ).pop() def get(self, obj_id): for storage in self.get_read_threads(obj_id): try: return storage.get(obj_id) except ObjNotFoundError: continue # If no storage contains this content, raise the error raise ObjNotFoundError(obj_id) def check(self, obj_id): nb_present = 0 for storage in self.get_read_threads(obj_id): try: storage.check(obj_id) except ObjNotFoundError: continue else: nb_present += 1 # If there is an Error because of a corrupted file, then let it pass. # Raise the ObjNotFoundError only if the content couldn't be found in # all the storages. 
if nb_present == 0: raise ObjNotFoundError(obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission return all(self.wrap_call(self.get_write_threads(obj_id), "delete", obj_id)) def get_random(self, batch_size): storages_set = [storage for storage in self.storages if len(storage) > 0] if len(storages_set) <= 0: return [] while storages_set: storage = random.choice(storages_set) try: return storage.get_random(batch_size) except NotImplementedError: storages_set.remove(storage) # There is no storage that allows the get_random operation raise NotImplementedError( "There is no storage implementation in the multiplexer that " "supports the 'get_random' operation" ) diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index ea732e9..ae4cacd 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,147 +1,147 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import bz2 from itertools import dropwhile, islice import lzma from typing import Dict import zlib from swh.model import hashutil from .exc import ObjNotFoundError ID_HASH_ALGO = "sha1" ID_HEXDIGEST_LENGTH = 40 """Size in bytes of the hash hexadecimal representation.""" ID_DIGEST_LENGTH = 20 """Size in bytes of the hash""" DEFAULT_LIMIT = 10000 """Default number of results of ``list_content``.""" -def compute_hash(content): +def compute_hash(content, algo=ID_HASH_ALGO): """Compute the content's hash. Args: content (bytes): The raw content to hash algo (str): name of the hash algorithm (defaults to ID_HASH_ALGO) Returns: the digest of the content, computed with the ``algo`` hash algorithm """ return ( hashutil.MultiHash.from_data( content, - hash_names=[ID_HASH_ALGO], + hash_names=[algo], ) .digest() - .get(ID_HASH_ALGO) + .get(algo) ) class NullCompressor: def compress(self, data): return data def flush(self): return b"" class NullDecompressor: def decompress(self, data): return data @property def unused_data(self): return b"" decompressors = { "bz2": bz2.BZ2Decompressor, "lzma": lzma.LZMADecompressor, "gzip": lambda: zlib.decompressobj(wbits=31), "zlib": zlib.decompressobj, "none": NullDecompressor, } compressors = { "bz2": bz2.BZ2Compressor, "lzma": lzma.LZMACompressor, "gzip": lambda: zlib.compressobj(wbits=31), "zlib": zlib.compressobj, "none": NullCompressor, } class ObjStorage(metaclass=abc.ABCMeta): def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed self.allow_delete = allow_delete @abc.abstractmethod def check_config(self, *, check_write): pass @abc.abstractmethod def __contains__(self, obj_id): pass @abc.abstractmethod - def add(self, content, obj_id=None, check_presence=True): + def add(self, content, obj_id, check_presence=True): pass def add_batch(self, contents, check_presence=True) -> Dict: summary = {"object:add": 0, "object:add:bytes": 0} for obj_id, content in contents.items(): if check_presence and obj_id in self: continue self.add(content, obj_id, check_presence=False) summary["object:add"] += 1 summary["object:add:bytes"] += len(content) return summary def restore(self, content, obj_id=None): # check_presence to false will erase the potential previous content.
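# With check_presence=False, add() never short-circuits on an existing object, so the new content overwrites whatever is currently stored under obj_id.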
return self.add(content, obj_id, check_presence=False) @abc.abstractmethod def get(self, obj_id): pass def get_batch(self, obj_ids): for obj_id in obj_ids: try: yield self.get(obj_id) except ObjNotFoundError: yield None @abc.abstractmethod def check(self, obj_id): pass @abc.abstractmethod def delete(self, obj_id): if not self.allow_delete: raise PermissionError("Delete is not allowed.") # Management methods def get_random(self, batch_size): pass # Streaming methods def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): it = iter(self) if last_obj_id: it = dropwhile(lambda x: x <= last_obj_id, it) return islice(it, limit) diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py index 953fbd4..e72b5be 100644 --- a/swh/objstorage/tests/objstorage_testing.py +++ b/swh/objstorage/tests/objstorage_testing.py @@ -1,225 +1,219 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import inspect from swh.objstorage import exc from swh.objstorage.interface import ObjStorageInterface from swh.objstorage.objstorage import compute_hash class ObjStorageTestFixture: def test_types(self): """Checks all methods of ObjStorageInterface are implemented by this backend, and that they have the same signature.""" # Create an instance of the protocol (which cannot be instantiated # directly, so this creates a subclass, then instantiates it) interface = type("_", (ObjStorageInterface,), {})() assert "get_batch" in dir(interface) missing_methods = [] for meth_name in dir(interface): if meth_name.startswith("_"): continue interface_meth = getattr(interface, meth_name) concrete_meth = getattr(self.storage, meth_name) expected_signature = inspect.signature(interface_meth) actual_signature = inspect.signature(concrete_meth) assert expected_signature == actual_signature, meth_name assert missing_methods == [] # If all the assertions above succeed, then this one should too. # But there's no harm in double-checking. # And we could replace the assertions above by this one, but unlike # the assertions above, it doesn't explain what is missing. 
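# Note: isinstance() against a runtime_checkable Protocol only verifies that the methods exist, not that their signatures match, hence the explicit signature comparison above.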
assert isinstance(self.storage, ObjStorageInterface) def hash_content(self, content): obj_id = compute_hash(content) return content, obj_id def assertContentMatch(self, obj_id, expected_content): # noqa content = self.storage.get(obj_id) self.assertEqual(content, expected_content) def test_check_config(self): self.assertTrue(self.storage.check_config(check_write=False)) self.assertTrue(self.storage.check_config(check_write=True)) def test_contains(self): content_p, obj_id_p = self.hash_content(b"contains_present") content_m, obj_id_m = self.hash_content(b"contains_missing") self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_add_get_w_id(self): content, obj_id = self.hash_content(b"add_get_w_id") r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) def test_add_twice(self): content, obj_id = self.hash_content(b"add_twice") r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) r = self.storage.add(content, obj_id=obj_id, check_presence=False) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) def test_add_big(self): content, obj_id = self.hash_content(b"add_big" * 1024 * 1024) r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) - def test_add_get_wo_id(self): - content, obj_id = self.hash_content(b"add_get_wo_id") - r = self.storage.add(content) - self.assertEqual(obj_id, r) - self.assertContentMatch(obj_id, content) - def test_add_get_batch(self): content1, obj_id1 = self.hash_content(b"add_get_batch_1") content2, obj_id2 = self.hash_content(b"add_get_batch_2") self.storage.add(content1, obj_id1) self.storage.add(content2, obj_id2) cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2]) self.assertEqual(cr1, content1) self.assertEqual(cr2, content2) def test_get_batch_unexisting_content(self): content, obj_id = self.hash_content(b"get_batch_unexisting_content") result = list(self.storage.get_batch([obj_id])) self.assertTrue(len(result) == 1) self.assertIsNone(result[0]) def test_restore_content(self): self.storage.allow_delete = True valid_content, valid_obj_id = self.hash_content(b"restore_content") invalid_content = b"unexpected content" id_adding = self.storage.add(invalid_content, valid_obj_id) self.assertEqual(id_adding, valid_obj_id) with self.assertRaises(exc.Error): self.storage.check(id_adding) id_restore = self.storage.restore(valid_content, valid_obj_id) self.assertEqual(id_restore, valid_obj_id) self.assertContentMatch(valid_obj_id, valid_content) def test_get_missing(self): content, obj_id = self.hash_content(b"get_missing") with self.assertRaises(exc.ObjNotFoundError) as e: self.storage.get(obj_id) self.assertIn(obj_id, e.exception.args) def test_check_missing(self): content, obj_id = self.hash_content(b"check_missing") with self.assertRaises(exc.Error): self.storage.check(obj_id) def test_check_present(self): content, obj_id = self.hash_content(b"check_present") self.storage.add(content, obj_id) try: self.storage.check(obj_id) except exc.Error: self.fail("Integrity check failed") def test_delete_missing(self): self.storage.allow_delete = True content, obj_id = self.hash_content(b"missing_content_to_delete") with self.assertRaises(exc.Error): self.storage.delete(obj_id) def test_delete_present(self): self.storage.allow_delete = True content, obj_id = self.hash_content(b"content_to_delete") 
self.storage.add(content, obj_id=obj_id) self.assertTrue(self.storage.delete(obj_id)) with self.assertRaises(exc.Error): self.storage.get(obj_id) def test_delete_not_allowed(self): self.storage.allow_delete = False content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.storage.delete(obj_id) def test_delete_not_allowed_by_default(self): content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.assertTrue(self.storage.delete(obj_id)) def test_add_batch(self): contents = {} expected_content_add = 0 expected_content_add_bytes = 0 for i in range(50): content = b"Test content %02d" % i content, obj_id = self.hash_content(content) contents[obj_id] = content expected_content_add_bytes += len(content) expected_content_add += 1 ret = self.storage.add_batch(contents) self.assertEqual( ret, { "object:add": expected_content_add, "object:add:bytes": expected_content_add_bytes, }, ) for obj_id in contents: self.assertIn(obj_id, self.storage) def test_content_iterator(self): sto_obj_ids = iter(self.storage) sto_obj_ids = list(sto_obj_ids) self.assertFalse(sto_obj_ids) obj_ids = set() for i in range(100): content, obj_id = self.hash_content(b"content %d" % i) self.storage.add(content, obj_id=obj_id) obj_ids.add(obj_id) sto_obj_ids = set(self.storage) self.assertEqual(sto_obj_ids, obj_ids) def test_list_content(self): all_ids = [] for i in range(1200): content = b"example %d" % i obj_id = compute_hash(content) self.storage.add(content, obj_id) all_ids.append(obj_id) all_ids.sort() ids = list(self.storage.list_content()) self.assertEqual(len(ids), 1200) self.assertEqual(ids[0], all_ids[0]) self.assertEqual(ids[100], all_ids[100]) self.assertEqual(ids[999], all_ids[999]) ids = list(self.storage.list_content(limit=10)) self.assertEqual(len(ids), 10) self.assertEqual(ids[0], all_ids[0]) self.assertEqual(ids[9], all_ids[9]) ids = list(self.storage.list_content(last_obj_id=all_ids[999], limit=100)) self.assertEqual(len(ids), 100) self.assertEqual(ids[0], all_ids[1000]) self.assertEqual(ids[9], all_ids[1009]) diff --git a/swh/objstorage/tests/test_multiplexer_filter.py b/swh/objstorage/tests/test_multiplexer_filter.py index e773253..85070c9 100644 --- a/swh/objstorage/tests/test_multiplexer_filter.py +++ b/swh/objstorage/tests/test_multiplexer_filter.py @@ -1,332 +1,330 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import random import shutil from string import ascii_lowercase import tempfile import unittest from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.factory import get_objstorage from swh.objstorage.multiplexer.filter import id_prefix, id_regex, read_only from swh.objstorage.objstorage import compute_hash def get_random_content(): return bytes("".join(random.sample(ascii_lowercase, 10)), "utf8") class MixinTestReadFilter(unittest.TestCase): # Read only filter should not allow writing def setUp(self): super().setUp() self.tmpdir = tempfile.mkdtemp() pstorage = { "cls": "pathslicing", "root": self.tmpdir, "slicing": "0:5", } base_storage = get_objstorage(**pstorage) - base_storage.id = compute_hash self.storage = get_objstorage( "filtered", storage_conf=pstorage, 
filters_conf=[read_only()] ) self.valid_content = b"pre-existing content" self.invalid_content = b"invalid_content" self.true_invalid_content = b"Anything that is not correct" self.absent_content = b"non-existent content" # Create a valid content. - self.valid_id = base_storage.add(self.valid_content) + self.valid_id = compute_hash(self.valid_content) + base_storage.add(self.valid_content, obj_id=self.valid_id) # Create an invalid id and add a content with it. - self.invalid_id = base_storage.id(self.true_invalid_content) + self.invalid_id = compute_hash(self.true_invalid_content) base_storage.add(self.invalid_content, obj_id=self.invalid_id) # Compute an id for a non-existing content. - self.absent_id = base_storage.id(self.absent_content) + self.absent_id = compute_hash(self.absent_content) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_can_contains(self): self.assertTrue(self.valid_id in self.storage) self.assertTrue(self.invalid_id in self.storage) self.assertFalse(self.absent_id in self.storage) def test_can_iter(self): self.assertIn(self.valid_id, iter(self.storage)) self.assertIn(self.invalid_id, iter(self.storage)) def test_can_len(self): self.assertEqual(2, len(self.storage)) def test_can_get(self): self.assertEqual(self.valid_content, self.storage.get(self.valid_id)) self.assertEqual(self.invalid_content, self.storage.get(self.invalid_id)) def test_can_check(self): with self.assertRaises(ObjNotFoundError): self.storage.check(self.absent_id) with self.assertRaises(Error): self.storage.check(self.invalid_id) self.storage.check(self.valid_id) def test_can_get_random(self): self.assertEqual(1, len(list(self.storage.get_random(1)))) self.assertEqual( len(list(self.storage)), len(set(self.storage.get_random(1000))) ) def test_cannot_add(self): new_id = self.storage.add(b"New content", obj_id=compute_hash(b"New content")) result = self.storage.add(self.valid_content, self.valid_id) self.assertIsNone(new_id, self.storage) self.assertIsNone(result) def test_cannot_restore(self): result = self.storage.restore(self.valid_content, self.valid_id) self.assertIsNone(result) class MixinTestIdFilter: """Mixin class that tests the filters based on filter.IdFilter Methods "ensure_valid", "ensure_invalid" and "filter_storage" must be implemented by subclasses. """ def setUp(self): super().setUp() # Object ids are computed with compute_hash on the content, so it is easy to # create contents that are filtered or not.
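# The "71" below is a hexadecimal prefix of the object id, as checked by PrefixIdObjStorageFilter.is_valid().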
self.prefix = "71" self.tmpdir = tempfile.mkdtemp() # Make the storage filtered self.sconf = { "cls": "pathslicing", "args": {"root": self.tmpdir, "slicing": "0:5"}, } storage = get_objstorage(**self.sconf) self.base_storage = storage self.storage = self.filter_storage(self.sconf) - # Set the id calculators - storage.id = compute_hash # Present content with valid id self.present_valid_content = self.ensure_valid(b"yroqdtotji") - self.present_valid_id = storage.id(self.present_valid_content) + self.present_valid_id = compute_hash(self.present_valid_content) # Present content with invalid id self.present_invalid_content = self.ensure_invalid(b"glxddlmmzb") - self.present_invalid_id = storage.id(self.present_invalid_content) + self.present_invalid_id = compute_hash(self.present_invalid_content) # Missing content with valid id self.missing_valid_content = self.ensure_valid(b"rmzkdclkez") - self.missing_valid_id = storage.id(self.missing_valid_content) + self.missing_valid_id = compute_hash(self.missing_valid_content) # Missing content with invalid id self.missing_invalid_content = self.ensure_invalid(b"hlejfuginh") - self.missing_invalid_id = storage.id(self.missing_invalid_content) + self.missing_invalid_id = compute_hash(self.missing_invalid_content) # Present corrupted content with valid id self.present_corrupted_valid_content = self.ensure_valid(b"cdsjwnpaij") self.true_present_corrupted_valid_content = self.ensure_valid(b"mgsdpawcrr") - self.present_corrupted_valid_id = storage.id( + self.present_corrupted_valid_id = compute_hash( self.true_present_corrupted_valid_content ) # Present corrupted content with invalid id self.present_corrupted_invalid_content = self.ensure_invalid(b"pspjljnrco") self.true_present_corrupted_invalid_content = self.ensure_invalid(b"rjocbnnbso") - self.present_corrupted_invalid_id = storage.id( + self.present_corrupted_invalid_id = compute_hash( self.true_present_corrupted_invalid_content ) # Missing (potentially) corrupted content with valid id self.missing_corrupted_valid_content = self.ensure_valid(b"zxkokfgtou") self.true_missing_corrupted_valid_content = self.ensure_valid(b"royoncooqa") - self.missing_corrupted_valid_id = storage.id( + self.missing_corrupted_valid_id = compute_hash( self.true_missing_corrupted_valid_content ) # Missing (potentially) corrupted content with invalid id self.missing_corrupted_invalid_content = self.ensure_invalid(b"hxaxnrmnyk") self.true_missing_corrupted_invalid_content = self.ensure_invalid(b"qhbolyuifr") - self.missing_corrupted_invalid_id = storage.id( + self.missing_corrupted_invalid_id = compute_hash( self.true_missing_corrupted_invalid_content ) # Add the content that are supposed to be present - self.storage.add(self.present_valid_content) - self.storage.add(self.present_invalid_content) + self.storage.add(self.present_valid_content, obj_id=self.present_valid_id) + self.storage.add(self.present_invalid_content, obj_id=self.present_invalid_id) self.storage.add( self.present_corrupted_valid_content, obj_id=self.present_corrupted_valid_id ) self.storage.add( self.present_corrupted_invalid_content, obj_id=self.present_corrupted_invalid_id, ) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def filter_storage(self, sconf): raise NotImplementedError( "Id_filter test class must have a filter_storage method" ) def ensure_valid(self, content=None): if content is None: content = get_random_content() - while not self.storage.is_valid(self.base_storage.id(content)): + while not 
self.storage.is_valid(compute_hash(content)): content = get_random_content() return content def ensure_invalid(self, content=None): if content is None: content = get_random_content() - while self.storage.is_valid(self.base_storage.id(content)): + while self.storage.is_valid(compute_hash(content)): content = get_random_content() return content def test_contains(self): # Both contents are present, but the invalid one should be ignored. self.assertTrue(self.present_valid_id in self.storage) self.assertFalse(self.present_invalid_id in self.storage) self.assertFalse(self.missing_valid_id in self.storage) self.assertFalse(self.missing_invalid_id in self.storage) self.assertTrue(self.present_corrupted_valid_id in self.storage) self.assertFalse(self.present_corrupted_invalid_id in self.storage) self.assertFalse(self.missing_corrupted_valid_id in self.storage) self.assertFalse(self.missing_corrupted_invalid_id in self.storage) def test_iter(self): self.assertIn(self.present_valid_id, iter(self.storage)) self.assertNotIn(self.present_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_valid_id, iter(self.storage)) self.assertNotIn(self.missing_invalid_id, iter(self.storage)) self.assertIn(self.present_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.present_corrupted_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_invalid_id, iter(self.storage)) def test_len(self): # Four contents are present, but only two should be valid. self.assertEqual(2, len(self.storage)) def test_get(self): self.assertEqual( self.present_valid_content, self.storage.get(self.present_valid_id) ) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_invalid_id) self.assertEqual( self.present_corrupted_valid_content, self.storage.get(self.present_corrupted_valid_id), ) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_invalid_id) def test_check(self): self.storage.check(self.present_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_invalid_id) with self.assertRaises(Error): self.storage.check(self.present_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_invalid_id) def test_get_random(self): self.assertEqual(0, len(list(self.storage.get_random(0)))) random_content = list(self.storage.get_random(1000)) self.assertIn(self.present_valid_id, random_content) self.assertNotIn(self.present_invalid_id, random_content) self.assertNotIn(self.missing_valid_id, random_content) self.assertNotIn(self.missing_invalid_id, random_content) self.assertIn(self.present_corrupted_valid_id, random_content) self.assertNotIn(self.present_corrupted_invalid_id, random_content) 
self.assertNotIn(self.missing_corrupted_valid_id, random_content) self.assertNotIn(self.missing_corrupted_invalid_id, random_content) def test_add(self): # Add valid and invalid contents to the storage and check their # presence with the unfiltered storage. valid_content = self.ensure_valid(b"ulepsrjbgt") - valid_id = self.base_storage.id(valid_content) + valid_id = compute_hash(valid_content) invalid_content = self.ensure_invalid(b"znvghkjked") - invalid_id = self.base_storage.id(invalid_content) - self.storage.add(valid_content) - self.storage.add(invalid_content) + invalid_id = compute_hash(invalid_content) + self.storage.add(valid_content, obj_id=valid_id) + self.storage.add(invalid_content, obj_id=invalid_id) self.assertTrue(valid_id in self.base_storage) self.assertFalse(invalid_id in self.base_storage) def test_restore(self): # Add corrupted content to the storage and then try to restore it valid_content = self.ensure_valid(b"ulepsrjbgt") - valid_id = self.base_storage.id(valid_content) + valid_id = compute_hash(valid_content) corrupted_content = self.ensure_valid(b"ltjkjsloyb") - corrupted_id = self.base_storage.id(corrupted_content) + corrupted_id = compute_hash(corrupted_content) self.storage.add(corrupted_content, obj_id=valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(corrupted_id) with self.assertRaises(Error): self.storage.check(valid_id) self.storage.restore(valid_content) self.storage.check(valid_id) class TestPrefixFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): self.prefix = b"71" super().setUp() def ensure_valid(self, content): obj_id = compute_hash(content) hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertTrue(hex_obj_id.startswith(self.prefix)) return content def ensure_invalid(self, content): obj_id = compute_hash(content) hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertFalse(hex_obj_id.startswith(self.prefix)) return content def filter_storage(self, sconf): return get_objstorage( "filtered", {"storage_conf": sconf, "filters_conf": [id_prefix(self.prefix)]}, ) class TestRegexFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): self.regex = r"[a-f][0-9].*" super().setUp() def filter_storage(self, sconf): return get_objstorage( "filtered", {"storage_conf": sconf, "filters_conf": [id_regex(self.regex)]} ) diff --git a/swh/objstorage/tests/test_objstorage_http.py b/swh/objstorage/tests/test_objstorage_http.py index c28a8ce..ab7d687 100644 --- a/swh/objstorage/tests/test_objstorage_http.py +++ b/swh/objstorage/tests/test_objstorage_http.py @@ -1,116 +1,120 @@ # Copyright (C) 2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import pytest import requests_mock from requests_mock.contrib import fixture from swh.objstorage import exc from swh.objstorage.factory import get_objstorage +from swh.objstorage.objstorage import compute_hash def build_objstorage(): """Build an HTTPReadOnlyObjStorage suitable for tests. This instantiates two ObjStorages, one HTTPReadOnlyObjStorage (the "front" one being under test), and one InMemoryObjStorage (which actually stores the test content), and installs a request mock fixture to route HTTP requests from the HTTPReadOnlyObjStorage to query the InMemoryStorage. Also fills the backend storage with 100 objects.
""" sto_back = get_objstorage(cls="memory") objids = [] for i in range(100): - objids.append(sto_back.add(f"some content {i}".encode())) + content = f"some content {i}".encode() + objids.append(sto_back.add(content, obj_id=compute_hash(content))) url = "http://127.0.0.1/content/" sto_front = get_objstorage(cls="http", url=url) mock = fixture.Fixture() mock.setUp() def get_cb(request, context): dirname, basename = request.path.rsplit("/", 1) objid = bytes.fromhex(basename) if dirname == "/content" and objid in sto_back: return sto_back.get(objid) context.status_code = 404 def head_cb(request, context): dirname, basename = request.path.rsplit("/", 1) objid = bytes.fromhex(basename) if dirname != "/content" or objid not in sto_back: context.status_code = 404 return b"Not Found" return b"Found" mock.register_uri(requests_mock.GET, requests_mock.ANY, content=get_cb) mock.register_uri(requests_mock.HEAD, requests_mock.ANY, content=head_cb) return sto_front, sto_back, objids def test_http_objstorage(): sto_front, sto_back, objids = build_objstorage() for objid in objids: assert objid in sto_front assert sto_front.get(objid) == sto_back.get(objid) assert sto_front.get(objid).decode().startswith("some content ") def test_http_objstorage_missing(): sto_front, sto_back, objids = build_objstorage() assert b"\x00" * 20 not in sto_front def test_http_objstorage_get_missing(): sto_front, sto_back, objids = build_objstorage() with pytest.raises(exc.ObjNotFoundError): sto_front.get(b"\x00" * 20) def test_http_objstorage_check(): sto_front, sto_back, objids = build_objstorage() for objid in objids: assert sto_front.check(objid) is None # no Exception means OK # create an invalid object in the in-memory objstorage invalid_content = b"p0wn3d content" fake_objid = "\x01" * 20 id_added = sto_back.add(invalid_content, fake_objid) assert id_added == fake_objid # the http objstorage should report it as invalid with pytest.raises(exc.Error): sto_front.check(id_added) def test_http_objstorage_read_only(): sto_front, sto_back, objids = build_objstorage() + content = b"" + obj_id = compute_hash(content) with pytest.raises(exc.ReadOnlyObjStorage): - sto_front.add(b"") + sto_front.add(content, obj_id=obj_id) with pytest.raises(exc.ReadOnlyObjStorage): sto_front.restore(b"") with pytest.raises(exc.ReadOnlyObjStorage): sto_front.delete(b"\x00" * 20) def test_http_objstorage_not_iterable(): sto_front, sto_back, objids = build_objstorage() with pytest.raises(exc.NonIterableObjStorage): len(sto_front) with pytest.raises(exc.NonIterableObjStorage): iter(sto_front) def test_http_cannonical_url(): url = "http://127.0.0.1/content" sto = get_objstorage(cls="http", url=url) assert sto.root_path == url + "/" diff --git a/swh/objstorage/tests/test_objstorage_multiplexer.py b/swh/objstorage/tests/test_objstorage_multiplexer.py index c3670a4..bfda77b 100644 --- a/swh/objstorage/tests/test_objstorage_multiplexer.py +++ b/swh/objstorage/tests/test_objstorage_multiplexer.py @@ -1,68 +1,68 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import shutil import tempfile import unittest from swh.objstorage.backends.pathslicing import PathSlicingObjStorage from swh.objstorage.multiplexer import MultiplexerObjStorage from swh.objstorage.multiplexer.filter import add_filter, read_only from .objstorage_testing import 
ObjStorageTestFixture class TestMultiplexerObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() self.tmpdir = tempfile.mkdtemp() os.mkdir(os.path.join(self.tmpdir, "root1")) os.mkdir(os.path.join(self.tmpdir, "root2")) self.storage_v1 = PathSlicingObjStorage( os.path.join(self.tmpdir, "root1"), "0:2/2:4" ) self.storage_v2 = PathSlicingObjStorage( os.path.join(self.tmpdir, "root2"), "0:1/0:5" ) self.r_storage = add_filter(self.storage_v1, read_only()) self.w_storage = self.storage_v2 self.storage = MultiplexerObjStorage([self.r_storage, self.w_storage]) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_contains(self): content_p, obj_id_p = self.hash_content(b"contains_present") content_m, obj_id_m = self.hash_content(b"contains_missing") self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_delete_missing(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().test_delete_missing() def test_delete_present(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().test_delete_present() def test_get_random_contents(self): content, obj_id = self.hash_content(b"get_random_content") - self.storage.add(content) + self.storage.add(content, obj_id=obj_id) random_contents = list(self.storage.get_random(1)) self.assertEqual(1, len(random_contents)) self.assertIn(obj_id, random_contents) def test_access_readonly(self): # Add a content to the readonly storage content, obj_id = self.hash_content(b"content in read-only") - self.storage_v1.add(content) + self.storage_v1.add(content, obj_id=obj_id) # Try to retrieve it on the main storage self.assertIn(obj_id, self.storage) diff --git a/swh/objstorage/tests/test_objstorage_striping.py b/swh/objstorage/tests/test_objstorage_striping.py index 7cc0ec2..a96f35c 100644 --- a/swh/objstorage/tests/test_objstorage_striping.py +++ b/swh/objstorage/tests/test_objstorage_striping.py @@ -1,80 +1,77 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import shutil import tempfile import unittest from swh.objstorage.factory import get_objstorage from .objstorage_testing import ObjStorageTestFixture class TestStripingObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() self.base_dir = tempfile.mkdtemp() os.mkdir(os.path.join(self.base_dir, "root1")) os.mkdir(os.path.join(self.base_dir, "root2")) storage_config = { "cls": "striping", "args": { "objstorages": [ { "cls": "pathslicing", "args": { "root": os.path.join(self.base_dir, "root1"), "slicing": "0:2", "allow_delete": True, }, }, { "cls": "pathslicing", "args": { "root": os.path.join(self.base_dir, "root2"), "slicing": "0:2", "allow_delete": True, }, }, ] }, } self.storage = get_objstorage(**storage_config) def tearDown(self): shutil.rmtree(self.base_dir) - def test_add_get_wo_id(self): - self.skipTest("can't add without id in the multiplexer storage") - def test_add_striping_behavior(self): exp_storage_counts = [0, 0] storage_counts = [0, 0] for i in range(100): content, obj_id = self.hash_content(b"striping_behavior_test%02d" % i) self.storage.add(content, obj_id) exp_storage_counts[self.storage.get_storage_index(obj_id)] += 1 count = 0 for i, storage in 
enumerate(self.storage.storages): if obj_id not in storage: continue count += 1 storage_counts[i] += 1 self.assertEqual(count, 1) self.assertEqual(storage_counts, exp_storage_counts) def test_get_striping_behavior(self): # Make sure we can read objects that are available in any backend # storage content, obj_id = self.hash_content(b"striping_behavior_test") for storage in self.storage.storages: storage.add(content, obj_id) self.assertIn(obj_id, self.storage) storage.delete(obj_id) self.assertNotIn(obj_id, self.storage) def test_list_content(self): self.skipTest("Quite a challenge to make it work") diff --git a/swh/objstorage/tests/test_objstorage_winery.py b/swh/objstorage/tests/test_objstorage_winery.py index 018fce7..1b2ba8b 100644 --- a/swh/objstorage/tests/test_objstorage_winery.py +++ b/swh/objstorage/tests/test_objstorage_winery.py @@ -1,401 +1,402 @@ # Copyright (C) 2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import time import pytest import sh from swh.objstorage import exc from swh.objstorage.backends.winery.database import DatabaseAdmin from swh.objstorage.backends.winery.objstorage import Packer, pack from swh.objstorage.backends.winery.stats import Stats from swh.objstorage.backends.winery.throttler import ( BandwidthCalculator, IOThrottler, LeakyBucket, Throttler, ) from swh.objstorage.factory import get_objstorage +from swh.objstorage.objstorage import compute_hash from swh.objstorage.utils import call_async from .winery_benchmark import Bench, work from .winery_testing_helpers import PoolHelper, SharedBaseHelper @pytest.fixture def needs_ceph(): try: sh.ceph("--version") except sh.CommandNotFound: pytest.skip("the ceph CLI was not found") @pytest.fixture def ceph_pool(needs_ceph): pool = PoolHelper(shard_max_size=10 * 1024 * 1024) pool.clobber() pool.pool_create() yield pool pool.images_clobber() pool.clobber() @pytest.fixture def storage(request, postgresql): marker = request.node.get_closest_marker("shard_max_size") if marker is None: shard_max_size = 1024 else: shard_max_size = marker.args[0] dsn = ( f"postgres://{postgresql.info.user}" f":@{postgresql.info.host}:{postgresql.info.port}" ) storage = get_objstorage( cls="winery", base_dsn=dsn, shard_dsn=dsn, shard_max_size=shard_max_size, throttle_write=200 * 1024 * 1024, throttle_read=100 * 1024 * 1024, ) yield storage storage.winery.uninit() # # pytest-postgresql will not remove databases that it did not # create between tests (only at the very end).
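# so every database winery created during the test is dropped by hand, keeping only the fixture's own database and the "tests_tmpl" template.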
# d = DatabaseAdmin(dsn) for database in d.list_databases(): if database != postgresql.info.dbname and database != "tests_tmpl": DatabaseAdmin(dsn, database).drop_database() @pytest.fixture def winery(storage): return storage.winery def test_winery_sharedbase(winery): base = winery.base shard1 = base.whoami assert shard1 is not None assert shard1 == base.whoami id1 = base.id assert id1 is not None assert id1 == base.id def test_winery_add_get(winery): shard = winery.base.whoami content = b"SOMETHING" - obj_id = winery.add(content=content) + obj_id = winery.add(content=content, obj_id=compute_hash(content, "sha256")) assert ( obj_id.hex() == "866878b165607851782d8d233edf0c261172ff67926330d3bbd10c705b92d24f" ) assert winery.add(content=content, obj_id=obj_id) == obj_id assert winery.add(content=content, obj_id=obj_id, check_presence=False) == obj_id assert winery.base.whoami == shard assert winery.get(obj_id) == content with pytest.raises(exc.ObjNotFoundError): winery.get(b"unknown") winery.shard.drop() @pytest.mark.shard_max_size(1) def test_winery_add_and_pack(winery, mocker): mocker.patch("swh.objstorage.backends.winery.objstorage.pack", return_value=True) shard = winery.base.whoami content = b"SOMETHING" - obj_id = winery.add(content=content) + obj_id = winery.add(content=content, obj_id=compute_hash(content, "sha256")) assert ( obj_id.hex() == "866878b165607851782d8d233edf0c261172ff67926330d3bbd10c705b92d24f" ) assert winery.base.whoami != shard assert len(winery.packers) == 1 packer = winery.packers[0] packer.join() assert packer.exitcode == 0 def test_winery_delete(storage): with pytest.raises(PermissionError): storage.delete(None) def test_winery_get_shard_info(winery): assert winery.base.get_shard_info(1234) is None assert SharedBaseHelper(winery.base).get_shard_info_by_name("nothing") is None @pytest.mark.shard_max_size(10 * 1024 * 1024) def test_winery_packer(winery, ceph_pool): shard = winery.base.whoami content = b"SOMETHING" - winery.add(content=content) + winery.add(content=content, obj_id=compute_hash(content, "sha256")) winery.base.shard_packing_starts() packer = Packer(shard, **winery.args) try: assert packer.run() is True finally: packer.uninit() readonly, packing = SharedBaseHelper(winery.base).get_shard_info_by_name(shard) assert readonly is True assert packing is False @pytest.mark.shard_max_size(10 * 1024 * 1024) def test_winery_get_object(winery, ceph_pool): shard = winery.base.whoami content = b"SOMETHING" - obj_id = winery.add(content=content) + obj_id = compute_hash(content, "sha256") + winery.add(content=content, obj_id=obj_id) winery.base.shard_packing_starts() assert pack(shard, **winery.args) is True assert winery.get(obj_id) == content def test_winery_ceph_pool(needs_ceph): name = "IMAGE" pool = PoolHelper(shard_max_size=10 * 1024 * 1024) pool.clobber() pool.pool_create() pool.image_create(name) p = pool.image_path(name) assert p.endswith(name) something = "SOMETHING" open(p, "w").write(something) assert open(p).read(len(something)) == something assert pool.image_list() == [name] pool.image_remap_ro(name) pool.images_clobber() assert pool.image_list() == [name] pool.clobber() assert pool.image_list() == [] @pytest.mark.shard_max_size(10 * 1024 * 1024) def test_winery_bench_work(winery, ceph_pool, tmpdir): # # rw worker creates a shard # whoami = winery.base.whoami shards_info = list(winery.base.list_shards()) assert len(shards_info) == 1 shard, readonly, packing = shards_info[0] assert (readonly, packing) == (False, False) winery.args["dir"] = str(tmpdir) assert work("rw", winery.args) == "rw" shards_info = { name: (readonly, packing) for name, readonly, packing in winery.base.list_shards() } assert len(shards_info) ==
2 assert shards_info[whoami] == (True, False) # # ro worker reads a shard # winery.args["ro_worker_max_request"] = 1 assert work("ro", winery.args) == "ro" def test_winery_bench_real(pytestconfig, postgresql, ceph_pool): dsn = ( f"postgres://{postgresql.info.user}" f":@{postgresql.info.host}:{postgresql.info.port}" ) kwargs = { "output_dir": pytestconfig.getoption("--winery-bench-output-directory"), "rw_workers": pytestconfig.getoption("--winery-bench-rw-workers"), "ro_workers": pytestconfig.getoption("--winery-bench-ro-workers"), "shard_max_size": pytestconfig.getoption("--winery-shard-max-size"), "ro_worker_max_request": pytestconfig.getoption( "--winery-bench-ro-worker-max-request" ), "duration": pytestconfig.getoption("--winery-bench-duration"), "base_dsn": dsn, "shard_dsn": dsn, "throttle_read": pytestconfig.getoption("--winery-bench-throttle-read"), "throttle_write": pytestconfig.getoption("--winery-bench-throttle-write"), } count = call_async(Bench(kwargs).run) assert count > 0 def test_winery_bench_fake(pytestconfig, mocker): kwargs = { "rw_workers": pytestconfig.getoption("--winery-bench-rw-workers"), "ro_workers": pytestconfig.getoption("--winery-bench-ro-workers"), "duration": pytestconfig.getoption("--winery-bench-duration"), } def run(kind): time.sleep(kwargs["duration"] * 2) return kind mocker.patch("swh.objstorage.tests.winery_benchmark.Worker.run", side_effect=run) assert call_async(Bench(kwargs).run) == kwargs["rw_workers"] + kwargs["ro_workers"] def test_winery_leaky_bucket_tick(mocker): total = 100 half = 50 b = LeakyBucket(total) sleep = mocker.spy(time, "sleep") assert b.current == b.total sleep.assert_not_called() # # Bucket is at 100, add(50) => drops to 50 # b.add(half) assert b.current == half sleep.assert_not_called() # # Bucket is at 50, add(50) => drops to 0 # b.add(half) assert b.current == 0 sleep.assert_not_called() # # Bucket is at 0, add(50) => waits until it is at 50 and then drops to 0 # b.add(half) assert b.current == 0 sleep.assert_called_once() # # Sleep more than one second, bucket is full again, i.e. 
at 100 # time.sleep(2) mocker.resetall() b.add(0) assert b.current == total sleep.assert_not_called() # # Bucket is full at 100 and waits when requesting 150 which is # more than it can contain # b.add(total + half) assert b.current == 0 sleep.assert_called_once() mocker.resetall() # # Bucket is empty and waits when requesting 150 which is more # than it can contain # b.add(total + half) assert b.current == 0 sleep.assert_called_once() mocker.resetall() def test_winery_leaky_bucket_reset(): b = LeakyBucket(100) assert b.total == 100 assert b.current == b.total b.reset(50) assert b.total == 50 assert b.current == b.total b.reset(100) assert b.total == 100 assert b.current == 50 def test_winery_bandwidth_calculator(mocker): now = 1 def monotonic(): return now mocker.patch("time.monotonic", side_effect=monotonic) b = BandwidthCalculator() assert b.get() == 0 count = 100 * 1024 * 1024 going_up = [] for t in range(b.duration): now += 1 b.add(count) going_up.append(b.get()) assert b.get() == count going_down = [] for t in range(b.duration - 1): now += 1 b.add(0) going_down.append(b.get()) going_down.reverse() assert going_up[:-1] == going_down assert len(b.history) == b.duration - 1 def test_winery_io_throttler(postgresql, mocker): dsn = ( f"postgres://{postgresql.info.user}" f":@{postgresql.info.host}:{postgresql.info.port}" ) DatabaseAdmin(dsn, "throttler").create_database() sleep = mocker.spy(time, "sleep") speed = 100 i = IOThrottler("read", base_dsn=dsn, throttle_read=100) count = speed i.add(count) sleep.assert_not_called() i.add(count) sleep.assert_called_once() # # Force slow down # mocker.resetall() i.sync_interval = 0 i.max_speed = 1 assert i.max_speed != i.bucket.total i.add(2) assert i.max_speed == i.bucket.total sleep.assert_called_once() def test_winery_throttler(postgresql): dsn = ( f"postgres://{postgresql.info.user}" f":@{postgresql.info.host}:{postgresql.info.port}" ) t = Throttler(base_dsn=dsn, throttle_read=100, throttle_write=100) base = {} key = "KEY" content = "CONTENT" def reader(k): return base[k] def writer(k, v): base[k] = v return True assert t.throttle_add(writer, key, content) is True assert t.throttle_get(reader, key) == content def test_winery_stats(tmpdir): s = Stats(None) assert s.stats_active is False s = Stats(tmpdir / "stats") assert s.stats_active is True assert os.path.exists(s.stats_filename) size = os.path.getsize(s.stats_filename) s._stats_flush_interval = 0 k = "KEY" v = "CONTENT" s.stats_read(k, v) s.stats_write(k, v) s.stats_read(k, v) s.stats_write(k, v) s.__del__() assert os.path.getsize(s.stats_filename) > size
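Taken together, these changes make ``obj_id`` a required argument of ``add()`` across backends, filters, the multiplexer and the tests: callers are now expected to hash the content themselves. A minimal sketch of the resulting calling convention, assuming the in-memory backend and the default sha1 ``compute_hash``:

    from swh.objstorage.factory import get_objstorage
    from swh.objstorage.objstorage import compute_hash

    objstorage = get_objstorage(cls="memory")
    content = b"some payload"
    # The caller computes the id; add() trusts it to match the bytes.
    obj_id = compute_hash(content)  # 20-byte sha1 digest by default
    assert objstorage.add(content, obj_id=obj_id) == obj_id
    assert objstorage.get(obj_id) == content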