diff --git a/swh/objstorage/backends/azure.py b/swh/objstorage/backends/azure.py
index 5b897f1..ae52678 100644
--- a/swh/objstorage/backends/azure.py
+++ b/swh/objstorage/backends/azure.py
@@ -1,413 +1,417 @@
 # Copyright (C) 2016-2021 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import asyncio
 import contextlib
 import datetime
 from itertools import product
 import string
 from typing import Dict, Iterator, List, Optional, Union
 import warnings

 from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
 from azure.storage.blob import (
     ContainerClient,
     ContainerSasPermissions,
     generate_container_sas,
 )
 from azure.storage.blob.aio import ContainerClient as AsyncContainerClient
 from typing_extensions import Literal

 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
 )
 from swh.objstorage.utils import call_async


 def get_container_url(
     account_name: str,
     account_key: str,
     container_name: str,
     access_policy: str = "read_only",
     expiry: datetime.timedelta = datetime.timedelta(days=365),
     **kwargs,
 ) -> str:
     """Get the full url, for the given container on the given account, with a
     Shared Access Signature granting the specified access policy.

     Args:
         account_name: name of the storage account for which to generate the URL
         account_key: shared account key of the storage account used to
             generate the SAS
         container_name: name of the container for which to grant access in the
             storage account
         access_policy: one of ``read_only``, ``append_only``, ``full``
         expiry: the interval in the future with which the signature will expire

     Returns:
         the full URL of the container, with the shared access signature.
     """
     access_policies = {
         "read_only": ContainerSasPermissions(
             read=True, list=True, delete=False, write=False
         ),
         "append_only": ContainerSasPermissions(
             read=True, list=True, delete=False, write=True
         ),
         "full": ContainerSasPermissions(read=True, list=True, delete=True, write=True),
     }

     current_time = datetime.datetime.utcnow()

     signature = generate_container_sas(
         account_name,
         container_name,
         account_key=account_key,
         permission=access_policies[access_policy],
         start=current_time + datetime.timedelta(minutes=-1),
         expiry=current_time + expiry,
     )

     return f"https://{account_name}.blob.core.windows.net/{container_name}?{signature}"


 class AzureCloudObjStorage(ObjStorage):
     """ObjStorage backend for Azure blob storage accounts.

     Args:
         container_url: the URL of the container in which the objects are stored.
         account_name: (deprecated) the name of the storage account under which
             objects are stored
         api_secret_key: (deprecated) the shared account key
         container_name: (deprecated) the name of the container under which
             objects are stored
         compression: the compression algorithm used to compress objects in storage

     Notes:
         The container url should contain the credentials via a "Shared Access
         Signature". The :func:`get_container_url` helper can be used to
         generate such a URL from the account's access keys. The
         ``account_name``, ``api_secret_key`` and ``container_name`` arguments
         are deprecated.
""" PRIMARY_HASH: Literal["sha1"] = "sha1" def __init__( self, container_url: Optional[str] = None, account_name: Optional[str] = None, api_secret_key: Optional[str] = None, container_name: Optional[str] = None, compression="gzip", **kwargs, ): if container_url is None: if account_name is None or api_secret_key is None or container_name is None: raise ValueError( "AzureCloudObjStorage must have a container_url or all three " "account_name, api_secret_key and container_name" ) else: warnings.warn( "The Azure objstorage account secret key parameters are " "deprecated, please use container URLs instead.", DeprecationWarning, ) container_url = get_container_url( account_name=account_name, account_key=api_secret_key, container_name=container_name, access_policy="full", ) super().__init__(**kwargs) self.container_url = container_url self.compression = compression def get_container_client(self, hex_obj_id): """Get the container client for the container that contains the object with internal id hex_obj_id This is used to allow the PrefixedAzureCloudObjStorage to dispatch the client according to the prefix of the object id. """ return ContainerClient.from_container_url(self.container_url) @contextlib.asynccontextmanager async def get_async_container_clients(self): """Returns a collection of container clients, to be passed to ``get_async_blob_client``. Each container may not be used in more than one asyncio loop.""" client = AsyncContainerClient.from_container_url(self.container_url) async with client: yield {"": client} def get_blob_client(self, hex_obj_id): """Get the azure blob client for the given hex obj id""" container_client = self.get_container_client(hex_obj_id) return container_client.get_blob_client(blob=hex_obj_id) def get_async_blob_client(self, hex_obj_id, container_clients): """Get the azure blob client for the given hex obj id and a collection yielded by ``get_async_container_clients``.""" return container_clients[""].get_blob_client(blob=hex_obj_id) def get_all_container_clients(self): """Get all active block_blob_services""" yield self.get_container_client("") - def _internal_id(self, obj_id): + def _internal_id(self, obj_id: ObjId) -> str: """Internal id is the hex version in objstorage.""" + if isinstance(obj_id, dict): + obj_id = obj_id[self.PRIMARY_HASH] return hashutil.hash_to_hex(obj_id) def check_config(self, *, check_write): """Check the configuration for this object storage""" for container_client in self.get_all_container_clients(): props = container_client.get_container_properties() # FIXME: check_write is ignored here if not props: return False return True def __contains__(self, obj_id: ObjId) -> bool: """Does the storage contains the obj_id.""" hex_obj_id = self._internal_id(obj_id) client = self.get_blob_client(hex_obj_id) try: client.get_blob_properties() except ResourceNotFoundError: return False else: return True def __iter__(self) -> Iterator[CompositeObjId]: """Iterate over the objects present in the storage.""" for client in self.get_all_container_clients(): for obj in client.list_blobs(): yield {self.PRIMARY_HASH: hashutil.hash_to_bytes(obj.name)} def __len__(self): """Compute the number of objects in the current object storage. Returns: number of objects contained in the storage. 
""" return sum(1 for i in self) def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: """Add an obj in storage if it's not there already.""" if check_presence and obj_id in self: return hex_obj_id = self._internal_id(obj_id) # Send the compressed content compressor = compressors[self.compression]() data = compressor.compress(content) data += compressor.flush() client = self.get_blob_client(hex_obj_id) try: client.upload_blob(data=data, length=len(data)) except ResourceExistsError: # There's a race condition between check_presence and upload_blob, # that we can't get rid of as the azure api doesn't allow atomic # replaces or renaming a blob. As the restore operation explicitly # removes the blob, it should be safe to just ignore the error. pass def restore(self, content: bytes, obj_id: ObjId) -> None: """Restore a content.""" if obj_id in self: self.delete(obj_id) return self.add(content, obj_id, check_presence=False) def get(self, obj_id: ObjId) -> bytes: """retrieve blob's content if found.""" return call_async(self._get_async, obj_id) async def _get_async(self, obj_id, container_clients=None): """Coroutine implementing ``get(obj_id)`` using azure-storage-blob's asynchronous implementation. While ``get(obj_id)`` does not need asynchronicity, this is useful to ``get_batch(obj_ids)``, as it can run multiple ``_get_async`` tasks concurrently.""" if container_clients is None: # If the container_clients argument is not passed, create a new # collection of container_clients and restart the function with it. async with self.get_async_container_clients() as container_clients: return await self._get_async(obj_id, container_clients) hex_obj_id = self._internal_id(obj_id) client = self.get_async_blob_client(hex_obj_id, container_clients) try: download = await client.download_blob() except ResourceNotFoundError: raise ObjNotFoundError(obj_id) from None else: data = await download.content_as_bytes() decompressor = decompressors[self.compression]() ret = decompressor.decompress(data) if decompressor.unused_data: raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret async def _get_async_or_none(self, obj_id, container_clients): """Like ``get_async(obj_id)``, but returns None instead of raising ResourceNotFoundError. 
         Used by ``get_batch`` so other blobs can be returned even if one is
         missing."""
         try:
             return await self._get_async(obj_id, container_clients)
         except ObjNotFoundError:
             return None

     async def _get_batch_async(self, obj_ids):
         async with self.get_async_container_clients() as container_clients:
             return await asyncio.gather(
                 *[
                     self._get_async_or_none(obj_id, container_clients)
                     for obj_id in obj_ids
                 ]
             )

     def get_batch(self, obj_ids: List[ObjId]) -> Iterator[Optional[bytes]]:
         """Retrieve objects' raw content in bulk from storage, concurrently."""
         return call_async(self._get_batch_async, obj_ids)

     def check(self, obj_id: ObjId) -> None:
         """Check the content integrity."""
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
+        if isinstance(obj_id, dict):
+            obj_id = obj_id[self.PRIMARY_HASH]
         if content_obj_id != obj_id:
             raise Error(obj_id)

     def delete(self, obj_id: ObjId):
         """Delete an object."""
         super().delete(obj_id)  # Check delete permission
         hex_obj_id = self._internal_id(obj_id)
         client = self.get_blob_client(hex_obj_id)
         try:
             client.delete_blob()
         except ResourceNotFoundError:
             raise ObjNotFoundError(obj_id) from None
         return True


 class PrefixedAzureCloudObjStorage(AzureCloudObjStorage):
     """ObjStorage with azure capabilities, striped by prefix.

     accounts is a dict containing entries of the form:
         <prefix>: <container_url_for_prefix>
     """

     def __init__(
         self,
         accounts: Dict[str, Union[str, Dict[str, str]]],
         compression="gzip",
         **kwargs,
     ):
         # shortcut AzureCloudObjStorage __init__
         ObjStorage.__init__(self, **kwargs)

         self.compression = compression

         # Definition sanity check
         prefix_lengths = set(len(prefix) for prefix in accounts)
         if not len(prefix_lengths) == 1:
             raise ValueError(
                 "Inconsistent prefixes, found lengths %s"
                 % ", ".join(str(lst) for lst in sorted(prefix_lengths))
             )
         self.prefix_len = prefix_lengths.pop()

         expected_prefixes = set(
             "".join(letters)
             for letters in product(
                 set(string.hexdigits.lower()), repeat=self.prefix_len
             )
         )
         missing_prefixes = expected_prefixes - set(accounts)
         if missing_prefixes:
             raise ValueError(
                 "Missing prefixes %s" % ", ".join(sorted(missing_prefixes))
             )

         do_warning = False

         self.container_urls = {}
         for prefix, container_url in accounts.items():
             if isinstance(container_url, dict):
                 do_warning = True
                 container_url = get_container_url(
                     account_name=container_url["account_name"],
                     account_key=container_url["api_secret_key"],
                     container_name=container_url["container_name"],
                     access_policy="full",
                 )
             self.container_urls[prefix] = container_url

         if do_warning:
             warnings.warn(
                 "The Azure objstorage account secret key parameters are "
                 "deprecated, please use container URLs instead.",
                 DeprecationWarning,
             )

     def get_container_client(self, hex_obj_id):
         """Get the block_blob_service and container that contains the object
         with internal id hex_obj_id
         """
         prefix = hex_obj_id[: self.prefix_len]
         return ContainerClient.from_container_url(self.container_urls[prefix])

     @contextlib.asynccontextmanager
     async def get_async_container_clients(self):
         # This is equivalent to:
         # client1 = AsyncContainerClient.from_container_url(url1)
         # ...
         # client16 = AsyncContainerClient.from_container_url(url16)
         # async with client1, ..., client16:
         #     yield {prefix1: client1, ..., prefix16: client16}
         clients = {
             prefix: AsyncContainerClient.from_container_url(url)
             for (prefix, url) in self.container_urls.items()
         }
         async with contextlib.AsyncExitStack() as stack:
             for client in clients.values():
                 await stack.enter_async_context(client)
             yield clients

     def get_async_blob_client(self, hex_obj_id, container_clients):
         """Get the azure blob client for the given hex obj id and a collection
         yielded by ``get_async_container_clients``."""
         prefix = hex_obj_id[: self.prefix_len]
         return container_clients[prefix].get_blob_client(blob=hex_obj_id)

     def get_all_container_clients(self):
         """Get all active container clients"""
         # iterate on items() to sort blob services;
         # needed to be able to paginate in the list_content() method
         yield from (
             self.get_container_client(prefix) for prefix in sorted(self.container_urls)
         )
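The normalization added to ``_internal_id`` and ``check`` above, accepting either raw primary-hash ``bytes`` or a ``CompositeObjId`` dict keyed by hash name, is the pattern this whole series applies to each backend. A minimal standalone sketch of the behavior, assuming only the stdlib (the ``ObjId`` alias and ``internal_id`` helper here are illustrative stand-ins, not the module's own names):

```python
import hashlib
from typing import Dict, Union

# Stand-in for swh.objstorage.interface.ObjId: raw digest bytes, or a
# composite dict mapping hash names to digests (assumption for this sketch).
ObjId = Union[bytes, Dict[str, bytes]]
PRIMARY_HASH = "sha1"


def internal_id(obj_id: ObjId) -> str:
    """Normalize either id form to the hex string used as the blob name."""
    if isinstance(obj_id, dict):
        obj_id = obj_id[PRIMARY_HASH]
    return obj_id.hex()


raw = hashlib.sha1(b"hello").digest()
# Both forms resolve to the same internal name:
assert internal_id(raw) == internal_id({"sha1": raw}) == raw.hex()
```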
""" PRIMARY_HASH: Literal["sha1"] = "sha1" def __init__(self, **args): super().__init__() - self.state = {} + self.state: Dict[bytes, bytes] = {} def check_config(self, *, check_write): return True + def _state_key(self, obj_id: ObjId) -> bytes: + if isinstance(obj_id, dict): + return obj_id[self.PRIMARY_HASH] + else: + return obj_id + def __contains__(self, obj_id: ObjId) -> bool: - return obj_id in self.state + return self._state_key(obj_id) in self.state def __iter__(self) -> Iterator[CompositeObjId]: for id_ in sorted(self.state): yield {self.PRIMARY_HASH: id_} def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: if check_presence and obj_id in self: return - self.state[obj_id] = content + self.state[self._state_key(obj_id)] = content def get(self, obj_id: ObjId) -> bytes: if obj_id not in self: raise ObjNotFoundError(obj_id) - return self.state[obj_id] + return self.state[self._state_key(obj_id)] def check(self, obj_id: ObjId) -> None: if obj_id not in self: raise ObjNotFoundError(obj_id) - if compute_hash(self.state[obj_id]) != obj_id: + if compute_hash(self.state[self._state_key(obj_id)]) != self._state_key(obj_id): raise Error("Corrupt object %s" % objid_to_default_hex(obj_id)) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) - self.state.pop(obj_id) + self.state.pop(self._state_key(obj_id)) return True diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py index c1626b9..f17a81f 100644 --- a/swh/objstorage/backends/libcloud.py +++ b/swh/objstorage/backends/libcloud.py @@ -1,254 +1,259 @@ # Copyright (C) 2016-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from collections import OrderedDict from typing import Iterator, Optional from urllib.parse import urlencode from libcloud.storage import providers import libcloud.storage.drivers.s3 from libcloud.storage.types import ObjectDoesNotExistError, Provider from typing_extensions import Literal from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.interface import CompositeObjId, ObjId from swh.objstorage.objstorage import ( ObjStorage, compressors, compute_hash, decompressors, objid_to_default_hex, ) def patch_libcloud_s3_urlencode(): """Patches libcloud's S3 backend to properly sign queries. Recent versions of libcloud are not affected (they use signature V4), but 1.5.0 (the one in Debian 9) is.""" def s3_urlencode(params): """Like urllib.parse.urlencode, but sorts the parameters first. This is required to properly compute the request signature, see https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement """ # noqa return urlencode(OrderedDict(sorted(params.items()))) libcloud.storage.drivers.s3.urlencode = s3_urlencode patch_libcloud_s3_urlencode() class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta): """Abstract ObjStorage that connect to a cloud using Libcloud Implementations of this class must redefine the _get_provider method to make it return a driver provider (i.e. object that supports `get_driver` method) which return a LibCloud driver (see https://libcloud.readthedocs.io/en/latest/storage/api.html). 
diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py
index c1626b9..f17a81f 100644
--- a/swh/objstorage/backends/libcloud.py
+++ b/swh/objstorage/backends/libcloud.py
@@ -1,254 +1,259 @@
 # Copyright (C) 2016-2022 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import abc
 from collections import OrderedDict
 from typing import Iterator, Optional
 from urllib.parse import urlencode

 from libcloud.storage import providers
 import libcloud.storage.drivers.s3
 from libcloud.storage.types import ObjectDoesNotExistError, Provider
 from typing_extensions import Literal

 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.interface import CompositeObjId, ObjId
 from swh.objstorage.objstorage import (
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
     objid_to_default_hex,
 )


 def patch_libcloud_s3_urlencode():
     """Patches libcloud's S3 backend to properly sign queries.

     Recent versions of libcloud are not affected (they use signature V4), but
     1.5.0 (the one in Debian 9) is."""

     def s3_urlencode(params):
         """Like urllib.parse.urlencode, but sorts the parameters first.
         This is required to properly compute the request signature, see
         https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement
         """  # noqa
         return urlencode(OrderedDict(sorted(params.items())))

     libcloud.storage.drivers.s3.urlencode = s3_urlencode


 patch_libcloud_s3_urlencode()


 class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta):
     """Abstract ObjStorage that connect to a cloud using Libcloud

     Implementations of this class must redefine the _get_provider method to
     make it return a driver provider (i.e. object that supports `get_driver`
     method) which return a LibCloud driver (see
     https://libcloud.readthedocs.io/en/latest/storage/api.html).

     Args:
         container_name: Name of the base container
         path_prefix: prefix to prepend to object paths in the container,
             separated with a slash
         compression: compression algorithm to use for objects
         kwargs: extra arguments are passed through to the LibCloud driver
     """

     PRIMARY_HASH: Literal["sha1"] = "sha1"

     def __init__(
         self,
         container_name: str,
         compression: str = "gzip",
         path_prefix: Optional[str] = None,
         **kwargs,
     ):
         super().__init__(**kwargs)
         self.driver = self._get_driver(**kwargs)
         self.container_name = container_name
         self.container = self.driver.get_container(container_name=container_name)
         self.compression = compression
         self.path_prefix = None
         if path_prefix:
             self.path_prefix = path_prefix.rstrip("/") + "/"

     def _get_driver(self, **kwargs):
         """Initialize a driver to communicate with the cloud

         Kwargs: arguments passed to the StorageDriver class, typically
           key: key to connect to the API.
           secret: secret key for authentication.
           secure: (bool) support HTTPS
           host: (str)
           port: (int)
           api_version: (str)
           region: (str)

         Returns:
             a Libcloud driver to a cloud storage.
         """
         # Get the driver class from its description.
         cls = providers.get_driver(self._get_provider())
         # Initialize the driver.
         return cls(**kwargs)

     @abc.abstractmethod
     def _get_provider(self):
         """Get a libcloud driver provider

         This method must be overridden by subclasses to specify which of the
         native libcloud driver the current storage should connect to.
         Alternatively, provider for a custom driver may be returned, in which
         case the provider will have to support `get_driver` method.
         """
         raise NotImplementedError(
             "%s must implement `get_provider` method" % type(self)
         )

     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         # FIXME: hopefully this blew up during instantiation
         return True

     def __contains__(self, obj_id: ObjId) -> bool:
         try:
             self._get_object(obj_id)
         except ObjNotFoundError:
             return False
         else:
             return True

     def __iter__(self) -> Iterator[CompositeObjId]:
         """Iterate over the objects present in the storage

         Warning: Iteration over the contents of a cloud-based object storage
         may have bad efficiency: due to the very high amount of objects in it
         and the fact that it is remote, get all the contents of the current
         object storage may result in a lot of network requests.

         You almost certainly don't want to use this method in production.
         """
         for obj in self.driver.iterate_container_objects(self.container):
             name = obj.name
             if self.path_prefix and not name.startswith(self.path_prefix):
                 continue
             if self.path_prefix:
                 name = name[len(self.path_prefix) :]
             yield {self.PRIMARY_HASH: hashutil.hash_to_bytes(name)}

     def __len__(self):
         """Compute the number of objects in the current object storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performance applies.

         Returns:
             number of objects contained in the storage.
""" return sum(1 for i in self) def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: if check_presence and obj_id in self: return self._put_object(content, obj_id) def restore(self, content: bytes, obj_id: ObjId) -> None: return self.add(content, obj_id, check_presence=False) def get(self, obj_id: ObjId) -> bytes: obj = b"".join(self._get_object(obj_id).as_stream()) d = decompressors[self.compression]() ret = d.decompress(obj) if d.unused_data: hex_obj_id = objid_to_default_hex(obj_id) raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id: ObjId) -> None: # Check that the file exists, as _get_object raises ObjNotFoundError self._get_object(obj_id) # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) + if isinstance(obj_id, dict): + obj_id = obj_id[self.PRIMARY_HASH] if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission obj = self._get_object(obj_id) return self.driver.delete_object(obj) - def _object_path(self, obj_id): + def _object_path(self, obj_id: ObjId) -> str: """Get the full path to an object""" + if isinstance(obj_id, dict): + obj_id = obj_id[self.PRIMARY_HASH] + hex_obj_id = hashutil.hash_to_hex(obj_id) if self.path_prefix: return self.path_prefix + hex_obj_id else: return hex_obj_id - def _get_object(self, obj_id): + def _get_object(self, obj_id: ObjId): """Get a Libcloud wrapper for an object pointer. This wrapper does not retrieve the content of the object directly. """ object_path = self._object_path(obj_id) try: return self.driver.get_object(self.container_name, object_path) except ObjectDoesNotExistError: raise ObjNotFoundError(obj_id) def _compressor(self, data): comp = compressors[self.compression]() for chunk in data: cchunk = comp.compress(chunk) if cchunk: yield cchunk trail = comp.flush() if trail: yield trail def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. 
""" object_path = self._object_path(obj_id) if not isinstance(content, Iterator): content = (content,) self.driver.upload_object_via_stream( self._compressor(content), self.container, object_path ) class AwsCloudObjStorage(CloudObjStorage): """Amazon's S3 Cloud-based object storage""" def _get_provider(self): return Provider.S3 class OpenStackCloudObjStorage(CloudObjStorage): """OpenStack Swift Cloud based object storage""" def _get_provider(self): return Provider.OPENSTACK_SWIFT diff --git a/swh/objstorage/backends/seaweedfs/objstorage.py b/swh/objstorage/backends/seaweedfs/objstorage.py index 62a1d47..0d0f05b 100644 --- a/swh/objstorage/backends/seaweedfs/objstorage.py +++ b/swh/objstorage/backends/seaweedfs/objstorage.py @@ -1,161 +1,163 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import io from itertools import islice import logging import os from typing import Iterator, Optional from typing_extensions import Literal from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.interface import CompositeObjId, ObjId from swh.objstorage.objstorage import ( DEFAULT_LIMIT, ObjStorage, compressors, compute_hash, decompressors, objid_to_default_hex, ) from .http import HttpFiler LOGGER = logging.getLogger(__name__) class SeaweedFilerObjStorage(ObjStorage): """ObjStorage with seaweedfs abilities, using the Filer API. https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API """ PRIMARY_HASH: Literal["sha1"] = "sha1" def __init__(self, url, compression=None, **kwargs): super().__init__(**kwargs) self.wf = HttpFiler(url) self.compression = compression def check_config(self, *, check_write): """Check the configuration for this object storage""" # FIXME: hopefully this blew up during instantiation return True def __contains__(self, obj_id: ObjId) -> bool: return self.wf.exists(self._path(obj_id)) def __iter__(self) -> Iterator[CompositeObjId]: """Iterate over the objects present in the storage Warning: Iteration over the contents of a cloud-based object storage may have bad efficiency: due to the very high amount of objects in it and the fact that it is remote, get all the contents of the current object storage may result in a lot of network requests. You almost certainly don't want to use this method in production. """ obj_id = last_obj_id = None while True: for obj_id in self.list_content(last_obj_id=last_obj_id): yield obj_id if last_obj_id == obj_id: break last_obj_id = obj_id def __len__(self): """Compute the number of objects in the current object storage. Warning: this currently uses `__iter__`, its warning about bad performance applies. Returns: number of objects contained in the storage. 
""" return sum(1 for i in self) def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: if check_presence and obj_id in self: return def compressor(data): comp = compressors[self.compression]() yield comp.compress(data) yield comp.flush() assert isinstance( content, bytes ), "list of content chunks is not supported anymore" self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id)) def restore(self, content: bytes, obj_id: ObjId) -> None: return self.add(content, obj_id, check_presence=False) def get(self, obj_id: ObjId) -> bytes: try: obj = self.wf.get(self._path(obj_id)) except Exception: raise ObjNotFoundError(obj_id) d = decompressors[self.compression]() ret = d.decompress(obj) if d.unused_data: hex_obj_id = objid_to_default_hex(obj_id) raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id: ObjId) -> None: # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) + if isinstance(obj_id, dict): + obj_id = obj_id[self.PRIMARY_HASH] if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.wf.delete(self._path(obj_id)) return True def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT, ) -> Iterator[CompositeObjId]: if last_obj_id: objid = objid_to_default_hex(last_obj_id) lastfilename = objid else: lastfilename = None for fname in islice(self.wf.iterfiles(last_file_name=lastfilename), limit): bytehex = fname.rsplit("/", 1)[-1] yield {self.PRIMARY_HASH: hashutil.bytehex_to_hash(bytehex.encode())} # internal methods def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. """ def compressor(data): comp = compressors[self.compression]() for chunk in data: yield comp.compress(chunk) yield comp.flush() if isinstance(content, bytes): content = [content] self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id)) def _path(self, obj_id: ObjId): return os.path.join(self.wf.basepath, objid_to_default_hex(obj_id)) diff --git a/swh/objstorage/multiplexer/striping_objstorage.py b/swh/objstorage/multiplexer/striping_objstorage.py index 3fa3ffa..c20b069 100644 --- a/swh/objstorage/multiplexer/striping_objstorage.py +++ b/swh/objstorage/multiplexer/striping_objstorage.py @@ -1,76 +1,82 @@ -# Copyright (C) 2018-2020 The Software Heritage developers +# Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import queue from typing import Dict +from typing_extensions import Literal + +from swh.objstorage.interface import ObjId from swh.objstorage.multiplexer.multiplexer_objstorage import ( MultiplexerObjStorage, ObjStorageThread, ) class StripingObjStorage(MultiplexerObjStorage): """Stripes objects across multiple objstorages This objstorage implementation will write objects to objstorages in a predictable way: it takes the modulo of the last 8 bytes of the object identifier with the number of object storages passed, which will yield an (almost) even distribution. Objects are read from all storages in turn until it succeeds. 
""" MOD_BYTES = 8 + PRIMARY_HASH: Literal["sha1"] = "sha1" def __init__(self, storages, **kwargs): super().__init__(storages, **kwargs) self.num_storages = len(storages) - def get_storage_index(self, obj_id): + def get_storage_index(self, obj_id: ObjId): if obj_id is None: raise ValueError("StripingObjStorage always needs obj_id to be set") + if isinstance(obj_id, dict): + obj_id = obj_id[self.PRIMARY_HASH] index = int.from_bytes(obj_id[: -self.MOD_BYTES], "big") return index % self.num_storages def get_write_threads(self, obj_id): idx = self.get_storage_index(obj_id) yield self.storage_threads[idx] def get_read_threads(self, obj_id=None): if obj_id: idx = self.get_storage_index(obj_id) else: idx = 0 for i in range(self.num_storages): yield self.storage_threads[(idx + i) % self.num_storages] def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage.""" content_by_storage_index: Dict[bytes, Dict] = defaultdict(dict) for obj_id, content in contents.items(): storage_index = self.get_storage_index(obj_id) content_by_storage_index[storage_index][obj_id] = content mailbox: queue.Queue[Dict] = queue.Queue() for storage_index, contents in content_by_storage_index.items(): self.storage_threads[storage_index].queue_command( "add_batch", contents, check_presence=check_presence, mailbox=mailbox, ) results = ObjStorageThread.collect_results( mailbox, len(content_by_storage_index) ) summed = {"object:add": 0, "object:add:bytes": 0} for result in results: summed["object:add"] += result["object:add"] summed["object:add:bytes"] += result["object:add:bytes"] return summed diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py index 4463ecc..c04a90d 100644 --- a/swh/objstorage/tests/objstorage_testing.py +++ b/swh/objstorage/tests/objstorage_testing.py @@ -1,216 +1,276 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import inspect +from typing import Tuple from swh.objstorage import exc -from swh.objstorage.interface import ObjStorageInterface +from swh.objstorage.interface import CompositeObjId, ObjStorageInterface from swh.objstorage.objstorage import compute_hash class ObjStorageTestFixture: def test_types(self): """Checks all methods of ObjStorageInterface are implemented by this backend, and that they have the same signature.""" # Create an instance of the protocol (which cannot be instantiated # directly, so this creates a subclass, then instantiates it) interface = type("_", (ObjStorageInterface,), {})() assert "get_batch" in dir(interface) missing_methods = [] for meth_name in dir(interface): if meth_name.startswith("_") and meth_name not in ( "__iter__", "__contains__", ): continue interface_meth = getattr(interface, meth_name) concrete_meth = getattr(self.storage, meth_name) expected_signature = inspect.signature(interface_meth) actual_signature = inspect.signature(concrete_meth) assert expected_signature == actual_signature, meth_name assert missing_methods == [] # If all the assertions above succeed, then this one should too. # But there's no harm in double-checking. # And we could replace the assertions above by this one, but unlike # the assertions above, it doesn't explain what is missing. 
diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py
index 4463ecc..c04a90d 100644
--- a/swh/objstorage/tests/objstorage_testing.py
+++ b/swh/objstorage/tests/objstorage_testing.py
@@ -1,216 +1,276 @@
 # Copyright (C) 2015-2020 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import inspect
+from typing import Tuple

 from swh.objstorage import exc
-from swh.objstorage.interface import ObjStorageInterface
+from swh.objstorage.interface import CompositeObjId, ObjStorageInterface
 from swh.objstorage.objstorage import compute_hash


 class ObjStorageTestFixture:
     def test_types(self):
         """Checks all methods of ObjStorageInterface are implemented by this
         backend, and that they have the same signature."""
         # Create an instance of the protocol (which cannot be instantiated
         # directly, so this creates a subclass, then instantiates it)
         interface = type("_", (ObjStorageInterface,), {})()

         assert "get_batch" in dir(interface)

         missing_methods = []

         for meth_name in dir(interface):
             if meth_name.startswith("_") and meth_name not in (
                 "__iter__",
                 "__contains__",
             ):
                 continue
             interface_meth = getattr(interface, meth_name)
             concrete_meth = getattr(self.storage, meth_name)

             expected_signature = inspect.signature(interface_meth)
             actual_signature = inspect.signature(concrete_meth)

             assert expected_signature == actual_signature, meth_name

         assert missing_methods == []

         # If all the assertions above succeed, then this one should too.
         # But there's no harm in double-checking.
         # And we could replace the assertions above by this one, but unlike
         # the assertions above, it doesn't explain what is missing.
         assert isinstance(self.storage, ObjStorageInterface)

     def hash_content(self, content):
         obj_id = compute_hash(content)
         return content, obj_id

+    def compositehash_content(self, content) -> Tuple[bytes, CompositeObjId]:
+        obj_id = compute_hash(content)
+        return content, {"sha1": obj_id}
+
     def assertContentMatch(self, obj_id, expected_content):  # noqa
         content = self.storage.get(obj_id)
         self.assertEqual(content, expected_content)

     def test_check_config(self):
         self.assertTrue(self.storage.check_config(check_write=False))
         self.assertTrue(self.storage.check_config(check_write=True))

     def test_contains(self):
         content_p, obj_id_p = self.hash_content(b"contains_present")
         content_m, obj_id_m = self.hash_content(b"contains_missing")
         self.storage.add(content_p, obj_id=obj_id_p)
         self.assertIn(obj_id_p, self.storage)
         self.assertNotIn(obj_id_m, self.storage)

+    def test_contains_composite(self):
+        content_p, obj_id_p = self.compositehash_content(b"contains_present")
+        content_m, obj_id_m = self.compositehash_content(b"contains_missing")
+        self.storage.add(content_p, obj_id=obj_id_p)
+        self.assertIn(obj_id_p, self.storage)
+        self.assertNotIn(obj_id_m, self.storage)
+
     def test_add_get_w_id(self):
         content, obj_id = self.hash_content(b"add_get_w_id")
         self.storage.add(content, obj_id=obj_id)
         self.assertContentMatch(obj_id, content)

+    def test_add_get_w_composite_id(self):
+        content, obj_id = self.compositehash_content(b"add_get_w_id")
+        self.storage.add(content, obj_id=obj_id)
+        self.assertContentMatch(obj_id, content)
+
     def test_add_twice(self):
         content, obj_id = self.hash_content(b"add_twice")
         self.storage.add(content, obj_id=obj_id)
         self.assertContentMatch(obj_id, content)
         self.storage.add(content, obj_id=obj_id, check_presence=False)
         self.assertContentMatch(obj_id, content)

     def test_add_big(self):
         content, obj_id = self.hash_content(b"add_big" * 1024 * 1024)
         self.storage.add(content, obj_id=obj_id)
         self.assertContentMatch(obj_id, content)

     def test_add_get_batch(self):
         content1, obj_id1 = self.hash_content(b"add_get_batch_1")
         content2, obj_id2 = self.hash_content(b"add_get_batch_2")
         self.storage.add(content1, obj_id1)
         self.storage.add(content2, obj_id2)
         cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2])
         self.assertEqual(cr1, content1)
         self.assertEqual(cr2, content2)

+    def test_add_get_batch_composite(self):
+        content1, obj_id1 = self.compositehash_content(b"add_get_batch_1")
+        content2, obj_id2 = self.compositehash_content(b"add_get_batch_2")
+        self.storage.add(content1, obj_id1)
+        self.storage.add(content2, obj_id2)
+        cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2])
+        self.assertEqual(cr1, content1)
+        self.assertEqual(cr2, content2)
+
     def test_get_batch_unexisting_content(self):
         content, obj_id = self.hash_content(b"get_batch_unexisting_content")
         result = list(self.storage.get_batch([obj_id]))
         self.assertTrue(len(result) == 1)
         self.assertIsNone(result[0])

     def test_restore_content(self):
         self.storage.allow_delete = True

         valid_content, valid_obj_id = self.hash_content(b"restore_content")
         invalid_content = b"unexpected content"
         self.storage.add(invalid_content, valid_obj_id)
         with self.assertRaises(exc.Error):
             self.storage.check(valid_obj_id)
         self.storage.restore(valid_content, valid_obj_id)
         self.assertContentMatch(valid_obj_id, valid_content)

     def test_get_missing(self):
         content, obj_id = self.hash_content(b"get_missing")
         with self.assertRaises(exc.ObjNotFoundError) as e:
             self.storage.get(obj_id)

         self.assertIn(obj_id, e.exception.args)

+    def test_get_missing_composite(self):
+        content, obj_id = self.compositehash_content(b"get_missing")
+        with self.assertRaises(exc.ObjNotFoundError) as e:
+            self.storage.get(obj_id)
+
+        self.assertIn(obj_id, e.exception.args)
+
     def test_check_missing(self):
         content, obj_id = self.hash_content(b"check_missing")
         with self.assertRaises(exc.Error):
             self.storage.check(obj_id)

+    def test_check_missing_composite(self):
+        content, obj_id = self.compositehash_content(b"check_missing")
+        with self.assertRaises(exc.Error):
+            self.storage.check(obj_id)
+
     def test_check_present(self):
         content, obj_id = self.hash_content(b"check_present")
         self.storage.add(content, obj_id)
         try:
             self.storage.check(obj_id)
         except exc.Error:
             self.fail("Integrity check failed")

+    def test_check_present_composite(self):
+        content, obj_id = self.compositehash_content(b"check_present")
+        self.storage.add(content, obj_id)
+        try:
+            self.storage.check(obj_id)
+        except exc.Error:
+            self.fail("Integrity check failed")
+
     def test_delete_missing(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b"missing_content_to_delete")
         with self.assertRaises(exc.Error):
             self.storage.delete(obj_id)

+    def test_delete_missing_composite(self):
+        self.storage.allow_delete = True
+        content, obj_id = self.compositehash_content(b"missing_content_to_delete")
+        with self.assertRaises(exc.Error):
+            self.storage.delete(obj_id)
+
     def test_delete_present(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b"content_to_delete")
         self.storage.add(content, obj_id=obj_id)
         self.assertTrue(self.storage.delete(obj_id))
         with self.assertRaises(exc.Error):
             self.storage.get(obj_id)

+    def test_delete_present_composite(self):
+        self.storage.allow_delete = True
+        content, obj_id = self.compositehash_content(b"content_to_delete")
+        self.storage.add(content, obj_id=obj_id)
+        self.assertTrue(self.storage.delete(obj_id))
+        with self.assertRaises(exc.Error):
+            self.storage.get(obj_id)
+
     def test_delete_not_allowed(self):
         self.storage.allow_delete = False
         content, obj_id = self.hash_content(b"content_to_delete")
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.storage.delete(obj_id)

     def test_delete_not_allowed_by_default(self):
         content, obj_id = self.hash_content(b"content_to_delete")
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.assertTrue(self.storage.delete(obj_id))

     def test_add_batch(self):
         contents = {}
         expected_content_add = 0
         expected_content_add_bytes = 0
         for i in range(50):
             content = b"Test content %02d" % i
             content, obj_id = self.hash_content(content)
             contents[obj_id] = content
             expected_content_add_bytes += len(content)
             expected_content_add += 1

         ret = self.storage.add_batch(contents)

         self.assertEqual(
             ret,
             {
                 "object:add": expected_content_add,
                 "object:add:bytes": expected_content_add_bytes,
             },
         )

         for obj_id in contents:
             self.assertIn(obj_id, self.storage)

     def test_content_iterator(self):
         sto_obj_ids = iter(self.storage)
         sto_obj_ids = list(sto_obj_ids)
         self.assertFalse(sto_obj_ids)

         obj_ids = []
         for i in range(100):
             content, obj_id = self.hash_content(b"content %d" % i)
             self.storage.add(content, obj_id=obj_id)
             obj_ids.append({"sha1": obj_id})

         sto_obj_ids = list(self.storage)
         self.assertCountEqual(sto_obj_ids, obj_ids)

     def test_list_content(self):
         all_ids = []
         for i in range(1200):
             content = b"example %d" % i
             obj_id = compute_hash(content)
             self.storage.add(content, obj_id)
             all_ids.append({"sha1": obj_id})
         all_ids.sort(key=lambda d: d["sha1"])

         ids = list(self.storage.list_content())
         self.assertEqual(len(ids), 1200)
         self.assertEqual(ids[0], all_ids[0])
         self.assertEqual(ids[100], all_ids[100])
         self.assertEqual(ids[999], all_ids[999])

         ids = list(self.storage.list_content(limit=10))
         self.assertEqual(len(ids), 10)
         self.assertEqual(ids[0], all_ids[0])
         self.assertEqual(ids[9], all_ids[9])

         ids = list(self.storage.list_content(last_obj_id=all_ids[999], limit=100))
         self.assertEqual(len(ids), 100)
         self.assertEqual(ids[0], all_ids[1000])
         self.assertEqual(ids[9], all_ids[1009])
diff --git a/swh/objstorage/tests/test_objstorage_multiplexer.py b/swh/objstorage/tests/test_objstorage_multiplexer.py
index bd3164b..9b8be87 100644
--- a/swh/objstorage/tests/test_objstorage_multiplexer.py
+++ b/swh/objstorage/tests/test_objstorage_multiplexer.py
@@ -1,61 +1,71 @@
 # Copyright (C) 2015-2020 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import os
 import shutil
 import tempfile
 import unittest

 from swh.objstorage.backends.pathslicing import PathSlicingObjStorage
 from swh.objstorage.multiplexer import MultiplexerObjStorage
 from swh.objstorage.multiplexer.filter import add_filter, read_only

 from .objstorage_testing import ObjStorageTestFixture


 class TestMultiplexerObjStorage(ObjStorageTestFixture, unittest.TestCase):
     def setUp(self):
         super().setUp()
         self.tmpdir = tempfile.mkdtemp()
         os.mkdir(os.path.join(self.tmpdir, "root1"))
         os.mkdir(os.path.join(self.tmpdir, "root2"))

         self.storage_v1 = PathSlicingObjStorage(
             os.path.join(self.tmpdir, "root1"), "0:2/2:4"
         )
         self.storage_v2 = PathSlicingObjStorage(
             os.path.join(self.tmpdir, "root2"), "0:1/0:5"
         )

         self.r_storage = add_filter(self.storage_v1, read_only())
         self.w_storage = self.storage_v2
         self.storage = MultiplexerObjStorage([self.r_storage, self.w_storage])

     def tearDown(self):
         super().tearDown()
         shutil.rmtree(self.tmpdir)

     def test_contains(self):
         content_p, obj_id_p = self.hash_content(b"contains_present")
         content_m, obj_id_m = self.hash_content(b"contains_missing")
         self.storage.add(content_p, obj_id=obj_id_p)
         self.assertIn(obj_id_p, self.storage)
         self.assertNotIn(obj_id_m, self.storage)

     def test_delete_missing(self):
         self.storage_v1.allow_delete = True
         self.storage_v2.allow_delete = True
         super().test_delete_missing()

+    def test_delete_missing_composite(self):
+        self.storage_v1.allow_delete = True
+        self.storage_v2.allow_delete = True
+        super().test_delete_missing_composite()
+
     def test_delete_present(self):
         self.storage_v1.allow_delete = True
         self.storage_v2.allow_delete = True
         super().test_delete_present()

+    def test_delete_present_composite(self):
+        self.storage_v1.allow_delete = True
+        self.storage_v2.allow_delete = True
+        super().test_delete_present_composite()
+
     def test_access_readonly(self):
         # Add a content to the readonly storage
         content, obj_id = self.hash_content(b"content in read-only")
         self.storage_v1.add(content, obj_id=obj_id)

         # Try to retrieve it on the main storage
         self.assertIn(obj_id, self.storage)
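Outside the test harness, the read-only/writable multiplexer arrangement exercised above can be built directly. A minimal sketch mirroring ``setUp`` (temporary directories and the same slicing specs; assumes ``swh.objstorage`` is importable):

```python
import os
import tempfile

from swh.objstorage.backends.pathslicing import PathSlicingObjStorage
from swh.objstorage.multiplexer import MultiplexerObjStorage
from swh.objstorage.multiplexer.filter import add_filter, read_only
from swh.objstorage.objstorage import compute_hash

tmpdir = tempfile.mkdtemp()
os.mkdir(os.path.join(tmpdir, "root1"))
os.mkdir(os.path.join(tmpdir, "root2"))

# Same arrangement as setUp() above: a read-only leg and a writable leg.
r_storage = add_filter(
    PathSlicingObjStorage(os.path.join(tmpdir, "root1"), "0:2/2:4"), read_only()
)
w_storage = PathSlicingObjStorage(os.path.join(tmpdir, "root2"), "0:1/0:5")
storage = MultiplexerObjStorage([r_storage, w_storage])

content = b"content"
storage.add(content, obj_id=compute_hash(content))  # written to root2 only
assert compute_hash(content) in storage
```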