diff --git a/requirements.txt b/requirements.txt
index 6abe10a..252f61a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,13 +1,14 @@
# Add here external Python modules dependencies, one per line. Module names
# should match https://pypi.python.org/pypi names. For the full spec of
# dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html

# remote storage API server
click
requests
psycopg2
sh
+typing-extensions

# optional dependencies
# apache-libcloud
# azure-storage-blob >= 12.0

diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py
index 1fa3bed..6642cd6 100644
--- a/swh/objstorage/api/client.py
+++ b/swh/objstorage/api/client.py
@@ -1,54 +1,54 @@
# Copyright (C) 2015-2022 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from swh.core.api import RPCClient
from swh.core.utils import iter_chunks
from swh.model import hashutil
from swh.objstorage.exc import Error, ObjNotFoundError, ObjStorageAPIError
from swh.objstorage.interface import ObjStorageInterface
from swh.objstorage.objstorage import DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT

SHA1_SIZE = 20


class RemoteObjStorage(RPCClient):
    """Proxy to a remote object storage.

    This class allows connecting to an object storage server over the HTTP
    protocol.

    Attributes:
        url (string): The URL of the server to connect to. Must end with a '/'.
        session: The session used to send requests.

    """

    api_exception = ObjStorageAPIError
    reraise_exceptions = [ObjNotFoundError, Error]
    backend_class = ObjStorageInterface

-    def restore(self, content, obj_id=None, *args, **kwargs):
+    def restore(self, content, obj_id=None):
        return self.add(content, obj_id, check_presence=False)

    def add_stream(self, content_iter, obj_id, check_presence=True):
        raise NotImplementedError

    def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
        obj_id = hashutil.hash_to_hex(obj_id)
        return self._get_stream(
            "content/get_stream/{}".format(obj_id), chunk_size=chunk_size
        )

    def __iter__(self):
        yield from self.list_content()

    def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
        params = {"limit": limit}
        if last_obj_id:
            params["last_obj_id"] = hashutil.hash_to_hex(last_obj_id)
        yield from iter_chunks(
            self._get_stream("content", params=params), chunk_size=SHA1_SIZE
        )
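
Editorial note: a minimal sketch (not part of this diff) of how the client above is used, assuming a running objstorage server; the URL is hypothetical, and the constructor signature is inherited from swh.core.api.RPCClient:

    from swh.objstorage.api.client import RemoteObjStorage

    # hypothetical server URL; RemoteObjStorage inherits its constructor
    # from RPCClient, which takes the base URL of the server
    storage = RemoteObjStorage(url="http://localhost:5003/")

    content = b"some raw content"
    obj_id = storage.add(content)          # id computed server-side if omitted
    assert storage.get(obj_id) == content  # round trip through the HTTP API
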
""" def __init__(self, **args): super().__init__() self.state = {} def check_config(self, *, check_write): return True - def __contains__(self, obj_id, *args, **kwargs): + def __contains__(self, obj_id): return obj_id in self.state def __iter__(self): return iter(sorted(self.state)) - def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): + def add(self, content, obj_id=None, check_presence=True): if obj_id is None: obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id self.state[obj_id] = content return obj_id - def get(self, obj_id, *args, **kwargs): + def get(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) return self.state[obj_id] - def check(self, obj_id, *args, **kwargs): + def check(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) if compute_hash(self.state[obj_id]) != obj_id: raise Error("Corrupt object %s" % obj_id) return True - def delete(self, obj_id, *args, **kwargs): + def delete(self, obj_id): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.state.pop(obj_id) return True def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) data = io.BytesIO(self.state[obj_id]) reader = functools.partial(data.read, chunk_size) yield from iter(reader, b"") diff --git a/swh/objstorage/interface.py b/swh/objstorage/interface.py index 3cfd79b..23b37a8 100644 --- a/swh/objstorage/interface.py +++ b/swh/objstorage/interface.py @@ -1,232 +1,237 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +from typing import Dict + +from typing_extensions import Protocol, runtime_checkable + from swh.core.api import remote_api_endpoint from swh.objstorage.objstorage import DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT -class ObjStorageInterface: +@runtime_checkable +class ObjStorageInterface(Protocol): """ High-level API to manipulate the Software Heritage object storage. Conceptually, the object storage offers the following methods: - check_config() check if the object storage is properly configured - __contains__() check if an object is present, by object id - add() add a new object, returning an object id - restore() same as add() but erase an already existed content - get() retrieve the content of an object, by object id - check() check the integrity of an object, by object id - delete() remove an object And some management methods: - get_random() get random object id of existing contents (used for the content integrity checker). Some of the methods have available streaming equivalents: - get_stream() same as get() but returns a chunked iterator Each implementation of this interface can have a different behavior and its own way to store the contents. """ @remote_api_endpoint("check_config") def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ ... @remote_api_endpoint("content/contains") def __contains__(self, obj_id): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True if and only if the object is present in the current object storage. """ ... 
@remote_api_endpoint("content/add") def add(self, content, obj_id=None, check_presence=True): """Add a new object to the object storage. Args: content (bytes): object's raw content to add in storage. obj_id (bytes): checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence (bool): indicate if the presence of the content should be verified before adding the file. Returns: the id (bytes) of the object into the storage. """ ... @remote_api_endpoint("content/add/batch") - def add_batch(self, contents, check_presence=True): + def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage. Args: contents: mapping from obj_id to object contents Returns: the summary of objects added to the storage (count of object, count of bytes object) """ ... def restore(self, content, obj_id=None): """Restore a content that have been corrupted. This function is identical to add but does not check if the object id is already in the file system. The default implementation provided by the current class is suitable for most cases. Args: content (bytes): object's raw content to add in storage obj_id (bytes): checksum of `bytes` as computed by ID_HASH_ALGO. When given, obj_id will be trusted to match bytes. If missing, obj_id will be computed on the fly. """ ... @remote_api_endpoint("content/get") def get(self, obj_id): """Retrieve the content of a given object. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ ... @remote_api_endpoint("content/get/batch") def get_batch(self, obj_ids): """Retrieve objects' raw content in bulk from storage. Note: This function does have a default implementation in ObjStorage that is suitable for most cases. For object storages that needs to do the minimal number of requests possible (ex: remote object storages), that method can be overridden to perform a more efficient operation. Args: obj_ids ([bytes]: list of object ids. Returns: list of resulting contents, or None if the content could not be retrieved. Do not raise any exception as a fail for one content will not cancel the whole request. """ ... @remote_api_endpoint("content/check") def check(self, obj_id): """Perform an integrity check for a given object. Verify that the file object is in place and that the content matches the object id. Args: obj_id (bytes): object identifier. Raises: ObjNotFoundError: if the requested object is missing. Error: if the request object is corrupted. """ ... @remote_api_endpoint("content/delete") def delete(self, obj_id): """Delete an object. Args: obj_id (bytes): object identifier. Raises: ObjNotFoundError: if the requested object is missing. """ ... # Management methods @remote_api_endpoint("content/get/random") def get_random(self, batch_size): """Get random ids of existing contents. This method is used in order to get random ids to perform content integrity verifications on random contents. Args: batch_size (int): Number of ids that will be given Yields: An iterable of ids (bytes) of contents that are in the current object storage. """ ... # Streaming methods def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): """Retrieve the content of a given object as a chunked iterator. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. 
""" ... def __iter__(self): ... def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): """Generates known object ids. Args: last_obj_id (bytes): object id from which to iterate from (excluded). limit (int): max number of object ids to generate. Generates: obj_id (bytes): object ids. """ ... diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py index dd6188b..ae86e06 100644 --- a/swh/objstorage/multiplexer/multiplexer_objstorage.py +++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py @@ -1,322 +1,323 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import queue import random import threading +from typing import Dict from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.objstorage import ObjStorage class ObjStorageThread(threading.Thread): def __init__(self, storage): super().__init__(daemon=True) self.storage = storage self.commands = queue.Queue() def run(self): while True: try: mailbox, command, args, kwargs = self.commands.get(True, 0.05) except queue.Empty: continue try: ret = getattr(self.storage, command)(*args, **kwargs) except Exception as exc: self.queue_result(mailbox, "exception", exc) else: self.queue_result(mailbox, "result", ret) def queue_command(self, command, *args, mailbox=None, **kwargs): """Enqueue a new command to be processed by the thread. Args: command (str): one of the method names for the underlying storage. mailbox (queue.Queue): explicit mailbox if the calling thread wants to override it. args, kwargs: arguments for the command. Returns: queue.Queue: The mailbox you can read the response from """ if not mailbox: mailbox = queue.Queue() self.commands.put((mailbox, command, args, kwargs)) return mailbox def queue_result(self, mailbox, result_type, result): """Enqueue a new result in the mailbox This also provides a reference to the storage, which can be useful when an exceptional condition arises. Args: mailbox (queue.Queue): the mailbox to which we need to enqueue the result result_type (str): one of 'result', 'exception' result: the result to pass back to the calling thread """ mailbox.put( {"type": result_type, "result": result,} ) @staticmethod def get_result_from_mailbox(mailbox, *args, **kwargs): """Unpack the result from the mailbox. Args: mailbox (queue.Queue): A mailbox to unpack a result from args: positional arguments to :func:`mailbox.get` kwargs: keyword arguments to :func:`mailbox.get` Returns: the next result unpacked from the queue Raises: either the exception we got back from the underlying storage, or :exc:`queue.Empty` if :func:`mailbox.get` raises that. 
""" result = mailbox.get(*args, **kwargs) if result["type"] == "exception": raise result["result"] from None else: return result["result"] @staticmethod def collect_results(mailbox, num_results): """Collect num_results from the mailbox""" collected = 0 ret = [] while collected < num_results: try: ret.append( ObjStorageThread.get_result_from_mailbox(mailbox, True, 0.05) ) except queue.Empty: continue collected += 1 return ret def __getattr__(self, attr): def call(*args, **kwargs): mailbox = self.queue_command(attr, *args, **kwargs) return self.get_result_from_mailbox(mailbox) return call def __contains__(self, *args, **kwargs): mailbox = self.queue_command("__contains__", *args, **kwargs) return self.get_result_from_mailbox(mailbox) class MultiplexerObjStorage(ObjStorage): """Implementation of ObjStorage that distributes between multiple storages. The multiplexer object storage allows an input to be demultiplexed among multiple storages that will or will not accept it by themselves (see .filter package). As the ids can be different, no pre-computed ids should be submitted. Also, there are no guarantees that the returned ids can be used directly into the storages that the multiplexer manage. Use case examples follow. Example 1:: storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1', '0:2/2:4/4:6')) storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5') storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using 'storage', all the new contents will only be added to the v2 storage, while it will be retrievable from both. Example 2:: storage_v1 = filter.id_regex( PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'), r'[^012].*' ) storage_v2 = filter.if_regex( PathSlicingObjStorage('/dir2', '0:1/0:5'), r'[012]/*' ) storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using this storage, the contents with a sha1 starting with 0, 1 or 2 will be redirected (read AND write) to the storage_v2, while the others will be redirected to the storage_v1. If a content starting with 0, 1 or 2 is present in the storage_v1, it would be ignored anyway. """ def __init__(self, storages, **kwargs): super().__init__(**kwargs) self.storages = storages self.storage_threads = [ObjStorageThread(storage) for storage in storages] for thread in self.storage_threads: thread.start() def wrap_call(self, threads, call, *args, **kwargs): threads = list(threads) mailbox = queue.Queue() for thread in threads: thread.queue_command(call, *args, mailbox=mailbox, **kwargs) return ObjStorageThread.collect_results(mailbox, len(threads)) def get_read_threads(self, obj_id=None): yield from self.storage_threads def get_write_threads(self, obj_id=None): yield from self.storage_threads def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ return all( self.wrap_call( self.storage_threads, "check_config", check_write=check_write ) ) def __contains__(self, obj_id): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True if and only if the object is present in the current object storage. 
""" for storage in self.get_read_threads(obj_id): if obj_id in storage: return True return False def __iter__(self): def obj_iterator(): for storage in self.storages: yield from storage return obj_iterator() def add(self, content, obj_id=None, check_presence=True): """ Add a new object to the object storage. If the adding step works in all the storages that accept this content, this is a success. Otherwise, the full adding step is an error even if it succeed in some of the storages. Args: content: content of the object to be added to the storage. obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence: indicate if the presence of the content should be verified before adding the file. Returns: an id of the object into the storage. As the write-storages are always readable as well, any id will be valid to retrieve a content. """ results = self.wrap_call( self.get_write_threads(obj_id), "add", content, obj_id=obj_id, check_presence=check_presence, ) for result in results: if not result: continue return result - def add_batch(self, contents, check_presence=True): + def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage. """ write_threads = list(self.get_write_threads()) results = self.wrap_call( write_threads, "add_batch", contents, check_presence=check_presence, ) summed = {"object:add": 0, "object:add:bytes": 0} for result in results: summed["object:add"] += result["object:add"] summed["object:add:bytes"] += result["object:add:bytes"] return { "object:add": summed["object:add"] // len(results), "object:add:bytes": summed["object:add:bytes"] // len(results), } def restore(self, content, obj_id=None): return self.wrap_call( self.get_write_threads(obj_id), "restore", content, obj_id=obj_id, ).pop() def get(self, obj_id): for storage in self.get_read_threads(obj_id): try: return storage.get(obj_id) except ObjNotFoundError: continue # If no storage contains this content, raise the error raise ObjNotFoundError(obj_id) def check(self, obj_id): nb_present = 0 for storage in self.get_read_threads(obj_id): try: storage.check(obj_id) except ObjNotFoundError: continue else: nb_present += 1 # If there is an Error because of a corrupted file, then let it pass. # Raise the ObjNotFoundError only if the content couldn't be found in # all the storages. 
    def __iter__(self):
        def obj_iterator():
            for storage in self.storages:
                yield from storage

        return obj_iterator()

    def add(self, content, obj_id=None, check_presence=True):
        """Add a new object to the object storage.

        If the adding step works in all the storages that accept this
        content, this is a success. Otherwise, the full adding step is an
        error even if it succeeded in some of the storages.

        Args:
            content: content of the object to be added to the storage.
            obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When
                given, obj_id will be trusted to match the bytes. If missing,
                obj_id will be computed on the fly.
            check_presence: indicate if the presence of the content should
                be verified before adding the file.

        Returns:
            an id of the object in the storage. As the write-storages are
            always readable as well, any id will be valid to retrieve a
            content.
        """
        results = self.wrap_call(
            self.get_write_threads(obj_id),
            "add",
            content,
            obj_id=obj_id,
            check_presence=check_presence,
        )

        for result in results:
            if not result:
                continue
            return result

-    def add_batch(self, contents, check_presence=True):
+    def add_batch(self, contents, check_presence=True) -> Dict:
        """Add a batch of new objects to the object storage.

        """
        write_threads = list(self.get_write_threads())
        results = self.wrap_call(
            write_threads, "add_batch", contents, check_presence=check_presence,
        )

        summed = {"object:add": 0, "object:add:bytes": 0}
        for result in results:
            summed["object:add"] += result["object:add"]
            summed["object:add:bytes"] += result["object:add:bytes"]

        return {
            "object:add": summed["object:add"] // len(results),
            "object:add:bytes": summed["object:add:bytes"] // len(results),
        }

    def restore(self, content, obj_id=None):
        return self.wrap_call(
            self.get_write_threads(obj_id), "restore", content, obj_id=obj_id,
        ).pop()

    def get(self, obj_id):
        for storage in self.get_read_threads(obj_id):
            try:
                return storage.get(obj_id)
            except ObjNotFoundError:
                continue
        # If no storage contains this content, raise the error
        raise ObjNotFoundError(obj_id)

    def check(self, obj_id):
        nb_present = 0
        for storage in self.get_read_threads(obj_id):
            try:
                storage.check(obj_id)
            except ObjNotFoundError:
                continue
            else:
                nb_present += 1
        # If there is an Error because of a corrupted file, then let it pass.
        # Raise the ObjNotFoundError only if the content couldn't be found in
        # all the storages.
        if nb_present == 0:
            raise ObjNotFoundError(obj_id)

    def delete(self, obj_id):
        super().delete(obj_id)  # Check delete permission
        return all(self.wrap_call(self.get_write_threads(obj_id), "delete", obj_id))

    def get_random(self, batch_size):
        storages_set = [storage for storage in self.storages if len(storage) > 0]
        if len(storages_set) <= 0:
            return []

        while storages_set:
            storage = random.choice(storages_set)
            try:
                return storage.get_random(batch_size)
            except NotImplementedError:
                storages_set.remove(storage)
        # There is no storage that allows the get_random operation
        raise NotImplementedError(
            "There is no storage implementation in the multiplexer that "
            "supports the 'get_random' operation"
        )
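
Editorial note: the summary returned by the multiplexer's add_batch() is the per-storage average: every backend reports the same objects, so the sums are divided by the number of backends. Illustrative arithmetic only, with hypothetical per-storage reports:

    results = [
        {"object:add": 3, "object:add:bytes": 45},  # hypothetical report, storage 1
        {"object:add": 3, "object:add:bytes": 45},  # hypothetical report, storage 2
    ]
    summed = {"object:add": 0, "object:add:bytes": 0}
    for result in results:
        summed["object:add"] += result["object:add"]
        summed["object:add:bytes"] += result["object:add:bytes"]

    # dividing by len(results) recovers the per-storage counts
    assert summed["object:add"] // len(results) == 3
    assert summed["object:add:bytes"] // len(results) == 45
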
""" - content_by_storage_index = defaultdict(dict) + content_by_storage_index: Dict[bytes, Dict] = defaultdict(dict) for obj_id, content in contents.items(): storage_index = self.get_storage_index(obj_id) content_by_storage_index[storage_index][obj_id] = content - mailbox = queue.Queue() + mailbox: queue.Queue[Dict] = queue.Queue() for storage_index, contents in content_by_storage_index.items(): self.storage_threads[storage_index].queue_command( "add_batch", contents, check_presence=check_presence, mailbox=mailbox, ) results = ObjStorageThread.collect_results( mailbox, len(content_by_storage_index) ) summed = {"object:add": 0, "object:add:bytes": 0} for result in results: summed["object:add"] += result["object:add"] summed["object:add:bytes"] += result["object:add:bytes"] return summed diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index e01df2d..2a39306 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,144 +1,144 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import bz2 from itertools import dropwhile, islice import lzma from typing import Dict import zlib from swh.model import hashutil from .exc import ObjNotFoundError ID_HASH_ALGO = "sha1" ID_HASH_LENGTH = 40 # Size in bytes of the hash hexadecimal representation. DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024 # Size in bytes of the streaming chunks DEFAULT_LIMIT = 10000 def compute_hash(content): """Compute the content's hash. Args: content (bytes): The raw content to hash hash_name (str): Hash's name (default to ID_HASH_ALGO) Returns: The ID_HASH_ALGO for the content """ return ( hashutil.MultiHash.from_data(content, hash_names=[ID_HASH_ALGO],) .digest() .get(ID_HASH_ALGO) ) class NullCompressor: def compress(self, data): return data def flush(self): return b"" class NullDecompressor: def decompress(self, data): return data @property def unused_data(self): return b"" decompressors = { "bz2": bz2.BZ2Decompressor, "lzma": lzma.LZMADecompressor, "gzip": lambda: zlib.decompressobj(wbits=31), "zlib": zlib.decompressobj, "none": NullDecompressor, } compressors = { "bz2": bz2.BZ2Compressor, "lzma": lzma.LZMACompressor, "gzip": lambda: zlib.compressobj(wbits=31), "zlib": zlib.compressobj, "none": NullCompressor, } class ObjStorage(metaclass=abc.ABCMeta): def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed self.allow_delete = allow_delete @abc.abstractmethod def check_config(self, *, check_write): pass @abc.abstractmethod - def __contains__(self, obj_id, *args, **kwargs): + def __contains__(self, obj_id): pass @abc.abstractmethod - def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): + def add(self, content, obj_id=None, check_presence=True): pass def add_batch(self, contents, check_presence=True) -> Dict: summary = {"object:add": 0, "object:add:bytes": 0} for obj_id, content in contents.items(): if check_presence and obj_id in self: continue self.add(content, obj_id, check_presence=False) summary["object:add"] += 1 summary["object:add:bytes"] += len(content) return summary - def restore(self, content, obj_id=None, *args, **kwargs): + def restore(self, content, obj_id=None): # check_presence to false will erase the potential previous content. 
diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py
index e01df2d..2a39306 100644
--- a/swh/objstorage/objstorage.py
+++ b/swh/objstorage/objstorage.py
@@ -1,144 +1,144 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import abc
import bz2
from itertools import dropwhile, islice
import lzma
from typing import Dict
import zlib

from swh.model import hashutil

from .exc import ObjNotFoundError

ID_HASH_ALGO = "sha1"

ID_HASH_LENGTH = 40  # Length, in hex characters, of the hash representation.

DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024  # Size in bytes of the streaming chunks

DEFAULT_LIMIT = 10000


def compute_hash(content):
    """Compute the content's hash.

    Args:
        content (bytes): The raw content to hash

    Returns:
        The ID_HASH_ALGO checksum of the content
    """
    return (
        hashutil.MultiHash.from_data(content, hash_names=[ID_HASH_ALGO],)
        .digest()
        .get(ID_HASH_ALGO)
    )


class NullCompressor:
    def compress(self, data):
        return data

    def flush(self):
        return b""


class NullDecompressor:
    def decompress(self, data):
        return data

    @property
    def unused_data(self):
        return b""


decompressors = {
    "bz2": bz2.BZ2Decompressor,
    "lzma": lzma.LZMADecompressor,
    "gzip": lambda: zlib.decompressobj(wbits=31),
    "zlib": zlib.decompressobj,
    "none": NullDecompressor,
}

compressors = {
    "bz2": bz2.BZ2Compressor,
    "lzma": lzma.LZMACompressor,
    "gzip": lambda: zlib.compressobj(wbits=31),
    "zlib": zlib.compressobj,
    "none": NullCompressor,
}
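
Editorial note: a round trip through the compressor/decompressor registries defined above; "gzip" maps to zlib with wbits=31 on both sides:

    from swh.objstorage.objstorage import compressors, decompressors

    compressor = compressors["gzip"]()
    compressed = compressor.compress(b"some content") + compressor.flush()

    decompressor = decompressors["gzip"]()
    assert decompressor.decompress(compressed) == b"some content"
    assert decompressor.unused_data == b""
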
class ObjStorage(metaclass=abc.ABCMeta):
    def __init__(self, *, allow_delete=False, **kwargs):
        # A more complete permission system could be used in place of that if
        # it becomes needed
        self.allow_delete = allow_delete

    @abc.abstractmethod
    def check_config(self, *, check_write):
        pass

    @abc.abstractmethod
-    def __contains__(self, obj_id, *args, **kwargs):
+    def __contains__(self, obj_id):
        pass

    @abc.abstractmethod
-    def add(self, content, obj_id=None, check_presence=True, *args, **kwargs):
+    def add(self, content, obj_id=None, check_presence=True):
        pass

    def add_batch(self, contents, check_presence=True) -> Dict:
        summary = {"object:add": 0, "object:add:bytes": 0}
        for obj_id, content in contents.items():
            if check_presence and obj_id in self:
                continue
            self.add(content, obj_id, check_presence=False)
            summary["object:add"] += 1
            summary["object:add:bytes"] += len(content)
        return summary

-    def restore(self, content, obj_id=None, *args, **kwargs):
+    def restore(self, content, obj_id=None):
        # check_presence to false will erase the potential previous content.
        return self.add(content, obj_id, check_presence=False)

    @abc.abstractmethod
-    def get(self, obj_id, *args, **kwargs):
+    def get(self, obj_id):
        pass

-    def get_batch(self, obj_ids, *args, **kwargs):
+    def get_batch(self, obj_ids):
        for obj_id in obj_ids:
            try:
                yield self.get(obj_id)
            except ObjNotFoundError:
                yield None

    @abc.abstractmethod
-    def check(self, obj_id, *args, **kwargs):
+    def check(self, obj_id):
        pass

    @abc.abstractmethod
-    def delete(self, obj_id, *args, **kwargs):
+    def delete(self, obj_id):
        if not self.allow_delete:
            raise PermissionError("Delete is not allowed.")

    # Management methods

-    def get_random(self, batch_size, *args, **kwargs):
+    def get_random(self, batch_size):
        pass

    # Streaming methods

    def add_stream(self, content_iter, obj_id, check_presence=True):
        raise NotImplementedError

    def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
        raise NotImplementedError

    def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
        it = iter(self)
        if last_obj_id:
            it = dropwhile(lambda x: x <= last_obj_id, it)
        return islice(it, limit)

diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py
index 9977a25..85fcd47 100644
--- a/swh/objstorage/tests/objstorage_testing.py
+++ b/swh/objstorage/tests/objstorage_testing.py
@@ -1,232 +1,264 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from collections.abc import Iterator
+import inspect
import time

from swh.objstorage import exc
+from swh.objstorage.interface import ObjStorageInterface
from swh.objstorage.objstorage import compute_hash


class ObjStorageTestFixture:
+    def test_types(self):
+        """Checks all methods of ObjStorageInterface are implemented by this
+        backend, and that they have the same signature."""
+        # Create an instance of the protocol (which cannot be instantiated
+        # directly, so this creates a subclass, then instantiates it)
+        interface = type("_", (ObjStorageInterface,), {})()
+
+        assert "get_batch" in dir(interface)
+
+        missing_methods = []
+
+        for meth_name in dir(interface):
+            if meth_name.startswith("_"):
+                continue
+            interface_meth = getattr(interface, meth_name)
+            try:
+                concrete_meth = getattr(self.storage, meth_name)
+            except AttributeError:
+                # record missing methods instead of aborting on the first
+                # miss, so the final assert can list all of them at once
+                missing_methods.append(meth_name)
+                continue
+
+            expected_signature = inspect.signature(interface_meth)
+            actual_signature = inspect.signature(concrete_meth)
+
+            assert expected_signature == actual_signature, meth_name
+
+        assert missing_methods == []
+
+        # If all the assertions above succeed, then this one should too.
+        # But there's no harm in double-checking.
+        # And we could replace the assertions above by this one, but unlike
+        # the assertions above, it doesn't explain what is missing.
+        assert isinstance(self.storage, ObjStorageInterface)
+
    def hash_content(self, content):
        obj_id = compute_hash(content)
        return content, obj_id

    def assertContentMatch(self, obj_id, expected_content):  # noqa
        content = self.storage.get(obj_id)
        self.assertEqual(content, expected_content)

    def test_check_config(self):
        self.assertTrue(self.storage.check_config(check_write=False))
        self.assertTrue(self.storage.check_config(check_write=True))

    def test_contains(self):
        content_p, obj_id_p = self.hash_content(b"contains_present")
        content_m, obj_id_m = self.hash_content(b"contains_missing")
        self.storage.add(content_p, obj_id=obj_id_p)
        self.assertIn(obj_id_p, self.storage)
        self.assertNotIn(obj_id_m, self.storage)

    def test_add_get_w_id(self):
        content, obj_id = self.hash_content(b"add_get_w_id")
        r = self.storage.add(content, obj_id=obj_id)
        self.assertEqual(obj_id, r)
        self.assertContentMatch(obj_id, content)

    def test_add_twice(self):
        content, obj_id = self.hash_content(b"add_twice")
        r = self.storage.add(content, obj_id=obj_id)
        self.assertEqual(obj_id, r)
        self.assertContentMatch(obj_id, content)
        r = self.storage.add(content, obj_id=obj_id, check_presence=False)
        self.assertEqual(obj_id, r)
        self.assertContentMatch(obj_id, content)

    def test_add_big(self):
        content, obj_id = self.hash_content(b"add_big" * 1024 * 1024)
        r = self.storage.add(content, obj_id=obj_id)
        self.assertEqual(obj_id, r)
        self.assertContentMatch(obj_id, content)

    def test_add_get_wo_id(self):
        content, obj_id = self.hash_content(b"add_get_wo_id")
        r = self.storage.add(content)
        self.assertEqual(obj_id, r)
        self.assertContentMatch(obj_id, content)

    def test_add_get_batch(self):
        content1, obj_id1 = self.hash_content(b"add_get_batch_1")
        content2, obj_id2 = self.hash_content(b"add_get_batch_2")
        self.storage.add(content1, obj_id1)
        self.storage.add(content2, obj_id2)
        cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2])
        self.assertEqual(cr1, content1)
        self.assertEqual(cr2, content2)

    def test_get_batch_unexisting_content(self):
        content, obj_id = self.hash_content(b"get_batch_unexisting_content")
        result = list(self.storage.get_batch([obj_id]))
        self.assertTrue(len(result) == 1)
        self.assertIsNone(result[0])

    def test_restore_content(self):
        self.storage.allow_delete = True

        valid_content, valid_obj_id = self.hash_content(b"restore_content")
        invalid_content = b"unexpected content"
        id_adding = self.storage.add(invalid_content, valid_obj_id)
        self.assertEqual(id_adding, valid_obj_id)
        with self.assertRaises(exc.Error):
            self.storage.check(id_adding)
        id_restore = self.storage.restore(valid_content, valid_obj_id)
        self.assertEqual(id_restore, valid_obj_id)
        self.assertContentMatch(valid_obj_id, valid_content)

    def test_get_missing(self):
        content, obj_id = self.hash_content(b"get_missing")
        with self.assertRaises(exc.ObjNotFoundError) as e:
            self.storage.get(obj_id)

        self.assertIn(obj_id, e.exception.args)

    def test_check_missing(self):
        content, obj_id = self.hash_content(b"check_missing")
        with self.assertRaises(exc.Error):
            self.storage.check(obj_id)

    def test_check_present(self):
        content, obj_id = self.hash_content(b"check_present")
        self.storage.add(content, obj_id)
        try:
            self.storage.check(obj_id)
        except exc.Error:
            self.fail("Integrity check failed")

    def test_delete_missing(self):
        self.storage.allow_delete = True
        content, obj_id = self.hash_content(b"missing_content_to_delete")
        with self.assertRaises(exc.Error):
            self.storage.delete(obj_id)

    def test_delete_present(self):
        self.storage.allow_delete = True
        content, obj_id = self.hash_content(b"content_to_delete")
        self.storage.add(content, obj_id=obj_id)
        self.assertTrue(self.storage.delete(obj_id))
        with self.assertRaises(exc.Error):
            self.storage.get(obj_id)
    def test_delete_not_allowed(self):
        self.storage.allow_delete = False
        content, obj_id = self.hash_content(b"content_to_delete")
        self.storage.add(content, obj_id=obj_id)
        with self.assertRaises(PermissionError):
            self.storage.delete(obj_id)

    def test_delete_not_allowed_by_default(self):
        content, obj_id = self.hash_content(b"content_to_delete")
        self.storage.add(content, obj_id=obj_id)
        with self.assertRaises(PermissionError):
            self.assertTrue(self.storage.delete(obj_id))

    def test_add_stream(self):
        content = [b"chunk1", b"chunk2"]
        _, obj_id = self.hash_content(b"".join(content))
        try:
            self.storage.add_stream(iter(content), obj_id=obj_id)
        except NotImplementedError:
            return
        self.assertContentMatch(obj_id, b"".join(content))

    def test_add_stream_sleep(self):
        def gen_content():
            yield b"chunk1"
            time.sleep(0.5)
            yield b"chunk42"

        _, obj_id = self.hash_content(b"placeholder_id")
        try:
            self.storage.add_stream(gen_content(), obj_id=obj_id)
        except NotImplementedError:
            return
        self.assertContentMatch(obj_id, b"chunk1chunk42")

    def test_get_stream(self):
        content = b"123456789"
        _, obj_id = self.hash_content(content)
        self.storage.add(content, obj_id=obj_id)
        r = self.storage.get(obj_id)
        self.assertEqual(r, content)

        try:
            r = self.storage.get_stream(obj_id, chunk_size=1)
        except NotImplementedError:
            return
        self.assertTrue(isinstance(r, Iterator))
        r = list(r)
        self.assertEqual(b"".join(r), content)

    def test_add_batch(self):
        contents = {}
        expected_content_add = 0
        expected_content_add_bytes = 0
        for i in range(50):
            content = b"Test content %02d" % i
            content, obj_id = self.hash_content(content)
            contents[obj_id] = content
            expected_content_add_bytes += len(content)
            expected_content_add += 1

        ret = self.storage.add_batch(contents)

        self.assertEqual(
            ret,
            {
                "object:add": expected_content_add,
                "object:add:bytes": expected_content_add_bytes,
            },
        )
        for obj_id in contents:
            self.assertIn(obj_id, self.storage)

    def test_content_iterator(self):
        sto_obj_ids = iter(self.storage)
        sto_obj_ids = list(sto_obj_ids)
        self.assertFalse(sto_obj_ids)

        obj_ids = set()
        for i in range(100):
            content, obj_id = self.hash_content(b"content %d" % i)
            self.storage.add(content, obj_id=obj_id)
            obj_ids.add(obj_id)

        sto_obj_ids = set(self.storage)
        self.assertEqual(sto_obj_ids, obj_ids)

    def test_list_content(self):
        all_ids = []
        for i in range(1200):
            content = b"example %d" % i
            obj_id = compute_hash(content)
            self.storage.add(content, obj_id)
            all_ids.append(obj_id)
        all_ids.sort()

        ids = list(self.storage.list_content())
        self.assertEqual(len(ids), 1200)
        self.assertEqual(ids[0], all_ids[0])
        self.assertEqual(ids[100], all_ids[100])
        self.assertEqual(ids[999], all_ids[999])

        ids = list(self.storage.list_content(limit=10))
        self.assertEqual(len(ids), 10)
        self.assertEqual(ids[0], all_ids[0])
        self.assertEqual(ids[9], all_ids[9])

        ids = list(self.storage.list_content(last_obj_id=all_ids[999], limit=100))
        self.assertEqual(len(ids), 100)
        self.assertEqual(ids[0], all_ids[1000])
        self.assertEqual(ids[9], all_ids[1009])
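
Editorial note: a sketch of how a backend plugs into this fixture, assuming the usual unittest wiring (the fixture only requires a self.storage attribute). The test class name is hypothetical:

    import unittest

    from swh.objstorage.backends.in_memory import InMemoryObjStorage
    from swh.objstorage.tests.objstorage_testing import ObjStorageTestFixture


    class TestInMemoryObjStorage(ObjStorageTestFixture, unittest.TestCase):
        def setUp(self):
            super().setUp()
            self.storage = InMemoryObjStorage()  # the fixture drives this backend

And a sketch of the pagination idiom that test_list_content exercises, using the default list_content() implementation: each page starts after the last id of the previous one:

    from swh.objstorage.backends.in_memory import InMemoryObjStorage

    storage = InMemoryObjStorage()
    for i in range(25):
        storage.add(b"content %d" % i)

    all_ids = []
    last_obj_id = None
    while True:
        page = list(storage.list_content(last_obj_id=last_obj_id, limit=10))
        if not page:
            break
        all_ids.extend(page)
        last_obj_id = page[-1]  # resume after the last id seen

    assert all_ids == sorted(all_ids) and len(all_ids) == 25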