diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py
index f8fe8d6..d0116ae 100644
--- a/swh/objstorage/api/client.py
+++ b/swh/objstorage/api/client.py
@@ -1,72 +1,78 @@
 # Copyright (C) 2015-2017 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from swh.core.api import SWHRemoteAPI
 from swh.model import hashutil

 from ..objstorage import ObjStorage, DEFAULT_CHUNK_SIZE
 from ..exc import ObjNotFoundError, ObjStorageAPIError


 class RemoteObjStorage(ObjStorage, SWHRemoteAPI):
     """Proxy to a remote object storage.

     This class allows connecting to an object storage server over
     the HTTP protocol.

     Attributes:
         url (string): The URL of the server to connect to. Must end
             with a '/'.
         session: The session used to send requests.

     """
     def __init__(self, url, **kwargs):
         super().__init__(api_exception=ObjStorageAPIError, url=url, **kwargs)

     def check_config(self, *, check_write):
         return self.post('check_config', {'check_write': check_write})

     def __contains__(self, obj_id):
         return self.post('content/contains', {'obj_id': obj_id})

     def add(self, content, obj_id=None, check_presence=True):
         return self.post('content/add', {'content': content,
                                          'obj_id': obj_id,
                                          'check_presence': check_presence})

+    def add_batch(self, contents, check_presence=True):
+        return self.post('content/add/batch', {
+            'contents': contents,
+            'check_presence': check_presence,
+        })
+
     def get(self, obj_id):
         ret = self.post('content/get', {'obj_id': obj_id})
         if ret is None:
             raise ObjNotFoundError(obj_id)
         else:
             return ret

     def get_batch(self, obj_ids):
         return self.post('content/get/batch', {'obj_ids': obj_ids})

     def check(self, obj_id):
         return self.post('content/check', {'obj_id': obj_id})

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         return self.post('content/delete', {'obj_id': obj_id})

     # Management methods

     def get_random(self, batch_size):
         return self.post('content/get/random', {'batch_size': batch_size})

     # Streaming methods

     def add_stream(self, content_iter, obj_id, check_presence=True):
         obj_id = hashutil.hash_to_hex(obj_id)
         return self.post_stream('content/add_stream/{}'.format(obj_id),
                                 params={'check_presence': check_presence},
                                 data=content_iter)

     def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
         obj_id = hashutil.hash_to_hex(obj_id)
         return super().get_stream('content/get_stream/{}'.format(obj_id),
                                   chunk_size=chunk_size)
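Note on the client change above: add_batch posts the whole mapping to the new
content/add/batch route in a single request and returns the count reported by
the backend. A minimal usage sketch (the URL is an illustrative assumption,
not part of this patch)::

    from swh.objstorage.api.client import RemoteObjStorage
    from swh.objstorage.objstorage import compute_hash

    # Hypothetical endpoint; any running objstorage API server will do.
    storage = RemoteObjStorage(url='http://localhost:5003/')

    # Build the expected {obj_id: content} mapping, then push it in
    # one HTTP round trip instead of one request per object.
    contents = {compute_hash(data): data
                for data in (b'first object', b'second object')}
    added = storage.add_batch(contents)
    assert added == len(contents)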
diff --git a/swh/objstorage/api/server.py b/swh/objstorage/api/server.py
index 8108e64..eedbca2 100644
--- a/swh/objstorage/api/server.py
+++ b/swh/objstorage/api/server.py
@@ -1,174 +1,181 @@
 # Copyright (C) 2015-2017 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import asyncio
 import aiohttp.web
 import click

 from swh.core import config
 from swh.core.api_async import (SWHRemoteAPI, decode_request,
                                 encode_data_server as encode_data)
 from swh.model import hashutil
 from swh.objstorage import get_objstorage
 from swh.objstorage.exc import ObjNotFoundError

 DEFAULT_CONFIG_PATH = 'objstorage/server'
 DEFAULT_CONFIG = {
     'cls': ('str', 'pathslicing'),
     'args': ('dict', {
         'root': '/srv/softwareheritage/objects',
         'slicing': '0:2/2:4/4:6',
     }),
     'client_max_size': ('int', 1024 * 1024 * 1024),
 }


 @asyncio.coroutine
 def index(request):
     return aiohttp.web.Response(body="SWH Objstorage API server")


 @asyncio.coroutine
 def check_config(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].check_config(**req))


 @asyncio.coroutine
 def contains(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].__contains__(**req))


 @asyncio.coroutine
 def add_bytes(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].add(**req))


+@asyncio.coroutine
+def add_batch(request):
+    req = yield from decode_request(request)
+    return encode_data(request.app['objstorage'].add_batch(**req))
+
+
 @asyncio.coroutine
 def get_bytes(request):
     req = yield from decode_request(request)
     try:
         ret = request.app['objstorage'].get(**req)
     except ObjNotFoundError:
         ret = {
             'error': 'object_not_found',
             'request': req,
         }
         return encode_data(ret, status=404)
     else:
         return encode_data(ret)


 @asyncio.coroutine
 def get_batch(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].get_batch(**req))


 @asyncio.coroutine
 def check(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].check(**req))


 @asyncio.coroutine
 def delete(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].delete(**req))


 # Management methods

 @asyncio.coroutine
 def get_random_contents(request):
     req = yield from decode_request(request)
     return encode_data(request.app['objstorage'].get_random(**req))


 # Streaming methods

 @asyncio.coroutine
 def add_stream(request):
     hex_id = request.match_info['hex_id']
     obj_id = hashutil.hash_to_bytes(hex_id)
     check_pres = (request.query.get('check_presence', '').lower() == 'true')
     objstorage = request.app['objstorage']

     if check_pres and obj_id in objstorage:
         return encode_data(obj_id)

     with objstorage.chunk_writer(obj_id) as write:
         # XXX (3.5): use 'async for chunk in request.content.iter_any()'
         while not request.content.at_eof():
             chunk = yield from request.content.readany()
             write(chunk)

     return encode_data(obj_id)


 @asyncio.coroutine
 def get_stream(request):
     hex_id = request.match_info['hex_id']
     obj_id = hashutil.hash_to_bytes(hex_id)
     response = aiohttp.web.StreamResponse()
     yield from response.prepare(request)
     for chunk in request.app['objstorage'].get_stream(obj_id, 2 << 20):
         response.write(chunk)
         yield from response.drain()
     return response


 @asyncio.coroutine
 def set_app_config(app):
     if app['config']:
         cfg = app['config']
     else:
         cfg = config.load_named_config(DEFAULT_CONFIG_PATH, DEFAULT_CONFIG)

     if 'client_max_size' in cfg:
         app._client_max_size = cfg.pop('client_max_size')

     app.update(cfg)


 @asyncio.coroutine
 def create_objstorage(app):
     app['objstorage'] = get_objstorage(app['cls'], app['args'])


 app = SWHRemoteAPI()
 app['config'] = None
 app.router.add_route('GET', '/', index)
 app.router.add_route('POST', '/check_config', check_config)
 app.router.add_route('POST', '/content/contains', contains)
 app.router.add_route('POST', '/content/add', add_bytes)
+app.router.add_route('POST', '/content/add/batch', add_batch)
 app.router.add_route('POST', '/content/get', get_bytes)
 app.router.add_route('POST', '/content/get/batch', get_batch)
 app.router.add_route('POST', '/content/get/random', get_random_contents)
 app.router.add_route('POST', '/content/check', check)
 app.router.add_route('POST', '/content/delete', delete)
 app.router.add_route('POST', '/content/add_stream/{hex_id}', add_stream)
 app.router.add_route('GET', '/content/get_stream/{hex_id}', get_stream)
 app.on_startup.append(set_app_config)
 app.on_startup.append(create_objstorage)


 @click.command()
 @click.argument('config-path', required=1)
 @click.option('--host', default='0.0.0.0', help="Host to run the server")
 @click.option('--port', default=5003, type=click.INT,
               help="Binding port of the server")
 @click.option('--debug/--nodebug', default=True,
               help="Indicates if the server should run in debug mode")
 def launch(config_path, host, port, debug):
     cfg = config.load_named_config(config_path, DEFAULT_CONFIG)
     app['config'] = cfg
     app.update(debug=bool(debug))
     aiohttp.web.run_app(app, host=host, port=int(port))


 if __name__ == '__main__':
     launch()
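The server-side half of the change is a single new POST route: the add_batch
handler decodes the request body and forwards the resulting kwargs straight to
the backend. A sketch of what the handler effectively computes once the HTTP
layer is stripped away (the backend and request body below are illustrative
assumptions, not part of this patch)::

    import tempfile
    from swh.objstorage import get_objstorage

    # Stand-in for the objstorage the server builds from its config.
    storage = get_objstorage('pathslicing', {
        'root': tempfile.mkdtemp(),
        'slicing': '0:2/2:4/4:6',
    })

    # Decoded body of a POST to /content/add/batch, as the client sends it:
    req = {'contents': {b'\x00' * 20: b'some content'},
           'check_presence': True}
    result = storage.add_batch(**req)  # what encode_data() then serializes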
diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py
index 8492088..65b00e4 100644
--- a/swh/objstorage/multiplexer/multiplexer_objstorage.py
+++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py
@@ -1,289 +1,299 @@
 # Copyright (C) 2015-2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import queue
 import random
 import threading

 from ..objstorage import ObjStorage
 from ..exc import ObjNotFoundError


 class ObjStorageThread(threading.Thread):
     def __init__(self, storage):
         super().__init__(daemon=True)
         self.storage = storage
         self.commands = queue.Queue()

     def run(self):
         while True:
             try:
                 mailbox, command, args, kwargs = self.commands.get(True, 0.05)
             except queue.Empty:
                 continue

             try:
                 ret = getattr(self.storage, command)(*args, **kwargs)
             except Exception as exc:
                 self.queue_result(mailbox, 'exception', exc)
             else:
                 self.queue_result(mailbox, 'result', ret)

     def queue_command(self, command, *args, mailbox=None, **kwargs):
         """Enqueue a new command to be processed by the thread.

         Args:
           command (str): one of the method names for the underlying storage.
           mailbox (queue.Queue): explicit mailbox if the calling thread wants
             to override it.
           args, kwargs: arguments for the command.

         Returns:
           queue.Queue: the mailbox you can read the response from.
         """
         if not mailbox:
             mailbox = queue.Queue()
         self.commands.put((mailbox, command, args, kwargs))
         return mailbox

     def queue_result(self, mailbox, result_type, result):
         """Enqueue a new result in the mailbox

         This also provides a reference to the storage, which can be useful
         when an exceptional condition arises.

         Args:
           mailbox (queue.Queue): the mailbox to which we need to enqueue the
             result
           result_type (str): one of 'result', 'exception'
           result: the result to pass back to the calling thread
         """
         mailbox.put({
             'type': result_type,
             'result': result,
         })

     @staticmethod
     def get_result_from_mailbox(mailbox, *args, **kwargs):
         """Unpack the result from the mailbox.

         Args:
           mailbox (queue.Queue): a mailbox to unpack a result from
           args, kwargs: arguments to :func:`mailbox.get`

         Returns:
           the next result unpacked from the queue

         Raises:
           either the exception we got back from the underlying storage, or
           :exc:`queue.Empty` if :func:`mailbox.get` raises that.
         """
         result = mailbox.get(*args, **kwargs)
         if result['type'] == 'exception':
             raise result['result'] from None
         else:
             return result['result']

     @staticmethod
     def collect_results(mailbox, num_results):
         """Collect num_results from the mailbox"""
         collected = 0
         ret = []
         while collected < num_results:
             try:
                 ret.append(ObjStorageThread.get_result_from_mailbox(
                     mailbox, True, 0.05))
             except queue.Empty:
                 continue
             collected += 1
         return ret

     def __getattr__(self, attr):
         def call(*args, **kwargs):
             mailbox = self.queue_command(attr, *args, **kwargs)
             return self.get_result_from_mailbox(mailbox)
         return call

     def __contains__(self, *args, **kwargs):
         mailbox = self.queue_command('__contains__', *args, **kwargs)
         return self.get_result_from_mailbox(mailbox)


 class MultiplexerObjStorage(ObjStorage):
     """Implementation of ObjStorage that distributes between multiple
     storages.

     The multiplexer object storage allows an input to be demultiplexed
     among multiple storages that will or will not accept it by
     themselves (see .filter package).

     As the ids can be different, no pre-computed ids should be
     submitted. Also, there is no guarantee that the returned ids can
     be used directly in the storages that the multiplexer manages.

     Use case examples follow.

     Example 1::

         storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1',
                                       '0:2/2:4/4:6'))
         storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5')
         storage = MultiplexerObjStorage([storage_v1, storage_v2])

     When using 'storage', all the new contents will only be added to the v2
     storage, while they will be retrievable from both.

     Example 2::

         storage_v1 = filter.id_regex(
             PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'),
             r'[^012].*'
         )
         storage_v2 = filter.id_regex(
             PathSlicingObjStorage('/dir2', '0:1/0:5'),
             r'[012].*'
         )
         storage = MultiplexerObjStorage([storage_v1, storage_v2])

     When using this storage, the contents with a sha1 starting with 0, 1 or 2
     will be redirected (read AND write) to storage_v2, while the others will
     be redirected to storage_v1. If a content starting with 0, 1 or 2 is
     present in storage_v1, it will be ignored anyway.

     """

     def __init__(self, storages, **kwargs):
         super().__init__(**kwargs)
         self.storages = storages
         self.storage_threads = [
             ObjStorageThread(storage)
             for storage in storages
         ]
         for thread in self.storage_threads:
             thread.start()

     def wrap_call(self, threads, call, *args, **kwargs):
         threads = list(threads)
         mailbox = queue.Queue()
         for thread in threads:
             thread.queue_command(call, *args, mailbox=mailbox, **kwargs)

         return ObjStorageThread.collect_results(mailbox, len(threads))

     def get_read_threads(self, obj_id=None):
         yield from self.storage_threads

     def get_write_threads(self, obj_id=None):
         yield from self.storage_threads

     def check_config(self, *, check_write):
         """Check whether the object storage is properly configured.

         Args:
             check_write (bool): if True, check if writes to the object
                 storage can succeed.

         Returns:
             True if the configuration check worked, an exception if it
             didn't.
         """
         return all(
             self.wrap_call(self.storage_threads, 'check_config',
                            check_write=check_write)
         )

     def __contains__(self, obj_id):
         """Indicate if the given object is present in the storage.

         Args:
             obj_id (bytes): object identifier.

         Returns:
             True iff the object is present in the current object storage.
         """
         for storage in self.get_read_threads(obj_id):
             if obj_id in storage:
                 return True
         return False

     def add(self, content, obj_id=None, check_presence=True):
         """Add a new object to the object storage.

         If the adding step works in all the storages that accept this
         content, this is a success. Otherwise, the full adding step is an
         error even if it succeeds in some of the storages.

         Args:
             content: content of the object to be added to the storage.
             obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When
                 given, obj_id will be trusted to match the bytes. If missing,
                 obj_id will be computed on the fly.
             check_presence: indicate if the presence of the content should be
                 verified before adding the file.

         Returns:
             an id of the object into the storage. As the write-storages are
             always readable as well, any id will be valid to retrieve a
             content.
         """
         return self.wrap_call(
             self.get_write_threads(obj_id), 'add', content,
             obj_id=obj_id, check_presence=check_presence,
         ).pop()

+    def add_batch(self, contents, check_presence=True):
+        """Add a batch of new objects to the object storage.
+
+        """
+        write_threads = list(self.get_write_threads())
+        return sum(self.wrap_call(
+            write_threads, 'add_batch', contents,
+            check_presence=check_presence,
+        )) // len(write_threads)
+
     def restore(self, content, obj_id=None):
         return self.wrap_call(
             self.get_write_threads(obj_id), 'restore', content,
             obj_id=obj_id,
         ).pop()

     def get(self, obj_id):
         for storage in self.get_read_threads(obj_id):
             try:
                 return storage.get(obj_id)
             except ObjNotFoundError:
                 continue
         # If no storage contains this content, raise the error
         raise ObjNotFoundError(obj_id)

     def check(self, obj_id):
         nb_present = 0
         for storage in self.get_read_threads(obj_id):
             try:
                 storage.check(obj_id)
             except ObjNotFoundError:
                 continue
             else:
                 nb_present += 1
         # If there is an Error because of a corrupted file, then let it pass.
         # Raise the ObjNotFoundError only if the content couldn't be found in
         # all the storages.
         if nb_present == 0:
             raise ObjNotFoundError(obj_id)

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         return all(
             self.wrap_call(self.get_write_threads(obj_id), 'delete', obj_id)
         )

     def get_random(self, batch_size):
         storages_set = [storage for storage in self.storages
                         if len(storage) > 0]
         if len(storages_set) <= 0:
             return []

         while storages_set:
             storage = random.choice(storages_set)
             try:
                 return storage.get_random(batch_size)
             except NotImplementedError:
                 storages_set.remove(storage)
         # There is no storage that allows the get_random operation
         raise NotImplementedError(
             "There is no storage implementation in the multiplexer that "
             "supports the 'get_random' operation"
         )
diff --git a/swh/objstorage/multiplexer/striping_objstorage.py b/swh/objstorage/multiplexer/striping_objstorage.py
index f73799e..b63222d 100644
--- a/swh/objstorage/multiplexer/striping_objstorage.py
+++ b/swh/objstorage/multiplexer/striping_objstorage.py
@@ -1,45 +1,71 @@
 # Copyright (C) 2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

-from .multiplexer_objstorage import MultiplexerObjStorage
+from collections import defaultdict
+import queue
+
+from .multiplexer_objstorage import ObjStorageThread, MultiplexerObjStorage


 class StripingObjStorage(MultiplexerObjStorage):
     """Stripes objects across multiple objstorages

     This objstorage implementation will write objects to objstorages in a
     predictable way: it takes the modulo of the last 8 bytes of the object
     identifier with the number of object storages passed, which will yield an
     (almost) even distribution.

     Objects are read from all storages in turn until one succeeds.

     """
     MOD_BYTES = 8

     def __init__(self, storages, **kwargs):
         super().__init__(storages, **kwargs)
         self.num_storages = len(storages)

     def get_storage_index(self, obj_id):
         if obj_id is None:
             raise ValueError(
                 'StripingObjStorage always needs obj_id to be set'
             )

         index = int.from_bytes(obj_id[:-self.MOD_BYTES], 'little')
         return index % self.num_storages

     def get_write_threads(self, obj_id):
         idx = self.get_storage_index(obj_id)
         yield self.storage_threads[idx]

     def get_read_threads(self, obj_id=None):
         if obj_id:
             idx = self.get_storage_index(obj_id)
         else:
             idx = 0
         for i in range(self.num_storages):
             yield self.storage_threads[(idx + i) % self.num_storages]

+    def add_batch(self, contents, check_presence=True):
+        """Add a batch of new objects to the object storage.
+
+        """
+        content_by_storage_index = defaultdict(dict)
+        for obj_id, content in contents.items():
+            storage_index = self.get_storage_index(obj_id)
+            content_by_storage_index[storage_index][obj_id] = content
+
+        mailbox = queue.Queue()
+        for storage_index, contents in content_by_storage_index.items():
+            self.storage_threads[storage_index].queue_command(
+                'add_batch',
+                contents,
+                check_presence=check_presence,
+                mailbox=mailbox,
+            )
+        return sum(
+            ObjStorageThread.collect_results(
+                mailbox, len(content_by_storage_index)
+            )
+        )
diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py
index 9a90254..60ed817 100644
--- a/swh/objstorage/objstorage.py
+++ b/swh/objstorage/objstorage.py
@@ -1,263 +1,277 @@
 # Copyright (C) 2015-2017 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import abc

 from swh.model import hashutil

 from .exc import ObjNotFoundError


 ID_HASH_ALGO = 'sha1'
 ID_HASH_LENGTH = 40  # Size in bytes of the hash hexadecimal representation.
 DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024  # Size in bytes of the streaming chunks


 def compute_hash(content):
     return hashutil.hash_data(
         content,
         algorithms=[ID_HASH_ALGO]
     ).get(ID_HASH_ALGO)


 class ObjStorage(metaclass=abc.ABCMeta):
     """High-level API to manipulate the Software Heritage object storage.

     Conceptually, the object storage offers the following methods:

     - check_config()  check if the object storage is properly configured
     - __contains__()  check if an object is present, by object id
     - add()           add a new object, returning an object id
     - restore()       same as add() but erases already existing content
     - get()           retrieve the content of an object, by object id
     - check()         check the integrity of an object, by object id
     - delete()        remove an object

     And some management methods:

     - get_random()    get random object id of existing contents (used for the
                       content integrity checker).

     Some of the methods have available streaming equivalents:

     - add_stream()     same as add() but with a chunked iterator
     - restore_stream() same as add_stream() but erases already existing
                        content
     - get_stream()     same as get() but returns a chunked iterator

     Each implementation of this interface can have a different behavior and
     its own way to store the contents.
     """

     def __init__(self, *, allow_delete=False, **kwargs):
         # A more complete permission system could be used in place of that if
         # it becomes needed
         super().__init__(**kwargs)
         self.allow_delete = allow_delete

     @abc.abstractmethod
     def check_config(self, *, check_write):
         """Check whether the object storage is properly configured.

         Args:
             check_write (bool): if True, check if writes to the object
                 storage can succeed.

         Returns:
             True if the configuration check worked, an exception if it
             didn't.
         """
         pass

     @abc.abstractmethod
     def __contains__(self, obj_id, *args, **kwargs):
         """Indicate if the given object is present in the storage.

         Args:
             obj_id (bytes): object identifier.

         Returns:
             True iff the object is present in the current object storage.
         """
         pass

     @abc.abstractmethod
     def add(self, content, obj_id=None, check_presence=True, *args, **kwargs):
         """Add a new object to the object storage.

         Args:
             content (bytes): object's raw content to add in storage.
             obj_id (bytes): checksum of [bytes] using [ID_HASH_ALGO]
                 algorithm. When given, obj_id will be trusted to match the
                 bytes. If missing, obj_id will be computed on the fly.
             check_presence (bool): indicate if the presence of the content
                 should be verified before adding the file.

         Returns:
             the id (bytes) of the object into the storage.
         """
         pass

+    def add_batch(self, contents, check_presence=True):
+        """Add a batch of new objects to the object storage.
+
+        Args:
+            contents (dict): mapping from obj_id to object contents
+            check_presence (bool): indicate if the presence of the contents
+                should be verified before adding the files.
+
+        Returns:
+            the number of objects added to the storage
+        """
+        ctr = 0
+        for obj_id, content in contents.items():
+            self.add(content, obj_id, check_presence=check_presence)
+            ctr += 1
+        return ctr
+
     def restore(self, content, obj_id=None, *args, **kwargs):
         """Restore a content that has been corrupted.

         This function is identical to add but does not check if the object id
         is already in the file system.
         The default implementation provided by the current class is suitable
         for most cases.

         Args:
             content (bytes): object's raw content to add in storage
             obj_id (bytes): checksum of `bytes` as computed by ID_HASH_ALGO.
                 When given, obj_id will be trusted to match bytes. If missing,
                 obj_id will be computed on the fly.
         """
         # check_presence to false will erase the potential previous content.
         return self.add(content, obj_id, check_presence=False)

     @abc.abstractmethod
     def get(self, obj_id, *args, **kwargs):
         """Retrieve the content of a given object.

         Args:
             obj_id (bytes): object id.

         Returns:
             the content of the requested object as bytes.

         Raises:
             ObjNotFoundError: if the requested object is missing.
         """
         pass

     def get_batch(self, obj_ids, *args, **kwargs):
         """Retrieve objects' raw content in bulk from storage.

         Note: This function does have a default implementation in ObjStorage
         that is suitable for most cases.

         For object storages that need to do the minimal number of requests
         possible (ex: remote object storages), that method can be overridden
         to perform a more efficient operation.

         Args:
             obj_ids ([bytes]): list of object ids.

         Returns:
             list of resulting contents, or None if the content could not be
             retrieved. Do not raise any exception as a fail for one content
             will not cancel the whole request.
         """
         for obj_id in obj_ids:
             try:
                 yield self.get(obj_id)
             except ObjNotFoundError:
                 yield None

     @abc.abstractmethod
     def check(self, obj_id, *args, **kwargs):
         """Perform an integrity check for a given object.

         Verify that the file object is in place and that the gzipped content
         matches the object id.

         Args:
             obj_id (bytes): object identifier.

         Raises:
             ObjNotFoundError: if the requested object is missing.
             Error: if the requested object is corrupted.
         """
         pass

     @abc.abstractmethod
     def delete(self, obj_id, *args, **kwargs):
         """Delete an object.

         Args:
             obj_id (bytes): object identifier.

         Raises:
             ObjNotFoundError: if the requested object is missing.
         """
         if not self.allow_delete:
             raise PermissionError("Delete is not allowed.")

     # Management methods

     def get_random(self, batch_size, *args, **kwargs):
         """Get random ids of existing contents.

         This method is used in order to get random ids to perform
         content integrity verifications on random contents.

         Args:
             batch_size (int): number of ids that will be given

         Yields:
             an iterable of ids (bytes) of contents that are in the
             current object storage.
         """
         pass

     # Streaming methods

     def add_stream(self, content_iter, obj_id, check_presence=True):
         """Add a new object to the object storage using streaming.

         This function is identical to add() except it takes a generator that
         yields the chunked content instead of the whole content at once.

         Args:
             content_iter (bytes generator): generator that yields the
                 object's raw content to add in storage, chunk by chunk.
             obj_id (bytes): object identifier.
             check_presence (bool): indicate if the presence of the content
                 should be verified before adding the file.

         Returns:
             the id (bytes) of the object into the storage.
         """
         raise NotImplementedError

     def restore_stream(self, content_iter, obj_id=None):
         """Restore a content that has been corrupted using streaming.

         This function is identical to restore() except it takes a generator
         that yields the chunked content instead of the whole content at once.
         The default implementation provided by the current class is suitable
         for most cases.

         Args:
             content_iter (bytes generator): generator that yields the
                 object's raw content to add in storage, chunk by chunk.
             obj_id (bytes): object identifier.
         """
         # check_presence to false will erase the potential previous content.
         return self.add_stream(content_iter, obj_id, check_presence=False)

     def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
         """Retrieve the content of a given object as a chunked iterator.

         Args:
             obj_id (bytes): object id.

         Returns:
             an iterator yielding the content of the requested object as
             chunks of bytes.

         Raises:
             ObjNotFoundError: if the requested object is missing.
         """
         raise NotImplementedError
diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py
index 9c2fadb..bd02a31 100644
--- a/swh/objstorage/tests/objstorage_testing.py
+++ b/swh/objstorage/tests/objstorage_testing.py
@@ -1,177 +1,190 @@
 # Copyright (C) 2015-2017 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import time

 from nose.tools import istest

 from swh.model import hashutil

 from swh.objstorage import exc


 class ObjStorageTestFixture():

     def setUp(self):
         super().setUp()

     def hash_content(self, content):
         obj_id = hashutil.hash_data(content)['sha1']
         return content, obj_id

     def assertContentMatch(self, obj_id, expected_content):  # noqa
         content = self.storage.get(obj_id)
         self.assertEqual(content, expected_content)

     @istest
     def check_config(self):
         self.assertTrue(self.storage.check_config(check_write=False))
         self.assertTrue(self.storage.check_config(check_write=True))

     @istest
     def contains(self):
         content_p, obj_id_p = self.hash_content(b'contains_present')
         content_m, obj_id_m = self.hash_content(b'contains_missing')
         self.storage.add(content_p, obj_id=obj_id_p)
         self.assertIn(obj_id_p, self.storage)
         self.assertNotIn(obj_id_m, self.storage)

     @istest
     def add_get_w_id(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         r = self.storage.add(content, obj_id=obj_id)
         self.assertEqual(obj_id, r)
         self.assertContentMatch(obj_id, content)

     @istest
     def add_big(self):
         content, obj_id = self.hash_content(b'add_big' * 1024 * 1024)
         r = self.storage.add(content, obj_id=obj_id)
         self.assertEqual(obj_id, r)
         self.assertContentMatch(obj_id, content)

     @istest
     def add_get_wo_id(self):
         content, obj_id = self.hash_content(b'add_get_wo_id')
         r = self.storage.add(content)
         self.assertEqual(obj_id, r)
         self.assertContentMatch(obj_id, content)

     @istest
     def add_get_batch(self):
         content1, obj_id1 = self.hash_content(b'add_get_batch_1')
         content2, obj_id2 = self.hash_content(b'add_get_batch_2')
         self.storage.add(content1, obj_id1)
         self.storage.add(content2, obj_id2)
         cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2])
         self.assertEqual(cr1, content1)
         self.assertEqual(cr2, content2)

     @istest
     def get_batch_unexisting_content(self):
         content, obj_id = self.hash_content(b'get_batch_unexisting_content')
         result = list(self.storage.get_batch([obj_id]))
         self.assertTrue(len(result) == 1)
         self.assertIsNone(result[0])

     @istest
     def restore_content(self):
         valid_content, valid_obj_id = self.hash_content(b'restore_content')
         invalid_content = b'unexpected content'
         id_adding = self.storage.add(invalid_content, valid_obj_id)
         self.assertEqual(id_adding, valid_obj_id)
         with self.assertRaises(exc.Error):
             self.storage.check(id_adding)
         id_restore = self.storage.restore(valid_content, valid_obj_id)
         self.assertEqual(id_restore, valid_obj_id)
         self.assertContentMatch(valid_obj_id, valid_content)

     @istest
     def get_missing(self):
         content, obj_id = self.hash_content(b'get_missing')
         with self.assertRaises(exc.ObjNotFoundError) as e:
             self.storage.get(obj_id)

         self.assertIn(obj_id, e.exception.args)

     @istest
     def check_missing(self):
         content, obj_id = self.hash_content(b'check_missing')
         with self.assertRaises(exc.Error):
             self.storage.check(obj_id)

     @istest
     def check_present(self):
         content, obj_id = self.hash_content(b'check_present')
         self.storage.add(content, obj_id)
         try:
             self.storage.check(obj_id)
         except exc.Error:
             self.fail('Integrity check failed')

     @istest
     def delete_missing(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b'missing_content_to_delete')
         with self.assertRaises(exc.Error):
             self.storage.delete(obj_id)

     @istest
     def delete_present(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b'content_to_delete')
         self.storage.add(content, obj_id=obj_id)
         self.assertTrue(self.storage.delete(obj_id))
         with self.assertRaises(exc.Error):
             self.storage.get(obj_id)

     @istest
     def delete_not_allowed(self):
         self.storage.allow_delete = False
         content, obj_id = self.hash_content(b'content_to_delete')
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.assertTrue(self.storage.delete(obj_id))

     @istest
     def delete_not_allowed_by_default(self):
         content, obj_id = self.hash_content(b'content_to_delete')
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.assertTrue(self.storage.delete(obj_id))

     @istest
     def add_stream(self):
         content = [b'chunk1', b'chunk2']
         _, obj_id = self.hash_content(b''.join(content))
         try:
             self.storage.add_stream(iter(content), obj_id=obj_id)
         except NotImplementedError:
             return
         self.assertContentMatch(obj_id, b''.join(content))

     @istest
     def add_stream_sleep(self):
         def gen_content():
             yield b'chunk1'
             time.sleep(0.5)
             yield b'chunk2'
         _, obj_id = self.hash_content(b'placeholder_id')
         try:
             self.storage.add_stream(gen_content(), obj_id=obj_id)
         except NotImplementedError:
             return
         self.assertContentMatch(obj_id, b'chunk1chunk2')

     @istest
     def get_stream(self):
         content_l = [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9']
         content = b''.join(content_l)
         _, obj_id = self.hash_content(content)
         self.storage.add(content, obj_id=obj_id)
         try:
             r = list(self.storage.get_stream(obj_id, chunk_size=1))
         except NotImplementedError:
             return
         self.assertEqual(r, content_l)
+
+    @istest
+    def add_batch(self):
+        contents = {}
+        for i in range(50):
+            content = b'Test content %02d' % i
+            content, obj_id = self.hash_content(content)
+            contents[obj_id] = content
+
+        ret = self.storage.add_batch(contents)
+        self.assertEqual(len(contents), ret)
+        for obj_id in contents:
+            self.assertIn(obj_id, self.storage)
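The shared fixture above is mixed into the per-backend test classes, so the
new add_batch test exercises the default loop, the remote client, the
multiplexer and the striping storage alike. With the nose-style @istest
methods used here, the suite can be run with something like (the exact
command is an illustrative assumption)::

    nosetests swh/objstorage/tests -v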