diff --git a/PKG-INFO b/PKG-INFO index 34aede1..35d6d2d 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.objstorage -Version: 0.0.24 +Version: 0.0.25 Summary: Software Heritage Object Storage Home-page: https://forge.softwareheritage.org/diffusion/DOBJS Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/bin/swh-objstorage-azure b/bin/swh-objstorage-azure index b059cc3..33a56bd 100755 --- a/bin/swh-objstorage-azure +++ b/bin/swh-objstorage-azure @@ -1,112 +1,112 @@ #!/usr/bin/env python3 # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # NOT FOR PRODUCTION import click -from swh.objstorage import get_objstorage +from swh.objstorage import get_objstorage, exc from swh.core import config, hashutil class AzureAccess(config.SWHConfig): """This is an orchestration class to try and check objstorage_azure implementation.""" DEFAULT_CONFIG = { # Output storage 'storage_azure': ('dict', {'cls': 'pathslicing', 'args': {'root': '/srv/softwareheritage/objects', 'slicing': '0:2/2:4/4:6'}}), # Input storage 'storage_local': ('dict', {'cls': 'pathslicing', 'args': {'root': '/srv/softwareheritage/objects', 'slicing': '0:2/2:4/4:6'}}), } CONFIG_BASE_FILENAME = 'objstorage/azure' def __init__(self): super().__init__() self.config = self.parse_config_file() self.azure_cloud_storage = get_objstorage( **self.config['storage_azure']) self.read_objstorage = get_objstorage( **self.config['storage_local']) def list_contents(self, limit=10): count = 0 for c in self.azure_cloud_storage: count += 1 yield c if count >= limit: return def send_one_content(self, obj_id): obj_content = self.read_objstorage.get(obj_id) self.azure_cloud_storage.add(content=obj_content, obj_id=obj_id) def check_integrity(self, obj_id): self.azure_cloud_storage.check(obj_id) # will raise if problem def check_presence(self, obj_id): return obj_id in self.azure_cloud_storage def download(self, obj_id): return self.azure_cloud_storage.get(obj_id) @click.command() def tryout(): obj_azure = AzureAccess() hex_sample_id = '00000085c856b32f0709a4f5d669bb4faa3a0ce9' sample_id = hashutil.hex_to_hash(hex_sample_id) check_presence = obj_azure.check_presence(sample_id) print('presence first time should be False:', check_presence) obj_azure.send_one_content(sample_id) check_presence = obj_azure.check_presence(sample_id) print('presence True:', check_presence) hex_sample_2 = 'dfeffffeffff17b439f3e582813bd875e7141a0e' sample_2 = hashutil.hex_to_hash(hex_sample_2) check_presence = obj_azure.check_presence(sample_2) print('presence False:', check_presence) print() print('Download a blob') blob_content = obj_azure.download(sample_id) print(blob_content) print() try: not_found_hex_id = hex_sample_id.replace('0', 'f') not_found_id = hashutil.hash_to_hex(not_found_hex_id) obj_azure.download(not_found_id) - except: + except exc.ObjNotFoundError: print('Expected `blob does not exist`!') # print() # print('blobs:') # print(list(obj_azure.list_contents())) # print() # print('content of %s' % hex_sample_id) # print(obj_azure.download(hex_sample_id)) obj_azure.check_integrity(sample_id) if __name__ == '__main__': tryout() diff --git a/debian/control b/debian/control index af997da..c1eb8c3 100644 --- a/debian/control +++ b/debian/control @@ -1,32 +1,34 @@ 
Source: swh-objstorage Maintainer: Software Heritage developers Section: python Priority: optional Build-Depends: debhelper (>= 9), dh-python (>= 2), python3-aiohttp (>= 2.1.0), python3-all, python3-flask, python3-nose, python3-setuptools, - python3-swh.core (>= 0.0.28~), + python3-swh.core (>= 0.0.37~), python3-swh.model (>= 0.0.14~), python3-click, python3-libcloud, python3-azure-storage, - python3-vcversioner + python3-vcversioner, + python3-rados Standards-Version: 3.9.6 Homepage: https://forge.softwareheritage.org/diffusion/DOBJS/ Package: python3-swh.objstorage Architecture: all -Depends: python3-swh.core (>= 0.0.28~), ${misc:Depends}, ${python3:Depends}, python3-aiohttp (>= 2.1.0) +Depends: python3-swh.core (>= 0.0.37~), ${misc:Depends}, ${python3:Depends}, python3-aiohttp (>= 2.1.0) +Breaks: python3-swh.archiver (<< 0.0.3~) Description: Software Heritage Object Storage Package: python3-swh.objstorage.cloud Architecture: all Depends: python3-swh.objstorage (= ${binary:Version}), python3-libcloud, python3-azure-storage, ${misc:Depends}, ${python3:Depends} Breaks: python3-swh.objstorage (<= 0.0.7~) Description: Software Heritage Cloud Object Storage diff --git a/requirements-swh.txt b/requirements-swh.txt index 464723a..c193587 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,2 +1,2 @@ -swh.core >= 0.0.28 +swh.core >= 0.0.37 swh.model >= 0.0.14 diff --git a/swh.objstorage.egg-info/PKG-INFO b/swh.objstorage.egg-info/PKG-INFO index 34aede1..35d6d2d 100644 --- a/swh.objstorage.egg-info/PKG-INFO +++ b/swh.objstorage.egg-info/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.objstorage -Version: 0.0.24 +Version: 0.0.25 Summary: Software Heritage Object Storage Home-page: https://forge.softwareheritage.org/diffusion/DOBJS Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/swh.objstorage.egg-info/SOURCES.txt b/swh.objstorage.egg-info/SOURCES.txt index e05f550..e62dbd6 100644 --- a/swh.objstorage.egg-info/SOURCES.txt +++ b/swh.objstorage.egg-info/SOURCES.txt @@ -1,57 +1,60 @@ .gitignore AUTHORS LICENSE MANIFEST.in Makefile requirements-swh.txt requirements.txt setup.py version.txt bin/swh-objstorage-add-dir bin/swh-objstorage-azure bin/swh-objstorage-fsck debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format docs/.gitignore docs/Makefile docs/conf.py docs/index.rst docs/_static/.placeholder docs/_templates/.placeholder swh/__init__.py swh.objstorage.egg-info/PKG-INFO swh.objstorage.egg-info/SOURCES.txt swh.objstorage.egg-info/dependency_links.txt swh.objstorage.egg-info/requires.txt swh.objstorage.egg-info/top_level.txt swh/objstorage/__init__.py swh/objstorage/exc.py swh/objstorage/objstorage.py swh/objstorage/objstorage_in_memory.py swh/objstorage/objstorage_pathslicing.py +swh/objstorage/objstorage_rados.py swh/objstorage/api/__init__.py swh/objstorage/api/client.py swh/objstorage/api/server.py swh/objstorage/cloud/__init__.py swh/objstorage/cloud/objstorage_azure.py swh/objstorage/cloud/objstorage_cloud.py swh/objstorage/multiplexer/__init__.py swh/objstorage/multiplexer/multiplexer_objstorage.py +swh/objstorage/multiplexer/striping_objstorage.py swh/objstorage/multiplexer/filter/__init__.py swh/objstorage/multiplexer/filter/filter.py swh/objstorage/multiplexer/filter/id_filter.py swh/objstorage/multiplexer/filter/read_write_filter.py +swh/objstorage/tests/__init__.py swh/objstorage/tests/objstorage_testing.py 
-swh/objstorage/tests/server_testing.py swh/objstorage/tests/test_multiplexer_filter.py swh/objstorage/tests/test_objstorage_api.py swh/objstorage/tests/test_objstorage_azure.py swh/objstorage/tests/test_objstorage_cloud.py swh/objstorage/tests/test_objstorage_in_memory.py swh/objstorage/tests/test_objstorage_instantiation.py swh/objstorage/tests/test_objstorage_multiplexer.py -swh/objstorage/tests/test_objstorage_pathslicing.py \ No newline at end of file +swh/objstorage/tests/test_objstorage_pathslicing.py +swh/objstorage/tests/test_objstorage_striping.py \ No newline at end of file diff --git a/swh.objstorage.egg-info/requires.txt b/swh.objstorage.egg-info/requires.txt index f48fb1f..eeba3e8 100644 --- a/swh.objstorage.egg-info/requires.txt +++ b/swh.objstorage.egg-info/requires.txt @@ -1,5 +1,5 @@ aiohttp>=2.1.0 click -swh.core>=0.0.28 +swh.core>=0.0.37 swh.model>=0.0.14 vcversioner diff --git a/swh/objstorage/__init__.py b/swh/objstorage/__init__.py index bc4dd77..fd36149 100644 --- a/swh/objstorage/__init__.py +++ b/swh/objstorage/__init__.py @@ -1,67 +1,86 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from .objstorage import ObjStorage from .objstorage_pathslicing import PathSlicingObjStorage from .objstorage_in_memory import InMemoryObjStorage from .api.client import RemoteObjStorage -from .multiplexer import MultiplexerObjStorage +from .multiplexer import MultiplexerObjStorage, StripingObjStorage from .multiplexer.filter import add_filters __all__ = ['get_objstorage', 'ObjStorage'] _STORAGE_CLASSES = { 'pathslicing': PathSlicingObjStorage, 'remote': RemoteObjStorage, 'in-memory': InMemoryObjStorage, } try: - from swh.objstorage.cloud.objstorage_azure import AzureCloudObjStorage - _STORAGE_CLASSES['azure-storage'] = AzureCloudObjStorage + from swh.objstorage.cloud.objstorage_azure import ( + AzureCloudObjStorage, + PrefixedAzureCloudObjStorage, + ) + _STORAGE_CLASSES['azure'] = AzureCloudObjStorage + _STORAGE_CLASSES['azure-prefixed'] = PrefixedAzureCloudObjStorage +except ImportError: + pass + +try: + from swh.objstorage.objstorage_rados import RADOSObjStorage + _STORAGE_CLASSES['rados'] = RADOSObjStorage except ImportError: pass def get_objstorage(cls, args): """ Create an ObjStorage using the given implementation class. Args: cls (str): objstorage class unique key contained in the _STORAGE_CLASSES dict. args (dict): arguments for the required class of objstorage that must match exactly the one in the `__init__` method of the class. Returns: subclass of ObjStorage that match the given `storage_class` argument. Raises: ValueError: if the given storage class is not a valid objstorage key. 
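The factory above is the single entry point for every backend: the Azure key is renamed from 'azure-storage' to 'azure', and 'azure-prefixed', 'rados' and 'striping' (registered at the bottom of this module) join the registry. A minimal usage sketch, assuming only the built-in in-memory backend:

    from swh.objstorage import get_objstorage
    from swh.objstorage.objstorage import compute_hash

    # Two in-memory storages striped behind a single facade.
    striped = get_objstorage('striping', {
        'objstorages': [
            {'cls': 'in-memory', 'args': {}},
            {'cls': 'in-memory', 'args': {}},
        ],
    })

    # StripingObjStorage needs obj_id to pick a shard, so compute it up
    # front rather than letting add() derive it.
    content = b'hello objstorage'
    obj_id = striped.add(content, obj_id=compute_hash(content))
    assert striped.get(obj_id) == content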
""" try: return _STORAGE_CLASSES[cls](**args) except KeyError: raise ValueError('Storage class %s does not exist' % cls) def _construct_filtered_objstorage(storage_conf, filters_conf): return add_filters( get_objstorage(**storage_conf), filters_conf ) _STORAGE_CLASSES['filtered'] = _construct_filtered_objstorage def _construct_multiplexer_objstorage(objstorages): storages = [get_objstorage(**conf) for conf in objstorages] return MultiplexerObjStorage(storages) _STORAGE_CLASSES['multiplexer'] = _construct_multiplexer_objstorage + + +def _construct_striping_objstorage(objstorages): + storages = [get_objstorage(**conf) + for conf in objstorages] + return StripingObjStorage(storages) + + +_STORAGE_CLASSES['striping'] = _construct_striping_objstorage diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py index f8fe8d6..d0116ae 100644 --- a/swh/objstorage/api/client.py +++ b/swh/objstorage/api/client.py @@ -1,72 +1,78 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import SWHRemoteAPI from swh.model import hashutil from ..objstorage import ObjStorage, DEFAULT_CHUNK_SIZE from ..exc import ObjNotFoundError, ObjStorageAPIError class RemoteObjStorage(ObjStorage, SWHRemoteAPI): """Proxy to a remote object storage. This class allows to connect to an object storage server via http protocol. Attributes: url (string): The url of the server to connect. Must end with a '/' session: The session to send requests. """ def __init__(self, url, **kwargs): super().__init__(api_exception=ObjStorageAPIError, url=url, **kwargs) def check_config(self, *, check_write): return self.post('check_config', {'check_write': check_write}) def __contains__(self, obj_id): return self.post('content/contains', {'obj_id': obj_id}) def add(self, content, obj_id=None, check_presence=True): return self.post('content/add', {'content': content, 'obj_id': obj_id, 'check_presence': check_presence}) + def add_batch(self, contents, check_presence=True): + return self.post('content/add/batch', { + 'contents': contents, + 'check_presence': check_presence, + }) + def get(self, obj_id): ret = self.post('content/get', {'obj_id': obj_id}) if ret is None: raise ObjNotFoundError(obj_id) else: return ret def get_batch(self, obj_ids): return self.post('content/get/batch', {'obj_ids': obj_ids}) def check(self, obj_id): return self.post('content/check', {'obj_id': obj_id}) def delete(self, obj_id): super().delete(obj_id) # Check delete permission return self.post('content/delete', {'obj_id': obj_id}) # Management methods def get_random(self, batch_size): return self.post('content/get/random', {'batch_size': batch_size}) # Streaming methods def add_stream(self, content_iter, obj_id, check_presence=True): obj_id = hashutil.hash_to_hex(obj_id) return self.post_stream('content/add_stream/{}'.format(obj_id), params={'check_presence': check_presence}, data=content_iter) def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): obj_id = hashutil.hash_to_hex(obj_id) return super().get_stream('content/get_stream/{}'.format(obj_id), chunk_size=chunk_size) diff --git a/swh/objstorage/api/server.py b/swh/objstorage/api/server.py index 41226bc..eedbca2 100644 --- a/swh/objstorage/api/server.py +++ b/swh/objstorage/api/server.py @@ -1,165 +1,181 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS 
file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import asyncio import aiohttp.web import click from swh.core import config from swh.core.api_async import (SWHRemoteAPI, decode_request, encode_data_server as encode_data) from swh.model import hashutil from swh.objstorage import get_objstorage from swh.objstorage.exc import ObjNotFoundError DEFAULT_CONFIG_PATH = 'objstorage/server' DEFAULT_CONFIG = { 'cls': ('str', 'pathslicing'), 'args': ('dict', { 'root': '/srv/softwareheritage/objects', 'slicing': '0:2/2:4/4:6', }), 'client_max_size': ('int', 1024 * 1024 * 1024), } @asyncio.coroutine def index(request): return aiohttp.web.Response(body="SWH Objstorage API server") @asyncio.coroutine def check_config(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].check_config(**req)) @asyncio.coroutine def contains(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].__contains__(**req)) @asyncio.coroutine def add_bytes(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].add(**req)) +@asyncio.coroutine +def add_batch(request): + req = yield from decode_request(request) + return encode_data(request.app['objstorage'].add_batch(**req)) + + @asyncio.coroutine def get_bytes(request): req = yield from decode_request(request) try: ret = request.app['objstorage'].get(**req) except ObjNotFoundError: ret = { 'error': 'object_not_found', 'request': req, } return encode_data(ret, status=404) else: return encode_data(ret) @asyncio.coroutine def get_batch(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].get_batch(**req)) @asyncio.coroutine def check(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].check(**req)) @asyncio.coroutine def delete(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].delete(**req)) # Management methods @asyncio.coroutine def get_random_contents(request): req = yield from decode_request(request) return encode_data(request.app['objstorage'].get_random(**req)) # Streaming methods @asyncio.coroutine def add_stream(request): hex_id = request.match_info['hex_id'] obj_id = hashutil.hash_to_bytes(hex_id) check_pres = (request.query.get('check_presence', '').lower() == 'true') objstorage = request.app['objstorage'] if check_pres and obj_id in objstorage: return encode_data(obj_id) with objstorage.chunk_writer(obj_id) as write: # XXX (3.5): use 'async for chunk in request.content.iter_any()' while not request.content.at_eof(): chunk = yield from request.content.readany() write(chunk) return encode_data(obj_id) @asyncio.coroutine def get_stream(request): hex_id = request.match_info['hex_id'] obj_id = hashutil.hash_to_bytes(hex_id) response = aiohttp.web.StreamResponse() yield from response.prepare(request) for chunk in request.app['objstorage'].get_stream(obj_id, 2 << 20): response.write(chunk) yield from response.drain() return response -def make_app(config, **kwargs): - if 'client_max_size' in config: - kwargs['client_max_size'] = config['client_max_size'] - - app = SWHRemoteAPI(**kwargs) - app.router.add_route('GET', '/', index) - app.router.add_route('POST', '/check_config', check_config) - app.router.add_route('POST', '/content/contains', contains) - app.router.add_route('POST', '/content/add', add_bytes) - 
app.router.add_route('POST', '/content/get', get_bytes) - app.router.add_route('POST', '/content/get/batch', get_batch) - app.router.add_route('POST', '/content/get/random', get_random_contents) - app.router.add_route('POST', '/content/check', check) - app.router.add_route('POST', '/content/delete', delete) - app.router.add_route('POST', '/content/add_stream/{hex_id}', add_stream) - app.router.add_route('GET', '/content/get_stream/{hex_id}', get_stream) - app.update(config) +@asyncio.coroutine +def set_app_config(app): + if app['config']: + cfg = app['config'] + else: + cfg = config.load_named_config(DEFAULT_CONFIG_PATH, DEFAULT_CONFIG) + if 'client_max_size' in cfg: + app._client_max_size = cfg.pop('client_max_size') + app.update(cfg) + + +@asyncio.coroutine +def create_objstorage(app): app['objstorage'] = get_objstorage(app['cls'], app['args']) - return app -def make_app_from_configfile(config_path=DEFAULT_CONFIG_PATH, **kwargs): - cfg = config.load_named_config(config_path, DEFAULT_CONFIG) - return make_app(cfg, **kwargs) +app = SWHRemoteAPI() +app['config'] = None +app.router.add_route('GET', '/', index) +app.router.add_route('POST', '/check_config', check_config) +app.router.add_route('POST', '/content/contains', contains) +app.router.add_route('POST', '/content/add', add_bytes) +app.router.add_route('POST', '/content/add/batch', add_batch) +app.router.add_route('POST', '/content/get', get_bytes) +app.router.add_route('POST', '/content/get/batch', get_batch) +app.router.add_route('POST', '/content/get/random', get_random_contents) +app.router.add_route('POST', '/content/check', check) +app.router.add_route('POST', '/content/delete', delete) +app.router.add_route('POST', '/content/add_stream/{hex_id}', add_stream) +app.router.add_route('GET', '/content/get_stream/{hex_id}', get_stream) +app.on_startup.append(set_app_config) +app.on_startup.append(create_objstorage) @click.command() @click.argument('config-path', required=1) @click.option('--host', default='0.0.0.0', help="Host to run the server") @click.option('--port', default=5003, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=True, help="Indicates if the server should run in debug mode") def launch(config_path, host, port, debug): - app = make_app_from_configfile(config_path, debug=bool(debug)) + cfg = config.load_named_config(config_path, DEFAULT_CONFIG) + app['config'] = cfg + app.update(debug=bool(debug)) aiohttp.web.run_app(app, host=host, port=int(port)) if __name__ == '__main__': launch() diff --git a/swh/objstorage/cloud/objstorage_azure.py b/swh/objstorage/cloud/objstorage_azure.py index c325638..1f4bdaa 100644 --- a/swh/objstorage/cloud/objstorage_azure.py +++ b/swh/objstorage/cloud/objstorage_azure.py @@ -1,128 +1,207 @@ -# Copyright (C) 2016-2017 The Software Heritage developers +# Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import gzip +import itertools +import string + +from azure.storage.blob import BlockBlobService +from azure.common import AzureMissingResourceHttpError +import requests from swh.objstorage.objstorage import ObjStorage, compute_hash from swh.objstorage.exc import ObjNotFoundError, Error from swh.model import hashutil -from azure.storage.blob import BlockBlobService -from azure.common import AzureMissingResourceHttpError - class 
AzureCloudObjStorage(ObjStorage): """ObjStorage with azure abilities. """ def __init__(self, account_name, api_secret_key, container_name, **kwargs): super().__init__(**kwargs) self.block_blob_service = BlockBlobService( account_name=account_name, - account_key=api_secret_key) + account_key=api_secret_key, + request_session=requests.Session(), + ) self.container_name = container_name + def get_blob_service(self, hex_obj_id): + """Get the block_blob_service and container that contains the object with + internal id hex_obj_id + """ + return self.block_blob_service, self.container_name + + def get_all_blob_services(self): + """Get all active block_blob_services""" + yield self.block_blob_service, self.container_name + def _internal_id(self, obj_id): """Internal id is the hex version in objstorage. """ return hashutil.hash_to_hex(obj_id) def check_config(self, *, check_write): """Check the configuration for this object storage""" - props = self.block_blob_service.get_container_properties( - self.container_name - ) + for service, container in self.get_all_blob_services(): + props = service.get_container_properties(container) + + # FIXME: check_write is ignored here + if not props: + return False - # FIXME: check_write is ignored here - return bool(props) + return True def __contains__(self, obj_id): """Does the storage contains the obj_id. """ hex_obj_id = self._internal_id(obj_id) - return self.block_blob_service.exists( - container_name=self.container_name, + service, container = self.get_blob_service(hex_obj_id) + return service.exists( + container_name=container, blob_name=hex_obj_id) def __iter__(self): """Iterate over the objects present in the storage. """ - for obj in self.block_blob_service.list_blobs(self.container_name): - yield hashutil.hash_to_bytes(obj.name) + for service, container in self.get_all_blob_services(): + for obj in service.list_blobs(container): + yield hashutil.hash_to_bytes(obj.name) def __len__(self): """Compute the number of objects in the current object storage. Returns: number of objects contained in the storage. """ return sum(1 for i in self) def add(self, content, obj_id=None, check_presence=True): """Add an obj in storage if it's not there already. """ if obj_id is None: # Checksum is missing, compute it on the fly. obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id hex_obj_id = self._internal_id(obj_id) # Send the gzipped content - self.block_blob_service.create_blob_from_bytes( - container_name=self.container_name, + service, container = self.get_blob_service(hex_obj_id) + service.create_blob_from_bytes( + container_name=container, blob_name=hex_obj_id, blob=gzip.compress(content)) return obj_id def restore(self, content, obj_id=None): """Restore a content. """ return self.add(content, obj_id, check_presence=False) def get(self, obj_id): """Retrieve blob's content if found. """ hex_obj_id = self._internal_id(obj_id) + service, container = self.get_blob_service(hex_obj_id) try: - blob = self.block_blob_service.get_blob_to_bytes( - container_name=self.container_name, + blob = service.get_blob_to_bytes( + container_name=container, blob_name=hex_obj_id) except AzureMissingResourceHttpError: raise ObjNotFoundError(obj_id) return gzip.decompress(blob.content) def check(self, obj_id): """Check the content integrity. 
""" obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id): """Delete an object.""" super().delete(obj_id) # Check delete permission hex_obj_id = self._internal_id(obj_id) + service, container = self.get_blob_service(hex_obj_id) try: - self.block_blob_service.delete_blob( - container_name=self.container_name, + service.delete_blob( + container_name=container, blob_name=hex_obj_id) except AzureMissingResourceHttpError: raise ObjNotFoundError('Content {} not found!'.format(hex_obj_id)) return True + + +class PrefixedAzureCloudObjStorage(AzureCloudObjStorage): + """ObjStorage with azure capabilities, striped by prefix. + + accounts is a dict containing entries of the form: + : + account_name: + api_secret_key: + container_name: + """ + def __init__(self, accounts, **kwargs): + # shortcut AzureCloudObjStorage __init__ + ObjStorage.__init__(self, **kwargs) + + # Definition sanity check + prefix_lengths = set(len(prefix) for prefix in accounts) + if not len(prefix_lengths) == 1: + raise ValueError("Inconsistent prefixes, found lengths %s" + % ', '.join( + str(l) for l in sorted(prefix_lengths) + )) + + self.prefix_len = prefix_lengths.pop() + + expected_prefixes = set( + ''.join(letters) + for letters in itertools.product( + set(string.hexdigits.lower()), repeat=self.prefix_len + ) + ) + missing_prefixes = expected_prefixes - set(accounts) + if missing_prefixes: + raise ValueError("Missing prefixes %s" + % ', '.join(sorted(missing_prefixes))) + + self.prefixes = {} + request_session = requests.Session() + for prefix, account in accounts.items(): + self.prefixes[prefix] = ( + BlockBlobService( + account_name=account['account_name'], + account_key=account['api_secret_key'], + request_session=request_session, + ), + account['container_name'], + ) + + def get_blob_service(self, hex_obj_id): + """Get the block_blob_service and container that contains the object with + internal id hex_obj_id + """ + return self.prefixes[hex_obj_id[:self.prefix_len]] + + def get_all_blob_services(self): + """Get all active block_blob_services""" + yield from self.prefixes.values() diff --git a/swh/objstorage/multiplexer/__init__.py b/swh/objstorage/multiplexer/__init__.py index 09f8b18..33f21ff 100644 --- a/swh/objstorage/multiplexer/__init__.py +++ b/swh/objstorage/multiplexer/__init__.py @@ -1,4 +1,5 @@ from .multiplexer_objstorage import MultiplexerObjStorage +from .striping_objstorage import StripingObjStorage -__all__ = ['MultiplexerObjStorage'] +__all__ = ['MultiplexerObjStorage', 'StripingObjStorage'] diff --git a/swh/objstorage/multiplexer/filter/__init__.py b/swh/objstorage/multiplexer/filter/__init__.py index 1d81952..ec9ca16 100644 --- a/swh/objstorage/multiplexer/filter/__init__.py +++ b/swh/objstorage/multiplexer/filter/__init__.py @@ -1,99 +1,99 @@ from .read_write_filter import ReadObjStorageFilter from .id_filter import RegexIdObjStorageFilter, PrefixIdObjStorageFilter _FILTERS_CLASSES = { 'readonly': ReadObjStorageFilter, 'regex': RegexIdObjStorageFilter, 'prefix': PrefixIdObjStorageFilter } _FILTERS_PRIORITY = { 'readonly': 0, 'prefix': 1, 'regex': 2 } def read_only(): return {'type': 'readonly'} def id_prefix(prefix): return {'type': 'prefix', 'prefix': prefix} def id_regex(regex): return {'type': 'regex', 'regex': regex} def _filter_priority(filter_type): """Get the priority of this filter. 
Priority is a value that indicates if the operation of the filter is time-consuming (smaller values means quick execution), or very likely to be almost always the same value (False being small, and True high). In case the filters are chained, they will be ordered in a way that small priorities (quick execution or instantly break the chain) are executed first. Default value is 1. Value 0 is recommended for storages that change behavior only by disabling some operations (making the method return None). """ return _FILTERS_PRIORITY.get(filter_type, 1) def add_filter(storage, filter_conf): """Add a filter to the given storage. Args: storage (swh.objstorage.ObjStorage): storage which will be filtered. filter_conf (dict): configuration of an ObjStorageFilter, given as - a dictionnary that contains the keys: + a dictionary that contains the keys: - type: which represent the type of filter, one of the keys of _FILTERS_CLASSES - Every arguments that this type of filter requires. Returns: A filtered storage that perform only the valid operations. """ type = filter_conf['type'] args = {k: v for k, v in filter_conf.items() if k != 'type'} filtered_storage = _FILTERS_CLASSES[type](storage=storage, **args) return filtered_storage def add_filters(storage, filter_confs): """ Add multiple filters to the given storage. (See filter.add_filter) Args: storage (swh.objstorage.ObjStorage): storage which will be filtered. filter_confs (list): any number of filter conf, as a dict with: - type: which represent the type of filter, one of the keys of FILTERS. - Every arguments that this type of filter require. Returns: A filtered storage that fulfill the requirement of all the given filters. """ # Reverse sorting in order to put the filter with biggest priority first. filter_confs.sort(key=lambda conf: _filter_priority(conf['type']), reverse=True) # Add the bigest filter to the storage, and reduce it to accumulate filters # on top of it, until the smallest (fastest, see filter.filter_priority) is # added. for filter_conf in filter_confs: storage = add_filter(storage, filter_conf) return storage diff --git a/swh/objstorage/multiplexer/filter/id_filter.py b/swh/objstorage/multiplexer/filter/id_filter.py index c9c2115..9285f9a 100644 --- a/swh/objstorage/multiplexer/filter/id_filter.py +++ b/swh/objstorage/multiplexer/filter/id_filter.py @@ -1,90 +1,90 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import re import abc from swh.model import hashutil from .filter import ObjStorageFilter from ...objstorage import compute_hash from ...exc import ObjNotFoundError class IdObjStorageFilter(ObjStorageFilter, metaclass=abc.ABCMeta): """ Filter that only allow operations if the object id match a requirement. Even for read operations, check before if the id match the requirements. - This may prevent for unnecesary disk access. + This may prevent for unnecessary disk access. """ @abc.abstractmethod def is_valid(self, obj_id): """ Indicates if the given id is valid. 
""" raise NotImplementedError('Implementations of an IdObjStorageFilter ' 'must have a "is_valid" method') def __contains__(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.__contains__(*args, obj_id=obj_id, **kwargs) return False def __len__(self): return sum(1 for i in [id for id in self.storage if self.is_valid(id)]) def __iter__(self): yield from filter(lambda id: self.is_valid(id), iter(self.storage)) def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): if obj_id is None: obj_id = compute_hash(content) if self.is_valid(obj_id): return self.storage.add(content, *args, obj_id=obj_id, **kwargs) def restore(self, content, obj_id=None, *args, **kwargs): if obj_id is None: obj_id = compute_hash(content) if self.is_valid(obj_id): return self.storage.restore(content, *args, obj_id=obj_id, **kwargs) def get(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.get(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def check(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.check(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def get_random(self, *args, **kwargs): yield from filter(lambda id: self.is_valid(id), self.storage.get_random(*args, **kwargs)) class RegexIdObjStorageFilter(IdObjStorageFilter): """ Filter that allow operations if the content's id as hex match a regex. """ def __init__(self, storage, regex): super().__init__(storage) self.regex = re.compile(regex) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.regex.match(hex_obj_id) is not None class PrefixIdObjStorageFilter(IdObjStorageFilter): """ Filter that allow operations if the hexlified id have a given prefix. """ def __init__(self, storage, prefix): super().__init__(storage) self.prefix = str(prefix) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return str(hex_obj_id).startswith(self.prefix) diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py index b7dab2c..65b00e4 100644 --- a/swh/objstorage/multiplexer/multiplexer_objstorage.py +++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py @@ -1,179 +1,299 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import queue import random +import threading from ..objstorage import ObjStorage from ..exc import ObjNotFoundError +class ObjStorageThread(threading.Thread): + def __init__(self, storage): + super().__init__(daemon=True) + self.storage = storage + self.commands = queue.Queue() + + def run(self): + while True: + try: + mailbox, command, args, kwargs = self.commands.get(True, 0.05) + except queue.Empty: + continue + + try: + ret = getattr(self.storage, command)(*args, **kwargs) + except Exception as exc: + self.queue_result(mailbox, 'exception', exc) + else: + self.queue_result(mailbox, 'result', ret) + + + def queue_command(self, command, *args, mailbox=None, **kwargs): + """Enqueue a new command to be processed by the thread. + + Args: + command (str): one of the method names for the underlying storage. + mailbox (queue.Queue): explicit mailbox if the calling thread wants + to override it. + args, kwargs: arguments for the command. 
+ Returns: queue.Queue + The mailbox you can read the response from + """ + if not mailbox: + mailbox = queue.Queue() + self.commands.put((mailbox, command, args, kwargs)) + return mailbox + + def queue_result(self, mailbox, result_type, result): + """Enqueue a new result in the mailbox + + This also provides a reference to the storage, which can be useful when + an exceptional condition arises. + + Args: + mailbox (queue.Queue): the mailbox to which we need to enqueue the + result + result_type (str): one of 'result', 'exception' + result: the result to pass back to the calling thread + """ + mailbox.put({ + 'type': result_type, + 'result': result, + }) + + @staticmethod + def get_result_from_mailbox(mailbox, *args, **kwargs): + """Unpack the result from the mailbox. + + Arguments: + mailbox (queue.Queue): A mailbox to unpack a result from + args, kwargs: arguments to :func:`mailbox.get` + + Returns: + the next result unpacked from the queue + Raises: + either the exception we got back from the underlying storage, + or :exc:`queue.Empty` if :func:`mailbox.get` raises that. + """ + + result = mailbox.get(*args, **kwargs) + if result['type'] == 'exception': + raise result['result'] from None + else: + return result['result'] + + @staticmethod + def collect_results(mailbox, num_results): + """Collect num_results from the mailbox""" + collected = 0 + ret = [] + while collected < num_results: + try: + ret.append(ObjStorageThread.get_result_from_mailbox( + mailbox, True, 0.05 + )) + except queue.Empty: + continue + collected += 1 + return ret + + def __getattr__(self, attr): + def call(*args, **kwargs): + mailbox = self.queue_command(attr, *args, **kwargs) + return self.get_result_from_mailbox(mailbox) + return call + + def __contains__(self, *args, **kwargs): + mailbox = self.queue_command('__contains__', *args, **kwargs) + return self.get_result_from_mailbox(mailbox) + + class MultiplexerObjStorage(ObjStorage): """Implementation of ObjStorage that distributes between multiple storages. The multiplexer object storage allows an input to be demultiplexed among multiple storages that will or will not accept it by themselves (see .filter package). As the ids can be differents, no pre-computed ids should be submitted. Also, there are no guarantees that the returned ids can be used directly into the storages that the multiplexer manage. Use case examples follow. Example 1:: storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1', '0:2/2:4/4:6')) storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5') storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using 'storage', all the new contents will only be added to the v2 storage, while it will be retrievable from both. Example 2:: storage_v1 = filter.id_regex( PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'), r'[^012].*' ) storage_v2 = filter.if_regex( PathSlicingObjStorage('/dir2', '0:1/0:5'), r'[012]/*' ) storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using this storage, the contents with a sha1 starting with 0, 1 or 2 will be redirected (read AND write) to the storage_v2, while the others will be redirected to the storage_v1. If a content starting with 0, 1 or 2 is present in the storage_v1, it would be ignored anyway. 
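Both docstring examples can equivalently be built through the get_objstorage factory; a sketch of Example 1, assuming /dir1 and /dir2 already exist on disk:

    from swh.objstorage import get_objstorage

    storage = get_objstorage('multiplexer', {
        'objstorages': [
            # Read-only view of the legacy layout.
            {'cls': 'filtered', 'args': {
                'storage_conf': {
                    'cls': 'pathslicing',
                    'args': {'root': '/dir1', 'slicing': '0:2/2:4/4:6'},
                },
                'filters_conf': [{'type': 'readonly'}],
            }},
            # Writable storage with the new layout.
            {'cls': 'pathslicing',
             'args': {'root': '/dir2', 'slicing': '0:1/0:5'}},
        ],
    })

New contents land only in /dir2, while reads fall back across both storages.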
""" def __init__(self, storages, **kwargs): super().__init__(**kwargs) self.storages = storages + self.storage_threads = [ + ObjStorageThread(storage) for storage in storages + ] + for thread in self.storage_threads: + thread.start() - def check_config(self, *, check_write): - return all( - storage.check_config(check_write=check_write) - for storage in self.storages - ) + def wrap_call(self, threads, call, *args, **kwargs): + threads = list(threads) + mailbox = queue.Queue() + for thread in threads: + thread.queue_command(call, *args, mailbox=mailbox, **kwargs) - def __contains__(self, obj_id): - """Check the object storage for proper configuration. + return ObjStorageThread.collect_results(mailbox, len(threads)) - Args: - check_write: check whether writes to the objstorage will succeed - Returns: - True if the storage is properly configured - """ - for storage in self.storages: - if obj_id in storage: - return True - return False + def get_read_threads(self, obj_id=None): + yield from self.storage_threads - def __iter__(self): - """Iterates over the content of each storages + def get_write_threads(self, obj_id=None): + yield from self.storage_threads - Due to the demultiplexer nature, same content can be in multiple - storages and may be yielded multiple times. + def check_config(self, *, check_write): + """Check whether the object storage is properly configured. - Warning: - The ``__iter__`` methods frequently have bad performance. You - almost certainly don't want to use this method in production. + Args: + check_write (bool): if True, check if writes to the object storage + can succeed. + Returns: + True if the configuration check worked, an exception if it didn't. """ - for storage in self.storages: - yield from storage + return all( + self.wrap_call(self.storage_threads, 'check_config', + check_write=check_write) + ) - def __len__(self): - """Compute the number of objects in the current object storage. + def __contains__(self, obj_id): + """Indicate if the given object is present in the storage. - Identical objects present in multiple storages will be counted as - multiple objects. - Warning: this currently uses `__iter__`, its warning about bad - performance applies. + Args: + obj_id (bytes): object identifier. Returns: - number of objects contained in the storage. + True iff the object is present in the current object storage. """ - return sum(map(len, self.storages)) + for storage in self.get_read_threads(obj_id): + if obj_id in storage: + return True + return False def add(self, content, obj_id=None, check_presence=True): """ Add a new object to the object storage. If the adding step works in all the storages that accept this content, this is a success. Otherwise, the full adding step is an error even if it succeed in some of the storages. Args: content: content of the object to be added to the storage. obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence: indicate if the presence of the content should be verified before adding the file. Returns: an id of the object into the storage. As the write-storages are always readable as well, any id will be valid to retrieve a content. 
""" - return [storage.add(content, obj_id, check_presence) - for storage in self.storages].pop() + return self.wrap_call( + self.get_write_threads(obj_id), 'add', content, + obj_id=obj_id, check_presence=check_presence, + ).pop() + + def add_batch(self, contents, check_presence=True): + """Add a batch of new objects to the object storage. + + """ + write_threads = list(self.get_write_threads()) + return sum(self.wrap_call( + write_threads, 'add_batch', contents, + check_presence=check_presence, + )) // len(write_threads) def restore(self, content, obj_id=None): - return [storage.restore(content, obj_id) - for storage in self.storages].pop() + return self.wrap_call( + self.get_write_threads(obj_id), 'restore', content, obj_id=obj_id, + ).pop() def get(self, obj_id): - for storage in self.storages: + for storage in self.get_read_threads(obj_id): try: return storage.get(obj_id) except ObjNotFoundError: continue # If no storage contains this content, raise the error raise ObjNotFoundError(obj_id) def check(self, obj_id): nb_present = 0 - for storage in self.storages: + for storage in self.get_read_threads(obj_id): try: storage.check(obj_id) except ObjNotFoundError: continue else: nb_present += 1 # If there is an Error because of a corrupted file, then let it pass. - # Raise the ObjNotFoundError only if the content coulnd't be found in + # Raise the ObjNotFoundError only if the content couldn't be found in # all the storages. if nb_present == 0: raise ObjNotFoundError(obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission - return all(storage.delete(obj_id) for storage in self.storages) + return all( + self.wrap_call(self.get_write_threads(obj_id), 'delete', obj_id) + ) def get_random(self, batch_size): storages_set = [storage for storage in self.storages if len(storage) > 0] if len(storages_set) <= 0: return [] while storages_set: storage = random.choice(storages_set) try: return storage.get_random(batch_size) except NotImplementedError: storages_set.remove(storage) # There is no storage that allow the get_random operation raise NotImplementedError( "There is no storage implementation into the multiplexer that " "support the 'get_random' operation" ) diff --git a/swh/objstorage/multiplexer/striping_objstorage.py b/swh/objstorage/multiplexer/striping_objstorage.py new file mode 100644 index 0000000..b63222d --- /dev/null +++ b/swh/objstorage/multiplexer/striping_objstorage.py @@ -0,0 +1,71 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from collections import defaultdict +import queue + +from .multiplexer_objstorage import ObjStorageThread, MultiplexerObjStorage + + +class StripingObjStorage(MultiplexerObjStorage): + """Stripes objects across multiple objstorages + + This objstorage implementation will write objects to objstorages in a + predictable way: it takes the modulo of the last 8 bytes of the object + identifier with the number of object storages passed, which will yield an + (almost) even distribution. + + Objects are read from all storages in turn until it succeeds. 
+ + """ + MOD_BYTES = 8 + + def __init__(self, storages, **kwargs): + super().__init__(storages, **kwargs) + self.num_storages = len(storages) + + def get_storage_index(self, obj_id): + if obj_id is None: + raise ValueError( + 'StripingObjStorage always needs obj_id to be set' + ) + + index = int.from_bytes(obj_id[:-self.MOD_BYTES], 'little') + return index % self.num_storages + + def get_write_threads(self, obj_id): + idx = self.get_storage_index(obj_id) + yield self.storage_threads[idx] + + def get_read_threads(self, obj_id=None): + if obj_id: + idx = self.get_storage_index(obj_id) + else: + idx = 0 + for i in range(self.num_storages): + yield self.storage_threads[(idx + i) % self.num_storages] + + def add_batch(self, contents, check_presence=True): + """Add a batch of new objects to the object storage. + + """ + content_by_storage_index = defaultdict(dict) + for obj_id, content in contents.items(): + storage_index = self.get_storage_index(obj_id) + content_by_storage_index[storage_index][obj_id] = content + + mailbox = queue.Queue() + for storage_index, contents in content_by_storage_index.items(): + self.storage_threads[storage_index].queue_command( + 'add_batch', + contents, + check_presence=check_presence, + mailbox=mailbox, + ) + return sum( + ObjStorageThread.collect_results( + mailbox, len(content_by_storage_index) + ) + ) diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index 9a90254..60ed817 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,263 +1,277 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from swh.model import hashutil from .exc import ObjNotFoundError ID_HASH_ALGO = 'sha1' ID_HASH_LENGTH = 40 # Size in bytes of the hash hexadecimal representation. DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024 # Size in bytes of the streaming chunks def compute_hash(content): return hashutil.hash_data( content, algorithms=[ID_HASH_ALGO] ).get(ID_HASH_ALGO) class ObjStorage(metaclass=abc.ABCMeta): """ High-level API to manipulate the Software Heritage object storage. Conceptually, the object storage offers the following methods: - check_config() check if the object storage is properly configured - __contains__() check if an object is present, by object id - add() add a new object, returning an object id - restore() same as add() but erase an already existed content - get() retrieve the content of an object, by object id - check() check the integrity of an object, by object id - delete() remove an object And some management methods: - get_random() get random object id of existing contents (used for the content integrity checker). Some of the methods have available streaming equivalents: - add_stream() same as add() but with a chunked iterator - restore_stream() same as add_stream() but erase already existing content - get_stream() same as get() but returns a chunked iterator Each implementation of this interface can have a different behavior and its own way to store the contents. """ def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed super().__init__(**kwargs) self.allow_delete = allow_delete @abc.abstractmethod def check_config(self, *, check_write): """Check whether the object storage is properly configured. 
Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ pass @abc.abstractmethod def __contains__(self, obj_id, *args, **kwargs): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True iff the object is present in the current object storage. """ pass @abc.abstractmethod def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): """Add a new object to the object storage. Args: content (bytes): object's raw content to add in storage. obj_id (bytes): checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence (bool): indicate if the presence of the content should be verified before adding the file. Returns: the id (bytes) of the object into the storage. """ pass + def add_batch(self, contents, check_presence=True): + """Add a batch of new objects to the object storage. + + Args: + contents (dict): mapping from obj_id to object contents + Returns: + the number of objects added to the storage + """ + ctr = 0 + for obj_id, content in contents.items(): + self.add(content, obj_id, check_presence=check_presence) + ctr += 1 + return ctr + def restore(self, content, obj_id=None, *args, **kwargs): """Restore a content that have been corrupted. This function is identical to add but does not check if the object id is already in the file system. The default implementation provided by the current class is suitable for most cases. Args: content (bytes): object's raw content to add in storage obj_id (bytes): checksum of `bytes` as computed by ID_HASH_ALGO. When given, obj_id will be trusted to match bytes. If missing, obj_id will be computed on the fly. """ # check_presence to false will erase the potential previous content. return self.add(content, obj_id, check_presence=False) @abc.abstractmethod def get(self, obj_id, *args, **kwargs): """Retrieve the content of a given object. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ pass def get_batch(self, obj_ids, *args, **kwargs): """Retrieve objects' raw content in bulk from storage. Note: This function does have a default implementation in ObjStorage that is suitable for most cases. For object storages that needs to do the minimal number of requests possible (ex: remote object storages), that method can be overriden to perform a more efficient operation. Args: obj_ids ([bytes]: list of object ids. Returns: list of resulting contents, or None if the content could not be retrieved. Do not raise any exception as a fail for one content will not cancel the whole request. """ for obj_id in obj_ids: try: yield self.get(obj_id) except ObjNotFoundError: yield None @abc.abstractmethod def check(self, obj_id, *args, **kwargs): """Perform an integrity check for a given object. Verify that the file object is in place and that the gziped content matches the object id. Args: obj_id (bytes): object identifier. Raises: ObjNotFoundError: if the requested object is missing. Error: if the request object is corrupted. """ pass @abc.abstractmethod def delete(self, obj_id, *args, **kwargs): """Delete an object. Args: obj_id (bytes): object identifier. Raises: ObjNotFoundError: if the requested object is missing.
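The new add_batch contract takes a dict mapping object id to raw content and returns the number of objects handed to the backend; the remote client above maps it onto the new 'content/add/batch' endpoint. A usage sketch against the in-memory backend:

    from swh.objstorage import get_objstorage
    from swh.objstorage.objstorage import compute_hash

    storage = get_objstorage('in-memory', {})
    payloads = [b'first object', b'second object']
    contents = {compute_hash(data): data for data in payloads}

    assert storage.add_batch(contents) == 2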
""" if not self.allow_delete: raise PermissionError("Delete is not allowed.") # Management methods def get_random(self, batch_size, *args, **kwargs): """Get random ids of existing contents. This method is used in order to get random ids to perform content integrity verifications on random contents. Args: batch_size (int): Number of ids that will be given Yields: An iterable of ids (bytes) of contents that are in the current object storage. """ pass # Streaming methods def add_stream(self, content_iter, obj_id, check_presence=True): """Add a new object to the object storage using streaming. This function is identical to add() except it takes a generator that yields the chunked content instead of the whole content at once. Args: content (bytes): chunked generator that yields the object's raw content to add in storage. obj_id (bytes): object identifier check_presence (bool): indicate if the presence of the content should be verified before adding the file. Returns: the id (bytes) of the object into the storage. """ raise NotImplementedError def restore_stream(self, content_iter, obj_id=None): """Restore a content that have been corrupted using streaming. This function is identical to restore() except it takes a generator that yields the chunked content instead of the whole content at once. The default implementation provided by the current class is suitable for most cases. Args: content (bytes): chunked generator that yields the object's raw content to add in storage. obj_id (bytes): object identifier """ # check_presence to false will erase the potential previous content. return self.add_stream(content_iter, obj_id, check_presence=False) def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): """Retrieve the content of a given object as a chunked iterator. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ raise NotImplementedError diff --git a/swh/objstorage/objstorage_in_memory.py b/swh/objstorage/objstorage_in_memory.py index c023744..1717150 100644 --- a/swh/objstorage/objstorage_in_memory.py +++ b/swh/objstorage/objstorage_in_memory.py @@ -1,55 +1,57 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from swh.objstorage.exc import ObjNotFoundError +from swh.objstorage.exc import ObjNotFoundError, Error from swh.objstorage import objstorage class InMemoryObjStorage(objstorage.ObjStorage): """In-Memory objstorage. Intended for test purposes. 
""" - state = {} def __init__(self, **args): super().__init__() + self.state = {} def check_config(self, *, check_write): return True def __contains__(self, obj_id, *args, **kwargs): return obj_id in self.state def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): if obj_id is None: obj_id = objstorage.compute_hash(content) if check_presence and obj_id in self: return obj_id self.state[obj_id] = content return obj_id def get(self, obj_id, *args, **kwargs): if obj_id not in self: raise ObjNotFoundError(obj_id) return self.state[obj_id] def check(self, obj_id, *args, **kwargs): if obj_id not in self: raise ObjNotFoundError(obj_id) + if objstorage.compute_hash(self.state[obj_id]) != obj_id: + raise Error('Corrupt object %s' % obj_id) return True def delete(self, obj_id, *args, **kwargs): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.state.pop(obj_id) return True diff --git a/swh/objstorage/objstorage_pathslicing.py b/swh/objstorage/objstorage_pathslicing.py index 49d8c7b..96f152d 100644 --- a/swh/objstorage/objstorage_pathslicing.py +++ b/swh/objstorage/objstorage_pathslicing.py @@ -1,340 +1,340 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import os import gzip import tempfile import random from contextlib import contextmanager from swh.model import hashutil from .objstorage import (ObjStorage, compute_hash, ID_HASH_ALGO, ID_HASH_LENGTH, DEFAULT_CHUNK_SIZE) from .exc import ObjNotFoundError, Error GZIP_BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 @contextmanager def _write_obj_file(hex_obj_id, objstorage): """ Context manager for writing object files to the object storage. During writing, data are written to a temporary file, which is atomically renamed to the right file name after closing. This context manager also takes care of (gzip) compressing the data on the fly. Usage sample: with _write_obj_file(hex_obj_id, objstorage): f.write(obj_data) Yields: a file-like object open for writing bytes. """ # Get the final paths and create the directory if absent. dir = objstorage._obj_dir(hex_obj_id) if not os.path.isdir(dir): os.makedirs(dir, DIR_MODE, exist_ok=True) path = os.path.join(dir, hex_obj_id) # Create a temporary file. (tmp, tmp_path) = tempfile.mkstemp(suffix='.tmp', prefix='hex_obj_id.', dir=dir) # Open the file and yield it for writing. tmp_f = os.fdopen(tmp, 'wb') with gzip.GzipFile(filename=tmp_path, fileobj=tmp_f) as f: yield f # Then close the temporary file and move it to the right directory. tmp_f.close() os.chmod(tmp_path, FILE_MODE) os.rename(tmp_path, path) @contextmanager def _read_obj_file(hex_obj_id, objstorage): """ Context manager for reading object file in the object storage. Usage sample: with _read_obj_file(hex_obj_id, objstorage) as f: b = f.read() Yields: a file-like object open for reading bytes. """ path = objstorage._obj_path(hex_obj_id) with gzip.GzipFile(path, 'rb') as f: yield f class PathSlicingObjStorage(ObjStorage): """Implementation of the ObjStorage API based on the hash of the content. On disk, an object storage is a directory tree containing files named after their object IDs. An object ID is a checksum of its content, depending on the value of the ID_HASH_ALGO constant (see swh.model.hashutil for its meaning). 
To avoid directories that contain too many files, the object storage has a given slicing. Each slice corresponds to a directory level named after the matching substring of the object's hexadecimal id. For instance, a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 would be stored at the following paths, depending on the slicing: - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 Files are stored gzip-compressed. Attributes: root (string): path to the root directory of the storage on the disk. bounds: list of slice objects delimiting, in a hexadecimal object id, the substring used to name each directory level. """ def __init__(self, root, slicing, **kwargs): """ Create an object to access a hash-slicing based object storage. Args: root (string): path to the root directory of the storage on the disk. slicing (string): slicing specification (e.g. '0:2/2:4/4:6') applied to the hexadecimal object id to compute the path where the content is stored. """ super().__init__(**kwargs) self.root = root # Make a list of slice objects, one per slicing level, each holding # the beginning and the end of that slice. self.bounds = [ slice(*map(int, sbounds.split(':'))) for sbounds in slicing.split('/') if sbounds ] self.check_config(check_write=False) def check_config(self, *, check_write): """Check whether this object storage is properly configured""" root = self.root if not os.path.isdir(root): raise ValueError( 'PathSlicingObjStorage root "%s" is not a directory' % root ) max_endchar = max(map(lambda bound: bound.stop, self.bounds)) if ID_HASH_LENGTH < max_endchar: raise ValueError( 'Algorithm %s has too short hash for slicing to char %d' % (ID_HASH_ALGO, max_endchar) ) if check_write: if not os.access(self.root, os.W_OK): raise PermissionError( 'PathSlicingObjStorage root "%s" is not writable' % root ) return True def __contains__(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) - return os.path.exists(self._obj_path(hex_obj_id)) + return os.path.isfile(self._obj_path(hex_obj_id)) def __iter__(self): """Iterate over the object identifiers currently available in the storage. Warning: with the current implementation of the object storage, this method will walk the filesystem to list objects, meaning that listing all objects will be very slow for large storages. You almost certainly don't want to use this method in production. Return: Iterator over object IDs """ def obj_iterator(): # XXX hackish: it does not verify that the depth of found files # matches the slicing depth of the storage for root, _dirs, files in os.walk(self.root): for f in files: yield bytes.fromhex(f) return obj_iterator() def __len__(self): """Compute the number of objects available in the storage. Warning: this currently uses `__iter__`, so its warning about bad performance applies Return: number of objects contained in the storage """ return sum(1 for i in self) def _obj_dir(self, hex_obj_id): """ Compute the storage directory of an object. See also: PathSlicingObjStorage::_obj_path Args: hex_obj_id: object id as hexlified string. Returns: Path to the directory that contains the required object. """ slices = [hex_obj_id[bound] for bound in self.bounds] return os.path.join(self.root, *slices) def _obj_path(self, hex_obj_id): """ Compute the full path to an object in the current storage. See also: PathSlicingObjStorage::_obj_dir Args: hex_obj_id: object id as hexlified string. Returns: Path to the actual object corresponding to the given id.
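The slicing logic can be checked by hand; a worked example with the
slicing '0:2/2:4/4:6' and an illustrative root directory:

    hex_obj_id = '34973274ccef6ab4dfaaf86599792fa9c3fe4689'
    bounds = [slice(*map(int, b.split(':')))
              for b in '0:2/2:4/4:6'.split('/') if b]
    parts = [hex_obj_id[b] for b in bounds]   # ['34', '97', '32']
    path = '/'.join(['/srv/objects'] + parts + [hex_obj_id])
    # -> /srv/objects/34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689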
""" return os.path.join(self._obj_dir(hex_obj_id), hex_obj_id) def add(self, content, obj_id=None, check_presence=True): if obj_id is None: obj_id = compute_hash(content) if check_presence and obj_id in self: # If the object is already present, return immediately. return obj_id hex_obj_id = hashutil.hash_to_hex(obj_id) with _write_obj_file(hex_obj_id, self) as f: f.write(content) return obj_id def get(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) # Open the file and return its content as bytes hex_obj_id = hashutil.hash_to_hex(obj_id) with _read_obj_file(hex_obj_id, self) as f: return f.read() def check(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) try: with gzip.open(self._obj_path(hex_obj_id)) as f: length = None if ID_HASH_ALGO.endswith('_git'): # if the hashing algorithm is git-like, we need to know the # content size to hash on the fly. Do a first pass here to # compute the size length = 0 while True: chunk = f.read(GZIP_BUFSIZ) length += len(chunk) if not chunk: break f.rewind() checksums = hashutil.hash_file(f, length, algorithms=[ID_HASH_ALGO]) actual_obj_id = checksums[ID_HASH_ALGO] if obj_id != actual_obj_id: raise Error( 'Corrupt object %s should have id %s' % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id)) ) except (OSError, IOError): # IOError is for compatibility with older python versions raise Error('Corrupt object %s is not a gzip file' % obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) try: os.remove(self._obj_path(hex_obj_id)) except FileNotFoundError: raise ObjNotFoundError(obj_id) return True # Management methods def get_random(self, batch_size): def get_random_content(self, batch_size): """ Get a batch of content inside a single directory. Returns: a tuple (batch size, batch). 
""" dirs = [] for level in range(len(self.bounds)): path = os.path.join(self.root, *dirs) dir_list = next(os.walk(path))[1] if 'tmp' in dir_list: dir_list.remove('tmp') dirs.append(random.choice(dir_list)) path = os.path.join(self.root, *dirs) content_list = next(os.walk(path))[2] length = min(batch_size, len(content_list)) return length, map(hashutil.hash_to_bytes, random.sample(content_list, length)) while batch_size: length, it = get_random_content(self, batch_size) batch_size = batch_size - length yield from it # Streaming methods @contextmanager def chunk_writer(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) with _write_obj_file(hex_obj_id, self) as f: yield f.write def add_stream(self, content_iter, obj_id, check_presence=True): if check_presence and obj_id in self: return obj_id with self.chunk_writer(obj_id) as writer: for chunk in content_iter: writer(chunk) return obj_id def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) with _read_obj_file(hex_obj_id, self) as f: reader = functools.partial(f.read, chunk_size) yield from iter(reader, b'') diff --git a/swh/objstorage/objstorage_rados.py b/swh/objstorage/objstorage_rados.py new file mode 100644 index 0000000..82e41ca --- /dev/null +++ b/swh/objstorage/objstorage_rados.py @@ -0,0 +1,87 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import rados + +from swh.model import hashutil + +from swh.objstorage.exc import ObjNotFoundError +from swh.objstorage import objstorage + +READ_SIZE = 8192 + + +class RADOSObjStorage(objstorage.ObjStorage): + """Object storage implemented with RADOS""" + + def __init__(self, *, rados_id, pool_name, ceph_config, + allow_delete=False): + super().__init__(allow_delete=allow_delete) + self.pool_name = pool_name + self.cluster = rados.Rados(rados_id=rados_id, conf=ceph_config) + self.cluster.connect() + self.__ioctx = None + + def check_config(self, *, check_write): + if self.pool_name not in self.cluster.list_pools(): + raise ValueError('Pool %s does not exist' % self.pool_name) + + @staticmethod + def _to_rados_obj_id(obj_id): + """Convert to a RADOS object identifier""" + return hashutil.hash_to_hex(obj_id) + + @property + def ioctx(self): + if not self.__ioctx: + self.__ioctx = self.cluster.open_ioctx(self.pool_name) + return self.__ioctx + + def __contains__(self, obj_id): + try: + self.ioctx.stat(self._to_rados_obj_id(obj_id)) + except rados.ObjectNotFound: + return False + else: + return True + + def add(self, content, obj_id=None, check_presence=True): + if not obj_id: + raise ValueError('add needs an obj_id') + + _obj_id = self._to_rados_obj_id(obj_id) + + if check_presence: + try: + self.ioctx.stat(_obj_id) + except rados.ObjectNotFound: + pass + else: + return obj_id + self.ioctx.write_full(_obj_id, content) + + return obj_id + + def get(self, obj_id): + chunks = [] + _obj_id = self._to_rados_obj_id(obj_id) + try: + length, mtime = self.ioctx.stat(_obj_id) + except rados.ObjectNotFound: + raise ObjNotFoundError(obj_id) from None + offset = 0 + while offset < length: + chunk = self.ioctx.read(_obj_id, offset, READ_SIZE) + chunks.append(chunk) + offset += len(chunk) + + return b''.join(chunks) + + def check(self, obj_id): + return True + + def delete(self, obj_id): + 
super().delete(obj_id) # check delete permission + return True diff --git a/swh/objstorage/tests/__init__.py b/swh/objstorage/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py index 4d4820c..bd02a31 100644 --- a/swh/objstorage/tests/objstorage_testing.py +++ b/swh/objstorage/tests/objstorage_testing.py @@ -1,177 +1,190 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import time from nose.tools import istest from swh.model import hashutil from swh.objstorage import exc class ObjStorageTestFixture(): def setUp(self): super().setUp() def hash_content(self, content): obj_id = hashutil.hash_data(content)['sha1'] return content, obj_id def assertContentMatch(self, obj_id, expected_content): # noqa content = self.storage.get(obj_id) self.assertEqual(content, expected_content) @istest def check_config(self): self.assertTrue(self.storage.check_config(check_write=False)) self.assertTrue(self.storage.check_config(check_write=True)) @istest def contains(self): content_p, obj_id_p = self.hash_content(b'contains_present') content_m, obj_id_m = self.hash_content(b'contains_missing') self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) @istest def add_get_w_id(self): content, obj_id = self.hash_content(b'add_get_w_id') r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) @istest def add_big(self): content, obj_id = self.hash_content(b'add_big' * 1024 * 1024) r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) @istest def add_get_wo_id(self): content, obj_id = self.hash_content(b'add_get_wo_id') r = self.storage.add(content) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) @istest def add_get_batch(self): content1, obj_id1 = self.hash_content(b'add_get_batch_1') content2, obj_id2 = self.hash_content(b'add_get_batch_2') self.storage.add(content1, obj_id1) self.storage.add(content2, obj_id2) cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2]) self.assertEqual(cr1, content1) self.assertEqual(cr2, content2) @istest def get_batch_unexisting_content(self): content, obj_id = self.hash_content(b'get_batch_unexisting_content') result = list(self.storage.get_batch([obj_id])) self.assertTrue(len(result) == 1) self.assertIsNone(result[0]) @istest def restore_content(self): valid_content, valid_obj_id = self.hash_content(b'restore_content') invalid_content = b'unexpected content' id_adding = self.storage.add(invalid_content, valid_obj_id) - id_restore = self.storage.restore(valid_content) - # Adding a false content then restore it to the right one and - # then perform a verification should result in a successful check. 
self.assertEqual(id_adding, valid_obj_id) + with self.assertRaises(exc.Error): + self.storage.check(id_adding) + id_restore = self.storage.restore(valid_content, valid_obj_id) self.assertEqual(id_restore, valid_obj_id) self.assertContentMatch(valid_obj_id, valid_content) @istest def get_missing(self): content, obj_id = self.hash_content(b'get_missing') with self.assertRaises(exc.ObjNotFoundError) as e: self.storage.get(obj_id) self.assertIn(obj_id, e.exception.args) @istest def check_missing(self): content, obj_id = self.hash_content(b'check_missing') with self.assertRaises(exc.Error): self.storage.check(obj_id) @istest def check_present(self): - content, obj_id = self.hash_content(b'check_missing') - self.storage.add(content) + content, obj_id = self.hash_content(b'check_present') + self.storage.add(content, obj_id) try: self.storage.check(obj_id) - except: + except exc.Error: self.fail('Integrity check failed') @istest def delete_missing(self): self.storage.allow_delete = True content, obj_id = self.hash_content(b'missing_content_to_delete') with self.assertRaises(exc.Error): self.storage.delete(obj_id) @istest def delete_present(self): self.storage.allow_delete = True content, obj_id = self.hash_content(b'content_to_delete') self.storage.add(content, obj_id=obj_id) self.assertTrue(self.storage.delete(obj_id)) with self.assertRaises(exc.Error): self.storage.get(obj_id) @istest def delete_not_allowed(self): self.storage.allow_delete = False content, obj_id = self.hash_content(b'content_to_delete') self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.assertTrue(self.storage.delete(obj_id)) @istest def delete_not_allowed_by_default(self): content, obj_id = self.hash_content(b'content_to_delete') self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.assertTrue(self.storage.delete(obj_id)) @istest def add_stream(self): content = [b'chunk1', b'chunk2'] _, obj_id = self.hash_content(b''.join(content)) try: self.storage.add_stream(iter(content), obj_id=obj_id) except NotImplementedError: return self.assertContentMatch(obj_id, b''.join(content)) @istest def add_stream_sleep(self): def gen_content(): yield b'chunk1' time.sleep(0.5) yield b'chunk2' _, obj_id = self.hash_content(b'placeholder_id') try: self.storage.add_stream(gen_content(), obj_id=obj_id) except NotImplementedError: return self.assertContentMatch(obj_id, b'chunk1chunk2') @istest def get_stream(self): content_l = [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9'] content = b''.join(content_l) _, obj_id = self.hash_content(content) self.storage.add(content, obj_id=obj_id) try: r = list(self.storage.get_stream(obj_id, chunk_size=1)) except NotImplementedError: return self.assertEqual(r, content_l) + + @istest + def add_batch(self): + contents = {} + for i in range(50): + content = b'Test content %02d' % i + content, obj_id = self.hash_content(content) + contents[obj_id] = content + + ret = self.storage.add_batch(contents) + self.assertEqual(len(contents), ret) + for obj_id in contents: + self.assertIn(obj_id, self.storage) diff --git a/swh/objstorage/tests/server_testing.py b/swh/objstorage/tests/server_testing.py deleted file mode 100644 index 02cc94e..0000000 --- a/swh/objstorage/tests/server_testing.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (C) 2015 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for 
more information - -import aiohttp.web -import multiprocessing -import socket -import time - -from urllib.request import urlopen - - -class ServerTestFixture(): - """ Base class for http client/server testing. - - Mix this in a test class in order to have access to an http flask - server running in background. - - Note that the subclass should define a dictionary in self.config - that contains the flask server config. - And a flask application in self.app that corresponds to the type of - server the tested client needs. - - To ensure test isolation, each test will run in a different server - and a different repertory. - - In order to correctly work, the subclass must call the parents class's - setUp() and tearDown() methods. - """ - - def setUp(self): - super().setUp() - self.start_server() - - def tearDown(self): - self.stop_server() - super().tearDown() - - def url(self): - return 'http://127.0.0.1:%d/' % self.port - - def start_server(self): - """ Spawn the API server using multiprocessing. - """ - self.process = None - - # WSGI app configuration - for key, value in self.config.items(): - self.app[key] = value - # Get an available port number - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.bind(('127.0.0.1', 0)) - self.port = sock.getsockname()[1] - sock.close() - - # Worker function for multiprocessing - def worker(app, port): - return aiohttp.web.run_app(app, port=port, print=lambda *_: None) - - self.process = multiprocessing.Process( - target=worker, args=(self.app, self.port) - ) - self.process.start() - - # Wait max 5 seconds for server to spawn - i = 0 - while i < 100: - try: - urlopen(self.url()) - except Exception: - i += 1 - time.sleep(0.1) - else: - return - - def stop_server(self): - """ Terminate the API server's process. - """ - if self.process: - self.process.terminate() diff --git a/swh/objstorage/tests/test_multiplexer_filter.py b/swh/objstorage/tests/test_multiplexer_filter.py index b7322f2..64c55db 100644 --- a/swh/objstorage/tests/test_multiplexer_filter.py +++ b/swh/objstorage/tests/test_multiplexer_filter.py @@ -1,331 +1,342 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import random +import shutil import tempfile import unittest -import random from string import ascii_lowercase from nose.tools import istest from swh.model import hashutil from swh.objstorage.exc import ObjNotFoundError, Error from swh.objstorage import get_objstorage from swh.objstorage.multiplexer.filter import read_only, id_prefix, id_regex def get_random_content(): return bytes(''.join(random.sample(ascii_lowercase, 10)), 'utf8') class MixinTestReadFilter(unittest.TestCase): # Read only filter should not allow writing def setUp(self): super().setUp() + self.tmpdir = tempfile.mkdtemp() pstorage = {'cls': 'pathslicing', - 'args': {'root': tempfile.mkdtemp(), + 'args': {'root': self.tmpdir, 'slicing': '0:5'}} base_storage = get_objstorage(**pstorage) base_storage.id = lambda cont: hashutil.hash_data(cont)['sha1'] self.storage = get_objstorage('filtered', {'storage_conf': pstorage, 'filters_conf': [read_only()]}) self.valid_content = b'pre-existing content' self.invalid_content = b'invalid_content' self.true_invalid_content = b'Anything that is not correct' self.absent_content = b'non-existent content' # Create a valid content. 
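# The read-only composition in setUp() above also works standalone: any
# base objstorage can be wrapped through the 'filtered' class. A sketch
# (the root path is illustrative):
def _read_only_sketch():
    from swh.objstorage import get_objstorage
    from swh.objstorage.multiplexer.filter import read_only
    base_conf = {'cls': 'pathslicing',
                 'args': {'root': '/tmp/objs', 'slicing': '0:5'}}
    ro = get_objstorage('filtered', {'storage_conf': base_conf,
                                     'filters_conf': [read_only()]})
    assert ro.add(b'data') is None   # writes are silently dropped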
self.valid_id = base_storage.add(self.valid_content) # Create an invalid id and add a content with it. self.invalid_id = base_storage.id(self.true_invalid_content) base_storage.add(self.invalid_content, obj_id=self.invalid_id) # Compute an id for a non-existing content. self.absent_id = base_storage.id(self.absent_content) + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmpdir) + @istest def can_contains(self): self.assertTrue(self.valid_id in self.storage) self.assertTrue(self.invalid_id in self.storage) self.assertFalse(self.absent_id in self.storage) @istest def can_iter(self): self.assertIn(self.valid_id, iter(self.storage)) self.assertIn(self.invalid_id, iter(self.storage)) @istest def can_len(self): self.assertEqual(2, len(self.storage)) @istest def can_get(self): self.assertEqual(self.valid_content, self.storage.get(self.valid_id)) self.assertEqual(self.invalid_content, self.storage.get(self.invalid_id)) @istest def can_check(self): with self.assertRaises(ObjNotFoundError): self.storage.check(self.absent_id) with self.assertRaises(Error): self.storage.check(self.invalid_id) self.storage.check(self.valid_id) @istest def can_get_random(self): self.assertEqual(1, len(list(self.storage.get_random(1)))) self.assertEqual(len(list(self.storage)), len(set(self.storage.get_random(1000)))) @istest def cannot_add(self): new_id = self.storage.add(b'New content') result = self.storage.add(self.valid_content, self.valid_id) self.assertIsNone(new_id, self.storage) self.assertIsNone(result) @istest def cannot_restore(self): result = self.storage.restore(self.valid_content, self.valid_id) self.assertIsNone(result) class MixinTestIdFilter(): """ Mixin class that tests the filters based on filter.IdFilter Methods "make_valid", "make_invalid" and "filter_storage" must be implemented by subclasses. """ def setUp(self): super().setUp() # Use a hack here : as the mock uses the content as id, it is easy to # create contents that are filtered or not. 
self.prefix = '71' + self.tmpdir = tempfile.mkdtemp() # Make the storage filtered self.sconf = {'cls': 'pathslicing', - 'args': {'root': tempfile.mkdtemp(), + 'args': {'root': self.tmpdir, 'slicing': '0:5'}} storage = get_objstorage(**self.sconf) self.base_storage = storage self.storage = self.filter_storage(self.sconf) # Set the id calculators storage.id = lambda cont: hashutil.hash_data(cont)['sha1'] # Present content with valid id self.present_valid_content = self.ensure_valid(b'yroqdtotji') self.present_valid_id = storage.id(self.present_valid_content) # Present content with invalid id self.present_invalid_content = self.ensure_invalid(b'glxddlmmzb') self.present_invalid_id = storage.id(self.present_invalid_content) # Missing content with valid id self.missing_valid_content = self.ensure_valid(b'rmzkdclkez') self.missing_valid_id = storage.id(self.missing_valid_content) # Missing content with invalid id self.missing_invalid_content = self.ensure_invalid(b'hlejfuginh') self.missing_invalid_id = storage.id(self.missing_invalid_content) # Present corrupted content with valid id self.present_corrupted_valid_content = self.ensure_valid(b'cdsjwnpaij') self.true_present_corrupted_valid_content = self.ensure_valid( b'mgsdpawcrr') self.present_corrupted_valid_id = storage.id( self.true_present_corrupted_valid_content) # Present corrupted content with invalid id self.present_corrupted_invalid_content = self.ensure_invalid( b'pspjljnrco') self.true_present_corrupted_invalid_content = self.ensure_invalid( b'rjocbnnbso') self.present_corrupted_invalid_id = storage.id( self.true_present_corrupted_invalid_content) # Missing (potentially) corrupted content with valid id self.missing_corrupted_valid_content = self.ensure_valid( b'zxkokfgtou') self.true_missing_corrupted_valid_content = self.ensure_valid( b'royoncooqa') self.missing_corrupted_valid_id = storage.id( self.true_missing_corrupted_valid_content) # Missing (potentially) corrupted content with invalid id self.missing_corrupted_invalid_content = self.ensure_invalid( b'hxaxnrmnyk') self.true_missing_corrupted_invalid_content = self.ensure_invalid( b'qhbolyuifr') self.missing_corrupted_invalid_id = storage.id( self.true_missing_corrupted_invalid_content) # Add the content that are supposed to be present self.storage.add(self.present_valid_content) self.storage.add(self.present_invalid_content) self.storage.add(self.present_corrupted_valid_content, obj_id=self.present_corrupted_valid_id) self.storage.add(self.present_corrupted_invalid_content, obj_id=self.present_corrupted_invalid_id) + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmpdir) + def filter_storage(self, sconf): raise NotImplementedError( 'Id_filter test class must have a filter_storage method') def ensure_valid(self, content=None): if content is None: content = get_random_content() while not self.storage.is_valid(self.base_storage.id(content)): content = get_random_content() return content def ensure_invalid(self, content=None): if content is None: content = get_random_content() while self.storage.is_valid(self.base_storage.id(content)): content = get_random_content() return content @istest def contains(self): # Both contents are present, but the invalid one should be ignored. 
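# The ensure_valid()/ensure_invalid() helpers used in setUp() rely on
# rejection sampling: draw random contents until the hash falls inside
# (or outside) the filtered id space. A standalone sketch for a
# two-hex-digit prefix filter (~256 draws on average; prefix value
# illustrative):
def _random_content_with_prefix(prefix='71'):
    import hashlib
    import random
    from string import ascii_lowercase
    while True:
        content = ''.join(random.sample(ascii_lowercase, 10)).encode()
        if hashlib.sha1(content).hexdigest().startswith(prefix):
            return content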
self.assertTrue(self.present_valid_id in self.storage) self.assertFalse(self.present_invalid_id in self.storage) self.assertFalse(self.missing_valid_id in self.storage) self.assertFalse(self.missing_invalid_id in self.storage) self.assertTrue(self.present_corrupted_valid_id in self.storage) self.assertFalse(self.present_corrupted_invalid_id in self.storage) self.assertFalse(self.missing_corrupted_valid_id in self.storage) self.assertFalse(self.missing_corrupted_invalid_id in self.storage) @istest def iter(self): self.assertIn(self.present_valid_id, iter(self.storage)) self.assertNotIn(self.present_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_valid_id, iter(self.storage)) self.assertNotIn(self.missing_invalid_id, iter(self.storage)) self.assertIn(self.present_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.present_corrupted_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_invalid_id, iter(self.storage)) @istest def len(self): # Four contents are present, but only two should be valid. self.assertEqual(2, len(self.storage)) @istest def get(self): self.assertEqual(self.present_valid_content, self.storage.get(self.present_valid_id)) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_invalid_id) self.assertEqual(self.present_corrupted_valid_content, self.storage.get(self.present_corrupted_valid_id)) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_invalid_id) @istest def check(self): self.storage.check(self.present_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_invalid_id) with self.assertRaises(Error): self.storage.check(self.present_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_invalid_id) @istest def get_random(self): self.assertEqual(0, len(list(self.storage.get_random(0)))) random_content = list(self.storage.get_random(1000)) self.assertIn(self.present_valid_id, random_content) self.assertNotIn(self.present_invalid_id, random_content) self.assertNotIn(self.missing_valid_id, random_content) self.assertNotIn(self.missing_invalid_id, random_content) self.assertIn(self.present_corrupted_valid_id, random_content) self.assertNotIn(self.present_corrupted_invalid_id, random_content) self.assertNotIn(self.missing_corrupted_valid_id, random_content) self.assertNotIn(self.missing_corrupted_invalid_id, random_content) @istest def add(self): # Add valid and invalid contents to the storage and check their # presence with the unfiltered storage. 
valid_content = self.ensure_valid(b'ulepsrjbgt') valid_id = self.base_storage.id(valid_content) invalid_content = self.ensure_invalid(b'znvghkjked') invalid_id = self.base_storage.id(invalid_content) self.storage.add(valid_content) self.storage.add(invalid_content) self.assertTrue(valid_id in self.base_storage) self.assertFalse(invalid_id in self.base_storage) @istest def restore(self): # Add corrupted content to the storage and the try to restore it valid_content = self.ensure_valid(b'ulepsrjbgt') valid_id = self.base_storage.id(valid_content) corrupted_content = self.ensure_valid(b'ltjkjsloyb') corrupted_id = self.base_storage.id(corrupted_content) self.storage.add(corrupted_content, obj_id=valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(corrupted_id) with self.assertRaises(Error): self.storage.check(valid_id) self.storage.restore(valid_content) self.storage.check(valid_id) class TestPrefixFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): self.prefix = b'71' super().setUp() def ensure_valid(self, content): obj_id = hashutil.hash_data(content)['sha1'] hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertTrue(hex_obj_id.startswith(self.prefix)) return content def ensure_invalid(self, content): obj_id = hashutil.hash_data(content)['sha1'] hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertFalse(hex_obj_id.startswith(self.prefix)) return content def filter_storage(self, sconf): return get_objstorage('filtered', {'storage_conf': sconf, 'filters_conf': [id_prefix(self.prefix)]}) class TestRegexFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): self.regex = r'[a-f][0-9].*' super().setUp() def filter_storage(self, sconf): return get_objstorage('filtered', {'storage_conf': sconf, 'filters_conf': [id_regex(self.regex)]}) diff --git a/swh/objstorage/tests/test_objstorage_api.py b/swh/objstorage/tests/test_objstorage_api.py index 7946075..418c8b4 100644 --- a/swh/objstorage/tests/test_objstorage_api.py +++ b/swh/objstorage/tests/test_objstorage_api.py @@ -1,35 +1,43 @@ -# Copyright (C) 2015 The Software Heritage developers +# Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import shutil import tempfile import unittest +from swh.core.tests.server_testing import ServerTestFixtureAsync + from swh.objstorage import get_objstorage from swh.objstorage.tests.objstorage_testing import ObjStorageTestFixture -from swh.objstorage.tests.server_testing import ServerTestFixture -from swh.objstorage.api.server import make_app +from swh.objstorage.api.server import app -class TestRemoteObjStorage(ServerTestFixture, ObjStorageTestFixture, +class TestRemoteObjStorage(ServerTestFixtureAsync, ObjStorageTestFixture, unittest.TestCase): """ Test the remote archive API. 
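The id filters exercised by the two filter test classes above are
configured the same way as read_only(); a sketch, with base config,
prefix and regex values taken as illustrative:

    from swh.objstorage import get_objstorage
    from swh.objstorage.multiplexer.filter import id_prefix, id_regex

    sconf = {'cls': 'pathslicing',
             'args': {'root': '/tmp/objs', 'slicing': '0:5'}}
    by_prefix = get_objstorage('filtered', {
        'storage_conf': sconf, 'filters_conf': [id_prefix(b'71')]})
    by_regex = get_objstorage('filtered', {
        'storage_conf': sconf, 'filters_conf': [id_regex(r'[a-f][0-9].*')]})

Objects whose hexadecimal id does not match are treated as absent.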
""" def setUp(self): + self.tmpdir = tempfile.mkdtemp() self.config = { 'cls': 'pathslicing', 'args': { - 'root': tempfile.mkdtemp(), + 'root': self.tmpdir, 'slicing': '0:1/0:5', 'allow_delete': True, }, 'client_max_size': 8 * 1024 * 1024, } - self.app = make_app(self.config) + self.app = app + self.app['config'] = self.config super().setUp() self.storage = get_objstorage('remote', { 'url': self.url() }) + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmpdir) diff --git a/swh/objstorage/tests/test_objstorage_azure.py b/swh/objstorage/tests/test_objstorage_azure.py index c1576ff..01bc0b3 100644 --- a/swh/objstorage/tests/test_objstorage_azure.py +++ b/swh/objstorage/tests/test_objstorage_azure.py @@ -1,76 +1,138 @@ -# Copyright (C) 2016 The Software Heritage developers +# Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +from collections import defaultdict import unittest +from unittest.mock import patch + +from nose.tools import istest from azure.common import AzureMissingResourceHttpError -from swh.objstorage.cloud.objstorage_azure import AzureCloudObjStorage +from swh.model.hashutil import hash_to_hex +from swh.objstorage import get_objstorage -from objstorage_testing import ObjStorageTestFixture +from .objstorage_testing import ObjStorageTestFixture class MockBlob(): """ Libcloud object mock that replicates its API """ def __init__(self, name, content): self.name = name self.content = content class MockBlockBlobService(): """Mock internal azure library which AzureCloudObjStorage depends upon. """ data = {} - def __init__(self, account_name, api_secret_key, container_name): + def __init__(self, account_name, account_key, **kwargs): # do not care for the account_name and the api_secret_key here - self.data[container_name] = {} + self.data = defaultdict(dict) def get_container_properties(self, container_name): + self.data[container_name] return container_name in self.data def create_blob_from_bytes(self, container_name, blob_name, blob): self.data[container_name][blob_name] = blob def get_blob_to_bytes(self, container_name, blob_name): if blob_name not in self.data[container_name]: raise AzureMissingResourceHttpError( 'Blob %s not found' % blob_name, 404) return MockBlob(name=blob_name, content=self.data[container_name][blob_name]) def delete_blob(self, container_name, blob_name): try: self.data[container_name].pop(blob_name) except KeyError: raise AzureMissingResourceHttpError( 'Blob %s not found' % blob_name, 404) return True def exists(self, container_name, blob_name): return blob_name in self.data[container_name] def list_blobs(self, container_name): for blob_name, content in self.data[container_name].items(): yield MockBlob(name=blob_name, content=content) -class MockAzureCloudObjStorage(AzureCloudObjStorage): - """ Cloud object storage that uses a mocked driver """ - def __init__(self, api_key, api_secret_key, container_name): - self.container_name = container_name - self.block_blob_service = MockBlockBlobService(api_key, api_secret_key, - container_name) - self.allow_delete = False - - class TestAzureCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() - self.storage = MockAzureCloudObjStorage( - 'account-name', 'api-secret-key', 'container-name') + patcher = patch( + 'swh.objstorage.cloud.objstorage_azure.BlockBlobService', + 
MockBlockBlobService, + ) + patcher.start() + self.addCleanup(patcher.stop) + + self.storage = get_objstorage('azure', { + 'account_name': 'account-name', + 'api_secret_key': 'api-secret-key', + 'container_name': 'container-name', + }) + + +class TestPrefixedAzureCloudObjStorage(ObjStorageTestFixture, + unittest.TestCase): + def setUp(self): + super().setUp() + patcher = patch( + 'swh.objstorage.cloud.objstorage_azure.BlockBlobService', + MockBlockBlobService, + ) + patcher.start() + self.addCleanup(patcher.stop) + + self.accounts = {} + for prefix in '0123456789abcdef': + self.accounts[prefix] = { + 'account_name': 'account_%s' % prefix, + 'api_secret_key': 'secret_key_%s' % prefix, + 'container_name': 'container_%s' % prefix, + } + + self.storage = get_objstorage('azure-prefixed', { + 'accounts': self.accounts + }) + + @istest + def prefixedazure_instantiation_missing_prefixes(self): + del self.accounts['d'] + del self.accounts['e'] + + with self.assertRaisesRegex(ValueError, 'Missing prefixes'): + get_objstorage('azure-prefixed', { + 'accounts': self.accounts + }) + + @istest + def prefixedazure_instantiation_inconsistent_prefixes(self): + self.accounts['00'] = self.accounts['0'] + + with self.assertRaisesRegex(ValueError, 'Inconsistent prefixes'): + get_objstorage('azure-prefixed', { + 'accounts': self.accounts + }) + + @istest + def prefixedazure_sharding_behavior(self): + for i in range(100): + content, obj_id = self.hash_content(b'test_content_%02d' % i) + self.storage.add(content, obj_id=obj_id) + hex_obj_id = hash_to_hex(obj_id) + prefix = hex_obj_id[0] + self.assertTrue( + self.storage.prefixes[prefix][0].exists( + self.accounts[prefix]['container_name'], hex_obj_id + )) diff --git a/swh/objstorage/tests/test_objstorage_cloud.py b/swh/objstorage/tests/test_objstorage_cloud.py index b317bb8..5dbdfdf 100644 --- a/swh/objstorage/tests/test_objstorage_cloud.py +++ b/swh/objstorage/tests/test_objstorage_cloud.py @@ -1,97 +1,97 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.objstorage.cloud.objstorage_cloud import CloudObjStorage from libcloud.storage.types import (ObjectDoesNotExistError, ContainerDoesNotExistError) from libcloud.common.types import InvalidCredsError -from objstorage_testing import ObjStorageTestFixture +from .objstorage_testing import ObjStorageTestFixture API_KEY = 'API_KEY' API_SECRET_KEY = 'API SECRET KEY' CONTAINER_NAME = 'test_container' class MockLibcloudObject(): """ Libcloud object mock that replicates its API """ def __init__(self, name, content): self.name = name self.content = list(content) def as_stream(self): yield from iter(self.content) class MockLibcloudDriver(): """ Mock driver that replicates the used LibCloud API """ def __init__(self, api_key, api_secret_key): self.containers = {CONTAINER_NAME: {}} # Storage is initialized self.api_key = api_key self.api_secret_key = api_secret_key def _check_credentials(self): # Private method may be known as another name in Libcloud but is used # to replicate libcloud behavior (i.e. 
check credential at each # request) if self.api_key != API_KEY or self.api_secret_key != API_SECRET_KEY: raise InvalidCredsError() def get_container(self, container_name): try: return self.containers[container_name] except KeyError: raise ContainerDoesNotExistError(container_name=container_name, driver=self, value=None) def iterate_container_objects(self, container): self._check_credentials() yield from container.values() def get_object(self, container_name, obj_id): self._check_credentials() try: container = self.get_container(container_name) return container[obj_id] except KeyError: raise ObjectDoesNotExistError(object_name=obj_id, driver=self, value=None) def delete_object(self, obj): self._check_credentials() try: container = self.get_container(CONTAINER_NAME) container.pop(obj.name) return True except KeyError: raise ObjectDoesNotExistError(object_name=obj.name, driver=self, value=None) def upload_object_via_stream(self, content, container, obj_id): self._check_credentials() obj = MockLibcloudObject(obj_id, content) container[obj_id] = obj class MockCloudObjStorage(CloudObjStorage): """ Cloud object storage that uses a mocked driver """ def _get_driver(self, api_key, api_secret_key): return MockLibcloudDriver(api_key, api_secret_key) def _get_provider(self): # Implement this for the abc requirement, but behavior is defined in # _get_driver. pass class TestCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() self.storage = MockCloudObjStorage(API_KEY, API_SECRET_KEY, CONTAINER_NAME) diff --git a/swh/objstorage/tests/test_objstorage_in_memory.py b/swh/objstorage/tests/test_objstorage_in_memory.py index ca90422..47a2446 100644 --- a/swh/objstorage/tests/test_objstorage_in_memory.py +++ b/swh/objstorage/tests/test_objstorage_in_memory.py @@ -1,16 +1,16 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.objstorage.objstorage_in_memory import InMemoryObjStorage -from objstorage_testing import ObjStorageTestFixture +from .objstorage_testing import ObjStorageTestFixture -class TestMultiplexerObjStorage(ObjStorageTestFixture, unittest.TestCase): +class TestInMemoryObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() self.storage = InMemoryObjStorage() diff --git a/swh/objstorage/tests/test_objstorage_instantiation.py b/swh/objstorage/tests/test_objstorage_instantiation.py index a515855..4a1e10a 100644 --- a/swh/objstorage/tests/test_objstorage_instantiation.py +++ b/swh/objstorage/tests/test_objstorage_instantiation.py @@ -1,46 +1,53 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import shutil import tempfile import unittest from nose.tools import istest from swh.objstorage import get_objstorage from swh.objstorage.objstorage_pathslicing import PathSlicingObjStorage from swh.objstorage.api.client import RemoteObjStorage class TestObjStorageInitialization(unittest.TestCase): """ Test that the methods for ObjStorage initializations with `get_objstorage` works properly. 
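The factory takes a class name plus an 'args' dict; the two shapes
exercised below, with root path and URL illustrative:

    from swh.objstorage import get_objstorage

    local = get_objstorage(cls='pathslicing',
                           args={'root': '/tmp/objs',
                                 'slicing': '0:2/0:5'})
    remote = get_objstorage(cls='remote',
                            args={'url': 'http://127.0.0.1:4242/'})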
""" def setUp(self): self.path = tempfile.mkdtemp() + self.path2 = tempfile.mkdtemp() # Server is launched at self.url() - self.config = {'storage_base': tempfile.mkdtemp(), + self.config = {'storage_base': self.path2, 'storage_slicing': '0:1/0:5'} super().setUp() + def tearDown(self): + super().tearDown() + shutil.rmtree(self.path) + shutil.rmtree(self.path2) + @istest def pathslicing_objstorage(self): conf = { 'cls': 'pathslicing', 'args': {'root': self.path, 'slicing': '0:2/0:5'} } st = get_objstorage(**conf) self.assertTrue(isinstance(st, PathSlicingObjStorage)) @istest def remote_objstorage(self): conf = { 'cls': 'remote', 'args': { 'url': 'http://127.0.0.1:4242/' } } st = get_objstorage(**conf) self.assertTrue(isinstance(st, RemoteObjStorage)) diff --git a/swh/objstorage/tests/test_objstorage_multiplexer.py b/swh/objstorage/tests/test_objstorage_multiplexer.py index 3aaa7b9..1404460 100644 --- a/swh/objstorage/tests/test_objstorage_multiplexer.py +++ b/swh/objstorage/tests/test_objstorage_multiplexer.py @@ -1,90 +1,74 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import os +import shutil import tempfile import unittest from nose.tools import istest from swh.objstorage import PathSlicingObjStorage from swh.objstorage.multiplexer import MultiplexerObjStorage from swh.objstorage.multiplexer.filter import add_filter, read_only -from objstorage_testing import ObjStorageTestFixture +from .objstorage_testing import ObjStorageTestFixture class TestMultiplexerObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() - self.storage_v1 = PathSlicingObjStorage(tempfile.mkdtemp(), '0:2/2:4') - self.storage_v2 = PathSlicingObjStorage(tempfile.mkdtemp(), '0:1/0:5') + self.tmpdir = tempfile.mkdtemp() + os.mkdir(os.path.join(self.tmpdir, 'root1')) + os.mkdir(os.path.join(self.tmpdir, 'root2')) + self.storage_v1 = PathSlicingObjStorage( + os.path.join(self.tmpdir, 'root1'), '0:2/2:4') + self.storage_v2 = PathSlicingObjStorage( + os.path.join(self.tmpdir, 'root2'), '0:1/0:5') self.r_storage = add_filter(self.storage_v1, read_only()) self.w_storage = self.storage_v2 self.storage = MultiplexerObjStorage([self.r_storage, self.w_storage]) + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmpdir) + @istest def contains(self): content_p, obj_id_p = self.hash_content(b'contains_present') content_m, obj_id_m = self.hash_content(b'contains_missing') self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) - @istest - def iter(self): - content, obj_id = self.hash_content(b'iter') - self.assertEqual(list(iter(self.storage)), []) - self.storage.add(content, obj_id=obj_id) - self.assertEqual(list(iter(self.storage)), [obj_id]) - - @istest - def len(self): - content, obj_id = self.hash_content(b'len') - self.assertEqual(len(self.storage), 0) - self.storage.add(content, obj_id=obj_id) - self.assertEqual(len(self.storage), 1) - - @istest - def len_multiple(self): - content, obj_id = self.hash_content(b'len_multiple') - # Add a content to the read-only storage - self.storage_v1.add(content) - self.assertEqual(len(self.storage), 1) - # By adding the same content to the global storage, it should be - # Replicated. - # len() behavior is to indicates the number of files, not unique - # contents. 
- self.storage.add(content) - self.assertEqual(len(self.storage), 2) - @istest def delete_missing(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().delete_missing() @istest def delete_present(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().delete_present() @istest def get_random_contents(self): content, obj_id = self.hash_content(b'get_random_content') self.storage.add(content) random_contents = list(self.storage.get_random(1)) self.assertEqual(1, len(random_contents)) self.assertIn(obj_id, random_contents) @istest def access_readonly(self): # Add a content to the readonly storage content, obj_id = self.hash_content(b'content in read-only') self.storage_v1.add(content) # Try to retrieve it on the main storage self.assertIn(obj_id, self.storage) diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py index 8e9c4a0..e209c8d 100644 --- a/swh/objstorage/tests/test_objstorage_pathslicing.py +++ b/swh/objstorage/tests/test_objstorage_pathslicing.py @@ -1,71 +1,76 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import shutil import tempfile import unittest from nose.tools import istest from swh.model import hashutil from swh.objstorage import exc from swh.objstorage import get_objstorage -from objstorage_testing import ObjStorageTestFixture +from .objstorage_testing import ObjStorageTestFixture class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() self.slicing = '0:2/2:4/4:6' self.tmpdir = tempfile.mkdtemp() self.storage = get_objstorage( 'pathslicing', {'root': self.tmpdir, 'slicing': self.slicing} ) + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmpdir) + def content_path(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.storage._obj_path(hex_obj_id) @istest def iter(self): content, obj_id = self.hash_content(b'iter') self.assertEqual(list(iter(self.storage)), []) self.storage.add(content, obj_id=obj_id) self.assertEqual(list(iter(self.storage)), [obj_id]) @istest def len(self): content, obj_id = self.hash_content(b'len') self.assertEqual(len(self.storage), 0) self.storage.add(content, obj_id=obj_id) self.assertEqual(len(self.storage), 1) @istest def check_not_gzip(self): content, obj_id = self.hash_content(b'check_not_gzip') self.storage.add(content, obj_id=obj_id) with open(self.content_path(obj_id), 'ab') as f: # Add garbage. 
f.write(b'garbage') with self.assertRaises(exc.Error): self.storage.check(obj_id) @istest def check_id_mismatch(self): content, obj_id = self.hash_content(b'check_id_mismatch') self.storage.add(content, obj_id=obj_id) with open(self.content_path(obj_id), 'wb') as f: f.write(b'unexpected content') with self.assertRaises(exc.Error): self.storage.check(obj_id) @istest def get_random_contents(self): content, obj_id = self.hash_content(b'get_random_content') self.storage.add(content, obj_id=obj_id) random_contents = list(self.storage.get_random(1)) self.assertEqual(1, len(random_contents)) self.assertIn(obj_id, random_contents) diff --git a/swh/objstorage/tests/test_objstorage_striping.py b/swh/objstorage/tests/test_objstorage_striping.py new file mode 100644 index 0000000..6c2beb7 --- /dev/null +++ b/swh/objstorage/tests/test_objstorage_striping.py @@ -0,0 +1,84 @@ +# Copyright (C) 2015-2016 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import os +import shutil +import tempfile +import unittest + +from nose.tools import istest + +from swh.objstorage import get_objstorage +from .objstorage_testing import ObjStorageTestFixture + + +class TestStripingObjStorage(ObjStorageTestFixture, unittest.TestCase): + + def setUp(self): + super().setUp() + self.base_dir = tempfile.mkdtemp() + os.mkdir(os.path.join(self.base_dir, 'root1')) + os.mkdir(os.path.join(self.base_dir, 'root2')) + storage_config = { + 'cls': 'striping', + 'args': { + 'objstorages': [ + { + 'cls': 'pathslicing', + 'args': { + 'root': os.path.join(self.base_dir, 'root1'), + 'slicing': '0:2', + 'allow_delete': True, + } + }, + { + 'cls': 'pathslicing', + 'args': { + 'root': os.path.join(self.base_dir, 'root2'), + 'slicing': '0:2', + 'allow_delete': True, + } + }, + ] + } + } + self.storage = get_objstorage(**storage_config) + + def tearDown(self): + shutil.rmtree(self.base_dir) + + @istest + def add_get_wo_id(self): + self.skipTest("can't add without id in the multiplexer storage") + + @istest + def add_striping_behavior(self): + exp_storage_counts = [0, 0] + storage_counts = [0, 0] + for i in range(100): + content, obj_id = self.hash_content( + b'striping_behavior_test%02d' % i + ) + self.storage.add(content, obj_id) + exp_storage_counts[self.storage.get_storage_index(obj_id)] += 1 + count = 0 + for i, storage in enumerate(self.storage.storages): + if obj_id not in storage: + continue + count += 1 + storage_counts[i] += 1 + self.assertEqual(count, 1) + self.assertEqual(storage_counts, exp_storage_counts) + + @istest + def get_striping_behavior(self): + # Make sure we can read objects that are available in any backend + # storage + content, obj_id = self.hash_content(b'striping_behavior_test') + for storage in self.storage.storages: + storage.add(content, obj_id) + self.assertIn(obj_id, self.storage) + storage.delete(obj_id) + self.assertNotIn(obj_id, self.storage) diff --git a/version.txt b/version.txt index cb02228..2633902 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.24-0-g6611f95 \ No newline at end of file +v0.0.25-0-g418dfe7 \ No newline at end of file
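A closing note on the new striping storage: each object is routed to
exactly one backend by a deterministic function of its id, which is what
add_striping_behavior verifies. The index selection could be as simple as
the following sketch; this is an assumed implementation for illustration,
not necessarily the one get_storage_index actually uses:

    def get_storage_index(obj_id, num_storages):
        # Interpret the leading bytes of the id as an integer and
        # reduce it modulo the number of backends.
        return int.from_bytes(obj_id[:8], 'big') % num_storages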