diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 380c658..69b3349 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,46 +1,40 @@
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
   rev: v2.4.0
   hooks:
   - id: trailing-whitespace
   - id: flake8
   - id: check-json
   - id: check-yaml
 - repo: https://github.com/codespell-project/codespell
   rev: v1.16.0
   hooks:
   - id: codespell
 - repo: local
   hooks:
   - id: mypy
     name: mypy
     entry: mypy
     args: [swh]
     pass_filenames: false
     language: system
     types: [python]
+- repo: https://github.com/python/black
+  rev: 19.10b0
+  hooks:
+  - id: black
+
 # unfortunately, we are far from being able to enable this...
 # - repo: https://github.com/PyCQA/pydocstyle.git
 #   rev: 4.0.0
 #   hooks:
 #   - id: pydocstyle
 #     name: pydocstyle
 #     description: pydocstyle is a static analysis tool for checking compliance with Python docstring conventions.
 #     entry: pydocstyle --convention=google
 #     language: python
 #     types: [python]
-# black requires py3.6+
-#- repo: https://github.com/python/black
-#  rev: 19.3b0
-#  hooks:
-#  - id: black
-#    language_version: python3
-#- repo: https://github.com/asottile/blacken-docs
-#  rev: v1.0.0-1
-#  hooks:
-#  - id: blacken-docs
-#    additional_dependencies: [black==19.3b0]
diff --git a/bin/swh-objstorage-azure b/bin/swh-objstorage-azure
index 33a56bd..e481bb9 100755
--- a/bin/swh-objstorage-azure
+++ b/bin/swh-objstorage-azure
@@ -1,112 +1,121 @@
 #!/usr/bin/env python3

 # Copyright (C) 2016 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 # NOT FOR PRODUCTION

 import click

 from swh.objstorage import get_objstorage, exc
 from swh.core import config, hashutil


 class AzureAccess(config.SWHConfig):
     """This is an orchestration class to try and check objstorage_azure
     implementation."""

     DEFAULT_CONFIG = {
         # Output storage
-        'storage_azure': ('dict',
-                          {'cls': 'pathslicing',
-                           'args': {'root': '/srv/softwareheritage/objects',
-                                    'slicing': '0:2/2:4/4:6'}}),
+        "storage_azure": (
+            "dict",
+            {
+                "cls": "pathslicing",
+                "args": {
+                    "root": "/srv/softwareheritage/objects",
+                    "slicing": "0:2/2:4/4:6",
+                },
+            },
+        ),
         # Input storage
-        'storage_local': ('dict',
-                          {'cls': 'pathslicing',
-                           'args': {'root': '/srv/softwareheritage/objects',
-                                    'slicing': '0:2/2:4/4:6'}}),
+        "storage_local": (
+            "dict",
+            {
+                "cls": "pathslicing",
+                "args": {
+                    "root": "/srv/softwareheritage/objects",
+                    "slicing": "0:2/2:4/4:6",
+                },
+            },
+        ),
     }

-    CONFIG_BASE_FILENAME = 'objstorage/azure'
+    CONFIG_BASE_FILENAME = "objstorage/azure"

     def __init__(self):
         super().__init__()
         self.config = self.parse_config_file()

-        self.azure_cloud_storage = get_objstorage(
-            **self.config['storage_azure'])
-        self.read_objstorage = get_objstorage(
-            **self.config['storage_local'])
+        self.azure_cloud_storage = get_objstorage(**self.config["storage_azure"])
+        self.read_objstorage = get_objstorage(**self.config["storage_local"])

     def list_contents(self, limit=10):
         count = 0
         for c in self.azure_cloud_storage:
             count += 1
             yield c
             if count >= limit:
                 return

     def send_one_content(self, obj_id):
         obj_content = self.read_objstorage.get(obj_id)
-        self.azure_cloud_storage.add(content=obj_content,
-                                     obj_id=obj_id)
+        self.azure_cloud_storage.add(content=obj_content, obj_id=obj_id)

     def check_integrity(self, obj_id):
         self.azure_cloud_storage.check(obj_id)  # will raise if problem

     def check_presence(self, obj_id):
         return obj_id in self.azure_cloud_storage

     def download(self, obj_id):
         return self.azure_cloud_storage.get(obj_id)


 @click.command()
 def tryout():
     obj_azure = AzureAccess()

-    hex_sample_id = '00000085c856b32f0709a4f5d669bb4faa3a0ce9'
+    hex_sample_id = "00000085c856b32f0709a4f5d669bb4faa3a0ce9"
     sample_id = hashutil.hex_to_hash(hex_sample_id)

     check_presence = obj_azure.check_presence(sample_id)
-    print('presence first time should be False:', check_presence)
+    print("presence first time should be False:", check_presence)

     obj_azure.send_one_content(sample_id)
     check_presence = obj_azure.check_presence(sample_id)
-    print('presence True:', check_presence)
+    print("presence True:", check_presence)

-    hex_sample_2 = 'dfeffffeffff17b439f3e582813bd875e7141a0e'
+    hex_sample_2 = "dfeffffeffff17b439f3e582813bd875e7141a0e"
     sample_2 = hashutil.hex_to_hash(hex_sample_2)
     check_presence = obj_azure.check_presence(sample_2)
-    print('presence False:', check_presence)
+    print("presence False:", check_presence)

     print()
-    print('Download a blob')
+    print("Download a blob")
     blob_content = obj_azure.download(sample_id)
     print(blob_content)

     print()
     try:
-        not_found_hex_id = hex_sample_id.replace('0', 'f')
+        not_found_hex_id = hex_sample_id.replace("0", "f")
         not_found_id = hashutil.hash_to_hex(not_found_hex_id)
         obj_azure.download(not_found_id)
     except exc.ObjNotFoundError:
-        print('Expected `blob does not exist`!')
+        print("Expected `blob does not exist`!")

     # print()
     # print('blobs:')
     # print(list(obj_azure.list_contents()))

     # print()
     # print('content of %s' % hex_sample_id)
     # print(obj_azure.download(hex_sample_id))

     obj_azure.check_integrity(sample_id)


-if __name__ == '__main__':
+if __name__ == "__main__":
     tryout()
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..8d79b7e
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,6 @@
+[flake8]
+# E203: whitespaces before ':'
+# E231: missing whitespace after ','
+# W503: line break before binary operator
+ignore = E203,E231,W503
+max-line-length = 88
diff --git a/setup.py b/setup.py
index b588838..da36137 100755
--- a/setup.py
+++ b/setup.py
@@ -1,71 +1,71 @@
 #!/usr/bin/env python3
 # Copyright (C) 2015-2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from setuptools import setup, find_packages

 from os import path
 from io import open

 here = path.abspath(path.dirname(__file__))

 # Get the long description from the README file
-with open(path.join(here, 'README.md'), encoding='utf-8') as f:
+with open(path.join(here, "README.md"), encoding="utf-8") as f:
     long_description = f.read()


 def parse_requirements(name=None):
     if name:
-        reqf = 'requirements-%s.txt' % name
+        reqf = "requirements-%s.txt" % name
     else:
-        reqf = 'requirements.txt'
+        reqf = "requirements.txt"

     requirements = []
     if not path.exists(reqf):
         return requirements

     with open(reqf) as f:
         for line in f.readlines():
             line = line.strip()
-            if not line or line.startswith('#'):
+            if not line or line.startswith("#"):
                 continue
             requirements.append(line)
     return requirements


 setup(
-    name='swh.objstorage',
-    description='Software Heritage Object Storage',
+    name="swh.objstorage",
+    description="Software Heritage Object Storage",
     long_description=long_description,
-    long_description_content_type='text/markdown',
-    author='Software Heritage developers',
-    author_email='swh-devel@inria.fr',
-    url='https://forge.softwareheritage.org/diffusion/DOBJS',
+
+    long_description_content_type="text/markdown",
+    author="Software Heritage developers",
+    author_email="swh-devel@inria.fr",
+    url="https://forge.softwareheritage.org/diffusion/DOBJS",
     packages=find_packages(),
-    python_requires='>=3.6',
-    install_requires=parse_requirements() + parse_requirements('swh'),
-    setup_requires=['vcversioner'],
-    extras_require={'testing': parse_requirements('test')},
+    python_requires=">=3.6",
+    install_requires=parse_requirements() + parse_requirements("swh"),
+    setup_requires=["vcversioner"],
+    extras_require={"testing": parse_requirements("test")},
     vcversioner={},
     include_package_data=True,
-    entry_points='''
+    entry_points="""
         [console_scripts]
         swh-objstorage=swh.objstorage.cli:main
         [swh.cli.subcommands]
         objstorage=swh.objstorage.cli:cli
-    ''',
+    """,
     classifiers=[
         "Programming Language :: Python :: 3",
         "Intended Audience :: Developers",
         "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
         "Operating System :: OS Independent",
         "Development Status :: 5 - Production/Stable",
     ],
     project_urls={
-        'Bug Reports': 'https://forge.softwareheritage.org/maniphest',
-        'Funding': 'https://www.softwareheritage.org/donate',
-        'Source': 'https://forge.softwareheritage.org/source/swh-objstorage',
+        "Bug Reports": "https://forge.softwareheritage.org/maniphest",
+        "Funding": "https://www.softwareheritage.org/donate",
+        "Source": "https://forge.softwareheritage.org/source/swh-objstorage",
     },
 )
diff --git a/swh/objstorage/__init__.py b/swh/objstorage/__init__.py
index 373786c..498e16e 100644
--- a/swh/objstorage/__init__.py
+++ b/swh/objstorage/__init__.py
@@ -1,112 +1,107 @@
 # Copyright (C) 2016 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from swh.objstorage.objstorage import ObjStorage, ID_HASH_LENGTH  # noqa
 from swh.objstorage.backends.pathslicing import PathSlicingObjStorage
 from swh.objstorage.backends.in_memory import InMemoryObjStorage
 from swh.objstorage.api.client import RemoteObjStorage
-from swh.objstorage.multiplexer import (
-    MultiplexerObjStorage, StripingObjStorage)
+from swh.objstorage.multiplexer import MultiplexerObjStorage, StripingObjStorage
 from swh.objstorage.multiplexer.filter import add_filters
 from swh.objstorage.backends.seaweed import WeedObjStorage
 from swh.objstorage.backends.generator import RandomGeneratorObjStorage

 from typing import Callable, Dict, Union

-__all__ = ['get_objstorage', 'ObjStorage']
+__all__ = ["get_objstorage", "ObjStorage"]

-_STORAGE_CLASSES: Dict[
-    str,
-    Union[type, Callable[..., type]]
-] = {
-    'pathslicing': PathSlicingObjStorage,
-    'remote': RemoteObjStorage,
-    'memory': InMemoryObjStorage,
-    'weed': WeedObjStorage,
-    'random': RandomGeneratorObjStorage,
+_STORAGE_CLASSES: Dict[str, Union[type, Callable[..., type]]] = {
+    "pathslicing": PathSlicingObjStorage,
+    "remote": RemoteObjStorage,
+    "memory": InMemoryObjStorage,
+    "weed": WeedObjStorage,
+    "random": RandomGeneratorObjStorage,
 }

-_STORAGE_CLASSES_MISSING = {
-}
+_STORAGE_CLASSES_MISSING = {}

 try:
     from swh.objstorage.backends.azure import (
         AzureCloudObjStorage,
         PrefixedAzureCloudObjStorage,
     )
-    _STORAGE_CLASSES['azure'] = AzureCloudObjStorage
-    _STORAGE_CLASSES['azure-prefixed'] = PrefixedAzureCloudObjStorage
+
+    _STORAGE_CLASSES["azure"] = AzureCloudObjStorage
+    _STORAGE_CLASSES["azure-prefixed"] = PrefixedAzureCloudObjStorage
 except ImportError as e:
-
_STORAGE_CLASSES_MISSING['azure'] = e.args[0] - _STORAGE_CLASSES_MISSING['azure-prefixed'] = e.args[0] + _STORAGE_CLASSES_MISSING["azure"] = e.args[0] + _STORAGE_CLASSES_MISSING["azure-prefixed"] = e.args[0] try: from swh.objstorage.backends.rados import RADOSObjStorage - _STORAGE_CLASSES['rados'] = RADOSObjStorage + + _STORAGE_CLASSES["rados"] = RADOSObjStorage except ImportError as e: - _STORAGE_CLASSES_MISSING['rados'] = e.args[0] + _STORAGE_CLASSES_MISSING["rados"] = e.args[0] try: from swh.objstorage.backends.libcloud import ( AwsCloudObjStorage, OpenStackCloudObjStorage, ) - _STORAGE_CLASSES['s3'] = AwsCloudObjStorage - _STORAGE_CLASSES['swift'] = OpenStackCloudObjStorage + + _STORAGE_CLASSES["s3"] = AwsCloudObjStorage + _STORAGE_CLASSES["swift"] = OpenStackCloudObjStorage except ImportError as e: - _STORAGE_CLASSES_MISSING['s3'] = e.args[0] - _STORAGE_CLASSES_MISSING['swift'] = e.args[0] + _STORAGE_CLASSES_MISSING["s3"] = e.args[0] + _STORAGE_CLASSES_MISSING["swift"] = e.args[0] def get_objstorage(cls, args): """ Create an ObjStorage using the given implementation class. Args: cls (str): objstorage class unique key contained in the _STORAGE_CLASSES dict. args (dict): arguments for the required class of objstorage that must match exactly the one in the `__init__` method of the class. Returns: subclass of ObjStorage that match the given `storage_class` argument. Raises: ValueError: if the given storage class is not a valid objstorage key. """ if cls in _STORAGE_CLASSES: return _STORAGE_CLASSES[cls](**args) else: - raise ValueError('Storage class {} is not available: {}'.format( - cls, - _STORAGE_CLASSES_MISSING.get(cls, 'unknown name'))) + raise ValueError( + "Storage class {} is not available: {}".format( + cls, _STORAGE_CLASSES_MISSING.get(cls, "unknown name") + ) + ) def _construct_filtered_objstorage(storage_conf, filters_conf): - return add_filters( - get_objstorage(**storage_conf), - filters_conf - ) + return add_filters(get_objstorage(**storage_conf), filters_conf) -_STORAGE_CLASSES['filtered'] = _construct_filtered_objstorage +_STORAGE_CLASSES["filtered"] = _construct_filtered_objstorage def _construct_multiplexer_objstorage(objstorages): - storages = [get_objstorage(**conf) - for conf in objstorages] + storages = [get_objstorage(**conf) for conf in objstorages] return MultiplexerObjStorage(storages) -_STORAGE_CLASSES['multiplexer'] = _construct_multiplexer_objstorage +_STORAGE_CLASSES["multiplexer"] = _construct_multiplexer_objstorage def _construct_striping_objstorage(objstorages): - storages = [get_objstorage(**conf) - for conf in objstorages] + storages = [get_objstorage(**conf) for conf in objstorages] return StripingObjStorage(storages) -_STORAGE_CLASSES['striping'] = _construct_striping_objstorage +_STORAGE_CLASSES["striping"] = _construct_striping_objstorage diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py index bacb432..4c80018 100644 --- a/swh/objstorage/api/client.py +++ b/swh/objstorage/api/client.py @@ -1,92 +1,95 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import RPCClient from swh.model import hashutil from ..objstorage import DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT from ..exc import Error, ObjNotFoundError, ObjStorageAPIError class RemoteObjStorage: """Proxy to a remote object storage. 
This class allows to connect to an object storage server via http protocol. Attributes: url (string): The url of the server to connect. Must end with a '/' session: The session to send requests. """ def __init__(self, **kwargs): self._proxy = RPCClient( api_exception=ObjStorageAPIError, reraise_exceptions=[ObjNotFoundError, Error], - **kwargs) + **kwargs + ) def check_config(self, *, check_write): - return self._proxy.post('check_config', {'check_write': check_write}) + return self._proxy.post("check_config", {"check_write": check_write}) def __contains__(self, obj_id): - return self._proxy.post('content/contains', {'obj_id': obj_id}) + return self._proxy.post("content/contains", {"obj_id": obj_id}) def add(self, content, obj_id=None, check_presence=True): - return self._proxy.post('content/add', { - 'content': content, 'obj_id': obj_id, - 'check_presence': check_presence}) + return self._proxy.post( + "content/add", + {"content": content, "obj_id": obj_id, "check_presence": check_presence}, + ) def add_batch(self, contents, check_presence=True): - return self._proxy.post('content/add/batch', { - 'contents': contents, - 'check_presence': check_presence, - }) + return self._proxy.post( + "content/add/batch", + {"contents": contents, "check_presence": check_presence,}, + ) def restore(self, content, obj_id=None, *args, **kwargs): return self.add(content, obj_id, check_presence=False) def get(self, obj_id): - return self._proxy.post('content/get', {'obj_id': obj_id}) + return self._proxy.post("content/get", {"obj_id": obj_id}) def get_batch(self, obj_ids): - return self._proxy.post('content/get/batch', {'obj_ids': obj_ids}) + return self._proxy.post("content/get/batch", {"obj_ids": obj_ids}) def check(self, obj_id): - return self._proxy.post('content/check', {'obj_id': obj_id}) + return self._proxy.post("content/check", {"obj_id": obj_id}) def delete(self, obj_id): # deletion permission are checked server-side - return self._proxy.post('content/delete', {'obj_id': obj_id}) + return self._proxy.post("content/delete", {"obj_id": obj_id}) # Management methods def get_random(self, batch_size): - return self._proxy.post('content/get/random', - {'batch_size': batch_size}) + return self._proxy.post("content/get/random", {"batch_size": batch_size}) # Streaming methods def add_stream(self, content_iter, obj_id, check_presence=True): obj_id = hashutil.hash_to_hex(obj_id) return self._proxy.post_stream( - 'content/add_stream/{}'.format(obj_id), - params={'check_presence': check_presence}, - data=content_iter) + "content/add_stream/{}".format(obj_id), + params={"check_presence": check_presence}, + data=content_iter, + ) def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): obj_id = hashutil.hash_to_hex(obj_id) - return self._proxy.get_stream('content/get_stream/{}'.format(obj_id), - chunk_size=chunk_size) + return self._proxy.get_stream( + "content/get_stream/{}".format(obj_id), chunk_size=chunk_size + ) def __iter__(self): - yield from self._proxy.get_stream('content') + yield from self._proxy.get_stream("content") def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): - params = {'limit': limit} + params = {"limit": limit} if last_obj_id: - params['last_obj_id'] = hashutil.hash_to_hex(last_obj_id) - yield from self._proxy.get_stream('content', params=params) + params["last_obj_id"] = hashutil.hash_to_hex(last_obj_id) + yield from self._proxy.get_stream("content", params=params) diff --git a/swh/objstorage/api/server.py b/swh/objstorage/api/server.py index db68d38..517b004 100644 --- 
a/swh/objstorage/api/server.py +++ b/swh/objstorage/api/server.py @@ -1,264 +1,270 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import aiohttp.web import json from swh.core.config import read as config_read -from swh.core.api.asynchronous import (RPCServerApp, decode_request, - encode_data_server as encode_data) +from swh.core.api.asynchronous import ( + RPCServerApp, + decode_request, + encode_data_server as encode_data, +) from swh.core.api.serializers import msgpack_loads, SWHJSONDecoder from swh.model import hashutil from swh.objstorage import get_objstorage from swh.objstorage.objstorage import DEFAULT_LIMIT from swh.objstorage.exc import Error, ObjNotFoundError from swh.core.statsd import statsd def timed(f): async def w(*a, **kw): with statsd.timed( - 'swh_objstorage_request_duration_seconds', - tags={'endpoint': f.__name__}): + "swh_objstorage_request_duration_seconds", tags={"endpoint": f.__name__} + ): return await f(*a, **kw) + return w @timed async def index(request): return aiohttp.web.Response(body="SWH Objstorage API server") @timed async def check_config(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].check_config(**req)) + return encode_data(request.app["objstorage"].check_config(**req)) @timed async def contains(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].__contains__(**req)) + return encode_data(request.app["objstorage"].__contains__(**req)) @timed async def add_bytes(request): req = await decode_request(request) - statsd.increment('swh_objstorage_in_bytes_total', - len(req['content']), - tags={'endpoint': 'add_bytes'}) - return encode_data(request.app['objstorage'].add(**req)) + statsd.increment( + "swh_objstorage_in_bytes_total", + len(req["content"]), + tags={"endpoint": "add_bytes"}, + ) + return encode_data(request.app["objstorage"].add(**req)) @timed async def add_batch(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].add_batch(**req)) + return encode_data(request.app["objstorage"].add_batch(**req)) @timed async def get_bytes(request): req = await decode_request(request) - ret = request.app['objstorage'].get(**req) + ret = request.app["objstorage"].get(**req) - statsd.increment('swh_objstorage_out_bytes_total', - len(ret), - tags={'endpoint': 'get_bytes'}) + statsd.increment( + "swh_objstorage_out_bytes_total", len(ret), tags={"endpoint": "get_bytes"} + ) return encode_data(ret) @timed async def get_batch(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].get_batch(**req)) + return encode_data(request.app["objstorage"].get_batch(**req)) @timed async def check(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].check(**req)) + return encode_data(request.app["objstorage"].check(**req)) @timed async def delete(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].delete(**req)) + return encode_data(request.app["objstorage"].delete(**req)) # Management methods + @timed async def get_random_contents(request): req = await decode_request(request) - return encode_data(request.app['objstorage'].get_random(**req)) + return encode_data(request.app["objstorage"].get_random(**req)) # Streaming methods + 
@timed async def add_stream(request): - hex_id = request.match_info['hex_id'] + hex_id = request.match_info["hex_id"] obj_id = hashutil.hash_to_bytes(hex_id) - check_pres = (request.query.get('check_presence', '').lower() == 'true') - objstorage = request.app['objstorage'] + check_pres = request.query.get("check_presence", "").lower() == "true" + objstorage = request.app["objstorage"] if check_pres and obj_id in objstorage: return encode_data(obj_id) # XXX this really should go in a decode_stream_request coroutine in # swh.core, but since py35 does not support async generators, it cannot # easily be made for now - content_type = request.headers.get('Content-Type') - if content_type == 'application/x-msgpack': + content_type = request.headers.get("Content-Type") + if content_type == "application/x-msgpack": decode = msgpack_loads - elif content_type == 'application/json': + elif content_type == "application/json": decode = lambda x: json.loads(x, cls=SWHJSONDecoder) # noqa else: - raise ValueError('Wrong content type `%s` for API request' - % content_type) + raise ValueError("Wrong content type `%s` for API request" % content_type) - buffer = b'' + buffer = b"" with objstorage.chunk_writer(obj_id) as write: while not request.content.at_eof(): data, eot = await request.content.readchunk() buffer += data if eot: write(decode(buffer)) - buffer = b'' + buffer = b"" return encode_data(obj_id) @timed async def get_stream(request): - hex_id = request.match_info['hex_id'] + hex_id = request.match_info["hex_id"] obj_id = hashutil.hash_to_bytes(hex_id) response = aiohttp.web.StreamResponse() await response.prepare(request) - for chunk in request.app['objstorage'].get_stream(obj_id, 2 << 20): + for chunk in request.app["objstorage"].get_stream(obj_id, 2 << 20): await response.write(chunk) await response.write_eof() return response @timed async def list_content(request): - last_obj_id = request.query.get('last_obj_id') + last_obj_id = request.query.get("last_obj_id") if last_obj_id: last_obj_id = bytes.fromhex(last_obj_id) - limit = int(request.query.get('limit', DEFAULT_LIMIT)) + limit = int(request.query.get("limit", DEFAULT_LIMIT)) response = aiohttp.web.StreamResponse() response.enable_chunked_encoding() await response.prepare(request) - for obj_id in request.app['objstorage'].list_content( - last_obj_id, limit=limit): + for obj_id in request.app["objstorage"].list_content(last_obj_id, limit=limit): await response.write(obj_id) await response.write_eof() return response def make_app(config): """Initialize the remote api application. 
""" - client_max_size = config.get('client_max_size', 1024 * 1024 * 1024) + client_max_size = config.get("client_max_size", 1024 * 1024 * 1024) app = RPCServerApp(client_max_size=client_max_size) app.client_exception_classes = (ObjNotFoundError, Error) # retro compatibility configuration settings - app['config'] = config - _cfg = config['objstorage'] - app['objstorage'] = get_objstorage(_cfg['cls'], _cfg['args']) - - app.router.add_route('GET', '/', index) - app.router.add_route('POST', '/check_config', check_config) - app.router.add_route('POST', '/content/contains', contains) - app.router.add_route('POST', '/content/add', add_bytes) - app.router.add_route('POST', '/content/add/batch', add_batch) - app.router.add_route('POST', '/content/get', get_bytes) - app.router.add_route('POST', '/content/get/batch', get_batch) - app.router.add_route('POST', '/content/get/random', get_random_contents) - app.router.add_route('POST', '/content/check', check) - app.router.add_route('POST', '/content/delete', delete) - app.router.add_route('GET', '/content', list_content) - app.router.add_route('POST', '/content/add_stream/{hex_id}', add_stream) - app.router.add_route('GET', '/content/get_stream/{hex_id}', get_stream) + app["config"] = config + _cfg = config["objstorage"] + app["objstorage"] = get_objstorage(_cfg["cls"], _cfg["args"]) + + app.router.add_route("GET", "/", index) + app.router.add_route("POST", "/check_config", check_config) + app.router.add_route("POST", "/content/contains", contains) + app.router.add_route("POST", "/content/add", add_bytes) + app.router.add_route("POST", "/content/add/batch", add_batch) + app.router.add_route("POST", "/content/get", get_bytes) + app.router.add_route("POST", "/content/get/batch", get_batch) + app.router.add_route("POST", "/content/get/random", get_random_contents) + app.router.add_route("POST", "/content/check", check) + app.router.add_route("POST", "/content/delete", delete) + app.router.add_route("GET", "/content", list_content) + app.router.add_route("POST", "/content/add_stream/{hex_id}", add_stream) + app.router.add_route("GET", "/content/get_stream/{hex_id}", get_stream) return app def load_and_check_config(config_file): """Check the minimal configuration is set to run the api or raise an error explanation. Args: config_file (str): Path to the configuration file to load type (str): configuration type. For 'local' type, more checks are done. 
Raises: Error if the setup is not as expected Returns: configuration as a dict """ if not config_file: - raise EnvironmentError('Configuration file must be defined') + raise EnvironmentError("Configuration file must be defined") if not os.path.exists(config_file): - raise FileNotFoundError('Configuration file %s does not exist' % ( - config_file, )) + raise FileNotFoundError("Configuration file %s does not exist" % (config_file,)) cfg = config_read(config_file) - if 'objstorage' not in cfg: - raise KeyError( - "Invalid configuration; missing objstorage config entry") + if "objstorage" not in cfg: + raise KeyError("Invalid configuration; missing objstorage config entry") missing_keys = [] - vcfg = cfg['objstorage'] - for key in ('cls', 'args'): + vcfg = cfg["objstorage"] + for key in ("cls", "args"): v = vcfg.get(key) if v is None: missing_keys.append(key) if missing_keys: raise KeyError( - "Invalid configuration; missing %s config entry" % ( - ', '.join(missing_keys), )) - - cls = vcfg.get('cls') - if cls == 'pathslicing': - args = vcfg['args'] - for key in ('root', 'slicing'): + "Invalid configuration; missing %s config entry" + % (", ".join(missing_keys),) + ) + + cls = vcfg.get("cls") + if cls == "pathslicing": + args = vcfg["args"] + for key in ("root", "slicing"): v = args.get(key) if v is None: missing_keys.append(key) if missing_keys: raise KeyError( - "Invalid configuration; missing args.%s config entry" % ( - ', '.join(missing_keys), )) + "Invalid configuration; missing args.%s config entry" + % (", ".join(missing_keys),) + ) return cfg def make_app_from_configfile(): """Load configuration and then build application to run """ - config_file = os.environ.get('SWH_CONFIG_FILENAME') + config_file = os.environ.get("SWH_CONFIG_FILENAME") config = load_and_check_config(config_file) return make_app(config=config) -if __name__ == '__main__': - print('Deprecated. Use swh-objstorage') +if __name__ == "__main__": + print("Deprecated. Use swh-objstorage") diff --git a/swh/objstorage/backends/azure.py b/swh/objstorage/backends/azure.py index 227e859..6ae16c7 100644 --- a/swh/objstorage/backends/azure.py +++ b/swh/objstorage/backends/azure.py @@ -1,242 +1,248 @@ # Copyright (C) 2016-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import string from itertools import dropwhile, islice, product from azure.storage.blob import BlockBlobService from azure.common import AzureMissingResourceHttpError import requests -from swh.objstorage.objstorage import (ObjStorage, compute_hash, DEFAULT_LIMIT, - compressors, decompressors) +from swh.objstorage.objstorage import ( + ObjStorage, + compute_hash, + DEFAULT_LIMIT, + compressors, + decompressors, +) from swh.objstorage.exc import ObjNotFoundError, Error from swh.model import hashutil logging.getLogger("azure.storage").setLevel(logging.CRITICAL) class AzureCloudObjStorage(ObjStorage): """ObjStorage with azure abilities. 
""" - def __init__(self, account_name, api_secret_key, container_name, - compression='gzip', **kwargs): + + def __init__( + self, account_name, api_secret_key, container_name, compression="gzip", **kwargs + ): super().__init__(**kwargs) self.block_blob_service = BlockBlobService( account_name=account_name, account_key=api_secret_key, request_session=requests.Session(), ) self.container_name = container_name self.compression = compression def get_blob_service(self, hex_obj_id): """Get the block_blob_service and container that contains the object with internal id hex_obj_id """ return self.block_blob_service, self.container_name def get_all_blob_services(self): """Get all active block_blob_services""" yield self.block_blob_service, self.container_name def _internal_id(self, obj_id): """Internal id is the hex version in objstorage. """ return hashutil.hash_to_hex(obj_id) def check_config(self, *, check_write): """Check the configuration for this object storage""" for service, container in self.get_all_blob_services(): props = service.get_container_properties(container) # FIXME: check_write is ignored here if not props: return False return True def __contains__(self, obj_id): """Does the storage contains the obj_id. """ hex_obj_id = self._internal_id(obj_id) service, container = self.get_blob_service(hex_obj_id) - return service.exists( - container_name=container, - blob_name=hex_obj_id) + return service.exists(container_name=container, blob_name=hex_obj_id) def __iter__(self): """Iterate over the objects present in the storage. """ for service, container in self.get_all_blob_services(): for obj in service.list_blobs(container): yield hashutil.hash_to_bytes(obj.name) def __len__(self): """Compute the number of objects in the current object storage. Returns: number of objects contained in the storage. """ return sum(1 for i in self) def add(self, content, obj_id=None, check_presence=True): """Add an obj in storage if it's not there already. """ if obj_id is None: # Checksum is missing, compute it on the fly. obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id hex_obj_id = self._internal_id(obj_id) # Send the compressed content compressor = compressors[self.compression]() blob = [compressor.compress(content), compressor.flush()] service, container = self.get_blob_service(hex_obj_id) service.create_blob_from_bytes( - container_name=container, - blob_name=hex_obj_id, - blob=b''.join(blob), + container_name=container, blob_name=hex_obj_id, blob=b"".join(blob), ) return obj_id def restore(self, content, obj_id=None): """Restore a content. """ return self.add(content, obj_id, check_presence=False) def get(self, obj_id): """Retrieve blob's content if found. """ hex_obj_id = self._internal_id(obj_id) service, container = self.get_blob_service(hex_obj_id) try: blob = service.get_blob_to_bytes( - container_name=container, - blob_name=hex_obj_id) + container_name=container, blob_name=hex_obj_id + ) except AzureMissingResourceHttpError: raise ObjNotFoundError(obj_id) decompressor = decompressors[self.compression]() ret = decompressor.decompress(blob.content) if decompressor.unused_data: - raise Error('Corrupt object %s: trailing data found' % hex_obj_id) + raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id): """Check the content integrity. 
""" obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id): """Delete an object.""" super().delete(obj_id) # Check delete permission hex_obj_id = self._internal_id(obj_id) service, container = self.get_blob_service(hex_obj_id) try: - service.delete_blob( - container_name=container, - blob_name=hex_obj_id) + service.delete_blob(container_name=container, blob_name=hex_obj_id) except AzureMissingResourceHttpError: - raise ObjNotFoundError('Content {} not found!'.format(hex_obj_id)) + raise ObjNotFoundError("Content {} not found!".format(hex_obj_id)) return True def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): all_blob_services = self.get_all_blob_services() if last_obj_id: last_obj_id = self._internal_id(last_obj_id) last_service, _ = self.get_blob_service(last_obj_id) all_blob_services = dropwhile( - lambda srv: srv[0] != last_service, all_blob_services) + lambda srv: srv[0] != last_service, all_blob_services + ) else: last_service = None def iterate_blobs(): for service, container in all_blob_services: marker = last_obj_id if service == last_service else None for obj in service.list_blobs( - container, marker=marker, maxresults=limit): + container, marker=marker, maxresults=limit + ): yield hashutil.hash_to_bytes(obj.name) + return islice(iterate_blobs(), limit) class PrefixedAzureCloudObjStorage(AzureCloudObjStorage): """ObjStorage with azure capabilities, striped by prefix. accounts is a dict containing entries of the form: : account_name: api_secret_key: container_name: """ - def __init__(self, accounts, compression='gzip', **kwargs): + + def __init__(self, accounts, compression="gzip", **kwargs): # shortcut AzureCloudObjStorage __init__ ObjStorage.__init__(self, **kwargs) self.compression = compression # Definition sanity check prefix_lengths = set(len(prefix) for prefix in accounts) if not len(prefix_lengths) == 1: - raise ValueError("Inconsistent prefixes, found lengths %s" - % ', '.join( - str(l) for l in sorted(prefix_lengths) - )) + raise ValueError( + "Inconsistent prefixes, found lengths %s" + % ", ".join(str(l) for l in sorted(prefix_lengths)) + ) self.prefix_len = prefix_lengths.pop() expected_prefixes = set( - ''.join(letters) + "".join(letters) for letters in product( - set(string.hexdigits.lower()), repeat=self.prefix_len + set(string.hexdigits.lower()), repeat=self.prefix_len ) ) missing_prefixes = expected_prefixes - set(accounts) if missing_prefixes: - raise ValueError("Missing prefixes %s" - % ', '.join(sorted(missing_prefixes))) + raise ValueError( + "Missing prefixes %s" % ", ".join(sorted(missing_prefixes)) + ) self.prefixes = {} request_session = requests.Session() for prefix, account in accounts.items(): self.prefixes[prefix] = ( BlockBlobService( - account_name=account['account_name'], - account_key=account['api_secret_key'], + account_name=account["account_name"], + account_key=account["api_secret_key"], request_session=request_session, ), - account['container_name'], + account["container_name"], ) def get_blob_service(self, hex_obj_id): """Get the block_blob_service and container that contains the object with internal id hex_obj_id """ - return self.prefixes[hex_obj_id[:self.prefix_len]] + return self.prefixes[hex_obj_id[: self.prefix_len]] def get_all_blob_services(self): """Get all active block_blob_services""" # iterate on items() to sort blob services; # needed to be able to paginate in the list_content() method yield from (v for _, v in 
sorted(self.prefixes.items())) diff --git a/swh/objstorage/backends/generator.py b/swh/objstorage/backends/generator.py index 08b21da..be4a809 100644 --- a/swh/objstorage/backends/generator.py +++ b/swh/objstorage/backends/generator.py @@ -1,135 +1,227 @@ from itertools import count, islice, repeat import random import io import functools import logging -from swh.objstorage.objstorage import ( - ObjStorage, DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT) +from swh.objstorage.objstorage import ObjStorage, DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT logger = logging.getLogger(__name__) class Randomizer: def __init__(self): self.size = 0 self.read(1024) # create a not-so-small initial buffer def read(self, size): if size > self.size: - with open('/dev/urandom', 'rb') as fobj: - self.data = fobj.read(2*size) + with open("/dev/urandom", "rb") as fobj: + self.data = fobj.read(2 * size) self.size = len(self.data) # pick a random subset of our existing buffer idx = random.randint(0, self.size - size - 1) - return self.data[idx:idx+size] + return self.data[idx : idx + size] def gen_sizes(): - '''generates numbers according to the rought distribution of file size in the + """generates numbers according to the rought distribution of file size in the SWH archive - ''' + """ # these are the histogram bounds of the pg content.length column - bounds = [0, 2, 72, 119, 165, 208, 256, 300, 345, 383, 429, 474, 521, 572, - 618, 676, 726, 779, 830, 879, 931, 992, 1054, 1119, 1183, 1244, - 1302, 1370, 1437, 1504, 1576, 1652, 1725, 1806, 1883, 1968, 2045, - 2133, 2236, 2338, 2433, 2552, 2659, 2774, 2905, 3049, 3190, 3322, - 3489, 3667, 3834, 4013, 4217, 4361, 4562, 4779, 5008, 5233, 5502, - 5788, 6088, 6396, 6728, 7094, 7457, 7835, 8244, 8758, 9233, 9757, - 10313, 10981, 11693, 12391, 13237, 14048, 14932, 15846, 16842, - 18051, 19487, 20949, 22595, 24337, 26590, 28840, 31604, 34653, - 37982, 41964, 46260, 51808, 58561, 66584, 78645, 95743, 122883, - 167016, 236108, 421057, 1047367, 55056238] + bounds = [ + 0, + 2, + 72, + 119, + 165, + 208, + 256, + 300, + 345, + 383, + 429, + 474, + 521, + 572, + 618, + 676, + 726, + 779, + 830, + 879, + 931, + 992, + 1054, + 1119, + 1183, + 1244, + 1302, + 1370, + 1437, + 1504, + 1576, + 1652, + 1725, + 1806, + 1883, + 1968, + 2045, + 2133, + 2236, + 2338, + 2433, + 2552, + 2659, + 2774, + 2905, + 3049, + 3190, + 3322, + 3489, + 3667, + 3834, + 4013, + 4217, + 4361, + 4562, + 4779, + 5008, + 5233, + 5502, + 5788, + 6088, + 6396, + 6728, + 7094, + 7457, + 7835, + 8244, + 8758, + 9233, + 9757, + 10313, + 10981, + 11693, + 12391, + 13237, + 14048, + 14932, + 15846, + 16842, + 18051, + 19487, + 20949, + 22595, + 24337, + 26590, + 28840, + 31604, + 34653, + 37982, + 41964, + 46260, + 51808, + 58561, + 66584, + 78645, + 95743, + 122883, + 167016, + 236108, + 421057, + 1047367, + 55056238, + ] nbounds = len(bounds) for i in count(): - idx = random.randint(1, nbounds-1) - lower = bounds[idx-1] + idx = random.randint(1, nbounds - 1) + lower = bounds[idx - 1] upper = bounds[idx] - yield random.randint(lower, upper-1) + yield random.randint(lower, upper - 1) def gen_random_content(total=None, filesize=None): - '''generates random (file) content which sizes roughly follows the SWH + """generates random (file) content which sizes roughly follows the SWH archive file size distribution (by default). Args: total (int): the total number of objects to generate. Infinite if unset. filesize (int): generate objects with fixed size instead of random ones. 
- ''' + """ randomizer = Randomizer() if filesize: gen = repeat(filesize) else: gen = gen_sizes() if total: gen = islice(gen, total) for objsize in gen: yield randomizer.read(objsize) class RandomGeneratorObjStorage(ObjStorage): - '''A stupid read-only storage that generates blobs for testing purpose. - ''' + """A stupid read-only storage that generates blobs for testing purpose. + """ def __init__(self, filesize=None, total=None, **kwargs): super().__init__() if filesize: filesize = int(filesize) self.filesize = filesize if total: total = int(total) self.total = total self._content_generator = None @property def content_generator(self): if self._content_generator is None: - self._content_generator = gen_random_content( - self.total, self.filesize) + self._content_generator = gen_random_content(self.total, self.filesize) return self._content_generator def check_config(self, *, check_write): return True def __contains__(self, obj_id, *args, **kwargs): return False def __iter__(self): i = 1 while True: - j = yield (b'%d' % i) + j = yield (b"%d" % i) if self.total and i >= self.total: - logger.debug('DONE') + logger.debug("DONE") break if j is not None: i = j else: i += 1 def get(self, obj_id, *args, **kwargs): return next(self.content_generator) def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): pass def check(self, obj_id, *args, **kwargs): return True def delete(self, obj_id, *args, **kwargs): return True def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): data = io.BytesIO(next(self.content_generator)) reader = functools.partial(data.read, chunk_size) - yield from iter(reader, b'') + yield from iter(reader, b"") def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): it = iter(self) if last_obj_id: next(it) it.send(int(last_obj_id)) return islice(it, limit) diff --git a/swh/objstorage/backends/in_memory.py b/swh/objstorage/backends/in_memory.py index ae0e7cd..d1c27eb 100644 --- a/swh/objstorage/backends/in_memory.py +++ b/swh/objstorage/backends/in_memory.py @@ -1,72 +1,71 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import io from swh.objstorage.exc import ObjNotFoundError, Error -from swh.objstorage.objstorage import ObjStorage, compute_hash, \ - DEFAULT_CHUNK_SIZE +from swh.objstorage.objstorage import ObjStorage, compute_hash, DEFAULT_CHUNK_SIZE class InMemoryObjStorage(ObjStorage): """In-Memory objstorage. Intended for test purposes. 
""" def __init__(self, **args): super().__init__() self.state = {} def check_config(self, *, check_write): return True def __contains__(self, obj_id, *args, **kwargs): return obj_id in self.state def __iter__(self): return iter(sorted(self.state)) def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): if obj_id is None: obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id self.state[obj_id] = content return obj_id def get(self, obj_id, *args, **kwargs): if obj_id not in self: raise ObjNotFoundError(obj_id) return self.state[obj_id] def check(self, obj_id, *args, **kwargs): if obj_id not in self: raise ObjNotFoundError(obj_id) if compute_hash(self.state[obj_id]) != obj_id: - raise Error('Corrupt object %s' % obj_id) + raise Error("Corrupt object %s" % obj_id) return True def delete(self, obj_id, *args, **kwargs): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.state.pop(obj_id) return True def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) data = io.BytesIO(self.state[obj_id]) reader = functools.partial(data.read, chunk_size) - yield from iter(reader, b'') + yield from iter(reader, b"") diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py index d3dd661..f8de903 100644 --- a/swh/objstorage/backends/libcloud.py +++ b/swh/objstorage/backends/libcloud.py @@ -1,249 +1,254 @@ # Copyright (C) 2016-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import collections from typing import Optional from urllib.parse import urlencode from swh.model import hashutil from swh.objstorage.objstorage import ObjStorage, compute_hash from swh.objstorage.objstorage import compressors, decompressors from swh.objstorage.exc import ObjNotFoundError, Error from libcloud.storage import providers import libcloud.storage.drivers.s3 from libcloud.storage.types import Provider, ObjectDoesNotExistError def patch_libcloud_s3_urlencode(): """Patches libcloud's S3 backend to properly sign queries. Recent versions of libcloud are not affected (they use signature V4), but 1.5.0 (the one in Debian 9) is.""" def s3_urlencode(params): """Like urllib.parse.urlencode, but sorts the parameters first. This is required to properly compute the request signature, see https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement """ # noqa return urlencode(collections.OrderedDict(sorted(params.items()))) libcloud.storage.drivers.s3.urlencode = s3_urlencode patch_libcloud_s3_urlencode() class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta): """Abstract ObjStorage that connect to a cloud using Libcloud Implementations of this class must redefine the _get_provider method to make it return a driver provider (i.e. object that supports `get_driver` method) which return a LibCloud driver (see https://libcloud.readthedocs.io/en/latest/storage/api.html). 
Args: container_name: Name of the base container path_prefix: prefix to prepend to object paths in the container, separated with a slash compression: compression algorithm to use for objects kwargs: extra arguments are passed through to the LibCloud driver """ - def __init__(self, - container_name: str, - compression: Optional[str] = None, - path_prefix: Optional[str] = None, - **kwargs): + + def __init__( + self, + container_name: str, + compression: Optional[str] = None, + path_prefix: Optional[str] = None, + **kwargs + ): super().__init__(**kwargs) self.driver = self._get_driver(**kwargs) self.container_name = container_name - self.container = self.driver.get_container( - container_name=container_name) + self.container = self.driver.get_container(container_name=container_name) self.compression = compression self.path_prefix = None if path_prefix: - self.path_prefix = path_prefix.rstrip('/') + '/' + self.path_prefix = path_prefix.rstrip("/") + "/" def _get_driver(self, **kwargs): """Initialize a driver to communicate with the cloud Kwargs: arguments passed to the StorageDriver class, typically key: key to connect to the API. secret: secret key for authentication. secure: (bool) support HTTPS host: (str) port: (int) api_version: (str) region: (str) Returns: a Libcloud driver to a cloud storage. """ # Get the driver class from its description. cls = providers.get_driver(self._get_provider()) # Initialize the driver. return cls(**kwargs) @abc.abstractmethod def _get_provider(self): """Get a libcloud driver provider This method must be overridden by subclasses to specify which of the native libcloud driver the current storage should connect to. Alternatively, provider for a custom driver may be returned, in which case the provider will have to support `get_driver` method. """ - raise NotImplementedError('%s must implement `get_provider` method' - % type(self)) + raise NotImplementedError( + "%s must implement `get_provider` method" % type(self) + ) def check_config(self, *, check_write): """Check the configuration for this object storage""" # FIXME: hopefully this blew up during instantiation return True def __contains__(self, obj_id): try: self._get_object(obj_id) except ObjNotFoundError: return False else: return True def __iter__(self): """ Iterate over the objects present in the storage Warning: Iteration over the contents of a cloud-based object storage may have bad efficiency: due to the very high amount of objects in it and the fact that it is remote, get all the contents of the current object storage may result in a lot of network requests. You almost certainly don't want to use this method in production. """ for obj in self.driver.iterate_container_objects(self.container): name = obj.name if self.path_prefix and not name.startswith(self.path_prefix): continue if self.path_prefix: - name = name[len(self.path_prefix):] + name = name[len(self.path_prefix) :] yield hashutil.hash_to_bytes(name) def __len__(self): """Compute the number of objects in the current object storage. Warning: this currently uses `__iter__`, its warning about bad performance applies. Returns: number of objects contained in the storage. """ return sum(1 for i in self) def add(self, content, obj_id=None, check_presence=True): if obj_id is None: # Checksum is missing, compute it on the fly. 
obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id self._put_object(content, obj_id) return obj_id def restore(self, content, obj_id=None): return self.add(content, obj_id, check_presence=False) def get(self, obj_id): - obj = b''.join(self._get_object(obj_id).as_stream()) + obj = b"".join(self._get_object(obj_id).as_stream()) d = decompressors[self.compression]() ret = d.decompress(obj) if d.unused_data: hex_obj_id = hashutil.hash_to_hex(obj_id) - raise Error('Corrupt object %s: trailing data found' % hex_obj_id) + raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id): # Check that the file exists, as _get_object raises ObjNotFoundError self._get_object(obj_id) # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission obj = self._get_object(obj_id) return self.driver.delete_object(obj) def _object_path(self, obj_id): """Get the full path to an object""" hex_obj_id = hashutil.hash_to_hex(obj_id) if self.path_prefix: return self.path_prefix + hex_obj_id else: return hex_obj_id def _get_object(self, obj_id): """Get a Libcloud wrapper for an object pointer. This wrapper does not retrieve the content of the object directly. """ object_path = self._object_path(obj_id) try: return self.driver.get_object(self.container_name, object_path) except ObjectDoesNotExistError: raise ObjNotFoundError(obj_id) def _compressor(self, data): comp = compressors[self.compression]() for chunk in data: cchunk = comp.compress(chunk) if cchunk: yield cchunk trail = comp.flush() if trail: yield trail def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. 
""" object_path = self._object_path(obj_id) if not isinstance(content, collections.Iterator): content = (content,) self.driver.upload_object_via_stream( - self._compressor(content), - self.container, object_path) + self._compressor(content), self.container, object_path + ) class AwsCloudObjStorage(CloudObjStorage): """ Amazon's S3 Cloud-based object storage """ + def _get_provider(self): return Provider.S3 class OpenStackCloudObjStorage(CloudObjStorage): """ OpenStack Swift Cloud based object storage """ + def _get_provider(self): return Provider.OPENSTACK_SWIFT diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py index eb04dfc..01dec4e 100644 --- a/swh/objstorage/backends/pathslicing.py +++ b/swh/objstorage/backends/pathslicing.py @@ -1,389 +1,397 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import tempfile import random import collections from itertools import islice from contextlib import contextmanager from swh.model import hashutil from swh.objstorage.objstorage import ( - compressors, decompressors, - ObjStorage, compute_hash, ID_HASH_ALGO, - ID_HASH_LENGTH, DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT) + compressors, + decompressors, + ObjStorage, + compute_hash, + ID_HASH_ALGO, + ID_HASH_LENGTH, + DEFAULT_CHUNK_SIZE, + DEFAULT_LIMIT, +) from swh.objstorage.exc import ObjNotFoundError, Error BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 @contextmanager def _write_obj_file(hex_obj_id, objstorage): """ Context manager for writing object files to the object storage. During writing, data are written to a temporary file, which is atomically renamed to the right file name after closing. Usage sample: with _write_obj_file(hex_obj_id, objstorage): f.write(obj_data) Yields: a file-like object open for writing bytes. """ # Get the final paths and create the directory if absent. dir = objstorage._obj_dir(hex_obj_id) if not os.path.isdir(dir): os.makedirs(dir, DIR_MODE, exist_ok=True) path = os.path.join(dir, hex_obj_id) # Create a temporary file. - (tmp, tmp_path) = tempfile.mkstemp(suffix='.tmp', prefix='hex_obj_id.', - dir=dir) + (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir) # Open the file and yield it for writing. - tmp_f = os.fdopen(tmp, 'wb') + tmp_f = os.fdopen(tmp, "wb") yield tmp_f # Make sure the contents of the temporary file are written to disk tmp_f.flush() if objstorage.use_fdatasync: os.fdatasync(tmp) else: os.fsync(tmp) # Then close the temporary file and move it to the right path. tmp_f.close() os.chmod(tmp_path, FILE_MODE) os.rename(tmp_path, path) def _read_obj_file(hex_obj_id, objstorage): """ Context manager for reading object file in the object storage. Usage sample: with _read_obj_file(hex_obj_id, objstorage) as f: b = f.read() Yields: a file-like object open for reading bytes. """ path = objstorage._obj_path(hex_obj_id) - return open(path, 'rb') + return open(path, "rb") class PathSlicingObjStorage(ObjStorage): """Implementation of the ObjStorage API based on the hash of the content. On disk, an object storage is a directory tree containing files named after their object IDs. An object ID is a checksum of its content, depending on the value of the ID_HASH_ALGO constant (see swh.model.hashutil for its meaning). 
To avoid directories that contain too many files, the object storage has a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will be stored in the given object storages : - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 The files in the storage are stored in gzipped compressed format. Attributes: root (string): path to the root directory of the storage on the disk. bounds: list of tuples that indicates the beginning and the end of each subdirectory for a content. """ - def __init__(self, root, slicing, compression='gzip', **kwargs): + def __init__(self, root, slicing, compression="gzip", **kwargs): """ Create an object to access a hash-slicing based object storage. Args: root (string): path to the root directory of the storage on the disk. slicing (string): string that indicates the slicing to perform on the hash of the content to know the path where it should be stored. """ super().__init__(**kwargs) self.root = root # Make a list of tuples where each tuple contains the beginning # and the end of each slicing. self.bounds = [ - slice(*map(int, sbounds.split(':'))) - for sbounds in slicing.split('/') + slice(*map(int, sbounds.split(":"))) + for sbounds in slicing.split("/") if sbounds ] - self.use_fdatasync = hasattr(os, 'fdatasync') + self.use_fdatasync = hasattr(os, "fdatasync") self.compression = compression self.check_config(check_write=False) def check_config(self, *, check_write): """Check whether this object storage is properly configured""" root = self.root if not os.path.isdir(root): raise ValueError( 'PathSlicingObjStorage root "%s" is not a directory' % root ) max_endchar = max(map(lambda bound: bound.stop, self.bounds)) if ID_HASH_LENGTH < max_endchar: raise ValueError( - 'Algorithm %s has too short hash for slicing to char %d' + "Algorithm %s has too short hash for slicing to char %d" % (ID_HASH_ALGO, max_endchar) ) if check_write: if not os.access(self.root, os.W_OK): raise PermissionError( 'PathSlicingObjStorage root "%s" is not writable' % root ) if self.compression not in compressors: - raise ValueError('Unknown compression algorithm "%s" for ' - 'PathSlicingObjStorage' % self.compression) + raise ValueError( + 'Unknown compression algorithm "%s" for ' + "PathSlicingObjStorage" % self.compression + ) return True def __contains__(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return os.path.isfile(self._obj_path(hex_obj_id)) def __iter__(self): """Iterate over the object identifiers currently available in the storage. Warning: with the current implementation of the object storage, this method will walk the filesystem to list objects, meaning that listing all objects will be very slow for large storages. You almost certainly don't want to use this method in production. Return: Iterator over object IDs """ + def obj_iterator(): # XXX hackish: it does not verify that the depth of found files # matches the slicing depth of the storage for root, _dirs, files in os.walk(self.root): _dirs.sort() for f in sorted(files): yield bytes.fromhex(f) return obj_iterator() def __len__(self): """Compute the number of objects available in the storage. 
Warning: this currently uses `__iter__`, its warning about bad performances applies Return: number of objects contained in the storage """ return sum(1 for i in self) def _obj_dir(self, hex_obj_id): """ Compute the storage directory of an object. See also: PathSlicingObjStorage::_obj_path Args: hex_obj_id: object id as hexlified string. Returns: Path to the directory that contains the required object. """ slices = [hex_obj_id[bound] for bound in self.bounds] return os.path.join(self.root, *slices) def _obj_path(self, hex_obj_id): """ Compute the full path to an object into the current storage. See also: PathSlicingObjStorage::_obj_dir Args: hex_obj_id: object id as hexlified string. Returns: Path to the actual object corresponding to the given id. """ return os.path.join(self._obj_dir(hex_obj_id), hex_obj_id) def add(self, content, obj_id=None, check_presence=True): if obj_id is None: obj_id = compute_hash(content) if check_presence and obj_id in self: # If the object is already present, return immediately. return obj_id hex_obj_id = hashutil.hash_to_hex(obj_id) if not isinstance(content, collections.Iterator): content = [content] compressor = compressors[self.compression]() with _write_obj_file(hex_obj_id, self) as f: for chunk in content: f.write(compressor.compress(chunk)) f.write(compressor.flush()) return obj_id def get(self, obj_id): if obj_id not in self: raise ObjNotFoundError(obj_id) # Open the file and return its content as bytes hex_obj_id = hashutil.hash_to_hex(obj_id) d = decompressors[self.compression]() with _read_obj_file(hex_obj_id, self) as f: out = d.decompress(f.read()) if d.unused_data: - raise Error('Corrupt object %s: trailing data found' % hex_obj_id,) + raise Error("Corrupt object %s: trailing data found" % hex_obj_id,) return out def check(self, obj_id): try: data = self.get(obj_id) except OSError: hex_obj_id = hashutil.hash_to_hex(obj_id) - raise Error( - 'Corrupt object %s: not a proper compressed file' % hex_obj_id, - ) + raise Error("Corrupt object %s: not a proper compressed file" % hex_obj_id,) checksums = hashutil.MultiHash.from_data( - data, hash_names=[ID_HASH_ALGO]).digest() + data, hash_names=[ID_HASH_ALGO] + ).digest() actual_obj_id = checksums[ID_HASH_ALGO] hex_obj_id = hashutil.hash_to_hex(obj_id) if hex_obj_id != hashutil.hash_to_hex(actual_obj_id): raise Error( - 'Corrupt object %s should have id %s' - % (hashutil.hash_to_hex(obj_id), - hashutil.hash_to_hex(actual_obj_id)) + "Corrupt object %s should have id %s" + % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id)) ) def delete(self, obj_id): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) try: os.remove(self._obj_path(hex_obj_id)) except FileNotFoundError: raise ObjNotFoundError(obj_id) return True # Management methods def get_random(self, batch_size): def get_random_content(self, batch_size): """ Get a batch of content inside a single directory. Returns: a tuple (batch size, batch). 
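Taken together, add() and get() above implement a compress-on-write, decompress-on-read cycle. A standalone sketch of that cycle with the default "gzip" codec (zlib with wbits=31), using only the standard library:

import zlib

data = b"some object payload"

comp = zlib.compressobj(wbits=31)
stored = comp.compress(data) + comp.flush()   # what ends up in the object file

dec = zlib.decompressobj(wbits=31)
restored = dec.decompress(stored)
assert restored == data

# Any bytes left over after the end of the compressed stream show up in
# dec.unused_data; get() above treats that as a corrupt object.
assert dec.unused_data == b""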
""" dirs = [] for level in range(len(self.bounds)): path = os.path.join(self.root, *dirs) dir_list = next(os.walk(path))[1] - if 'tmp' in dir_list: - dir_list.remove('tmp') + if "tmp" in dir_list: + dir_list.remove("tmp") dirs.append(random.choice(dir_list)) path = os.path.join(self.root, *dirs) content_list = next(os.walk(path))[2] length = min(batch_size, len(content_list)) - return length, map(hashutil.hash_to_bytes, - random.sample(content_list, length)) + return ( + length, + map(hashutil.hash_to_bytes, random.sample(content_list, length)), + ) while batch_size: length, it = get_random_content(self, batch_size) batch_size = batch_size - length yield from it # Streaming methods @contextmanager def chunk_writer(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) compressor = compressors[self.compression]() with _write_obj_file(hex_obj_id, self) as f: yield lambda c: f.write(compressor.compress(c)) f.write(compressor.flush()) def add_stream(self, content_iter, obj_id, check_presence=True): if check_presence and obj_id in self: return obj_id with self.chunk_writer(obj_id) as writer: for chunk in content_iter: writer(chunk) return obj_id def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) decompressor = decompressors[self.compression]() with _read_obj_file(hex_obj_id, self) as f: while True: raw = f.read(chunk_size) if not raw: break r = decompressor.decompress(raw) if not r: continue yield r def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): if last_obj_id: it = self.iter_from(last_obj_id) else: it = iter(self) return islice(it, limit) def iter_from(self, obj_id, n_leaf=False): hex_obj_id = hashutil.hash_to_hex(obj_id) slices = [hex_obj_id[bound] for bound in self.bounds] - rlen = len(self.root.split('/')) + rlen = len(self.root.split("/")) i = 0 for root, dirs, files in os.walk(self.root): if not dirs: i += 1 - level = len(root.split('/')) - rlen + level = len(root.split("/")) - rlen dirs.sort() if dirs and root == os.path.join(self.root, *slices[:level]): cslice = slices[level] for d in dirs[:]: if d < cslice: dirs.remove(d) for f in sorted(files): if f > hex_obj_id: yield bytes.fromhex(f) if n_leaf: yield i diff --git a/swh/objstorage/backends/rados.py b/swh/objstorage/backends/rados.py index 9390287..cffa4ec 100644 --- a/swh/objstorage/backends/rados.py +++ b/swh/objstorage/backends/rados.py @@ -1,91 +1,86 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import rados from swh.model import hashutil from swh.objstorage.exc import ObjNotFoundError from swh.objstorage import objstorage READ_SIZE = 8192 class RADOSObjStorage(objstorage.ObjStorage): """Object storage implemented with RADOS""" - def __init__(self, *, rados_id, pool_name, ceph_config, - allow_delete=False): + def __init__(self, *, rados_id, pool_name, ceph_config, allow_delete=False): super().__init__(allow_delete=allow_delete) self.pool_name = pool_name - self.cluster = rados.Rados( - conf=ceph_config, - conffile='', - rados_id=rados_id, - ) + self.cluster = rados.Rados(conf=ceph_config, conffile="", rados_id=rados_id,) self.cluster.connect() self.__ioctx = None def check_config(self, *, check_write): if self.pool_name not in self.cluster.list_pools(): - raise ValueError('Pool %s does not exist' % 
self.pool_name) + raise ValueError("Pool %s does not exist" % self.pool_name) @staticmethod def _to_rados_obj_id(obj_id): """Convert to a RADOS object identifier""" return hashutil.hash_to_hex(obj_id) @property def ioctx(self): if not self.__ioctx: self.__ioctx = self.cluster.open_ioctx(self.pool_name) return self.__ioctx def __contains__(self, obj_id): try: self.ioctx.stat(self._to_rados_obj_id(obj_id)) except rados.ObjectNotFound: return False else: return True def add(self, content, obj_id=None, check_presence=True): if not obj_id: - raise ValueError('add needs an obj_id') + raise ValueError("add needs an obj_id") _obj_id = self._to_rados_obj_id(obj_id) if check_presence: try: self.ioctx.stat(_obj_id) except rados.ObjectNotFound: pass else: return obj_id self.ioctx.write_full(_obj_id, content) return obj_id def get(self, obj_id): chunks = [] _obj_id = self._to_rados_obj_id(obj_id) try: length, mtime = self.ioctx.stat(_obj_id) except rados.ObjectNotFound: raise ObjNotFoundError(obj_id) from None offset = 0 while offset < length: chunk = self.ioctx.read(_obj_id, offset, READ_SIZE) chunks.append(chunk) offset += len(chunk) - return b''.join(chunks) + return b"".join(chunks) def check(self, obj_id): return True def delete(self, obj_id): super().delete(obj_id) # check delete permission return True diff --git a/swh/objstorage/backends/seaweed.py b/swh/objstorage/backends/seaweed.py index f5243fc..a70857e 100644 --- a/swh/objstorage/backends/seaweed.py +++ b/swh/objstorage/backends/seaweed.py @@ -1,208 +1,206 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import io import logging from urllib.parse import urljoin, urlparse import requests from swh.model import hashutil from swh.objstorage.objstorage import ObjStorage, compute_hash from swh.objstorage.objstorage import compressors, decompressors from swh.objstorage.objstorage import DEFAULT_LIMIT from swh.objstorage.exc import ObjNotFoundError, Error LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.ERROR) class WeedFiler(object): """Simple class that encapsulates access to a seaweedfs filer service. TODO: handle errors """ def __init__(self, url): self.url = url def get(self, remote_path): url = urljoin(self.url, remote_path) - LOGGER.debug('Get file %s', url) + LOGGER.debug("Get file %s", url) return requests.get(url).content def exists(self, remote_path): url = urljoin(self.url, remote_path) - LOGGER.debug('Check file %s', url) + LOGGER.debug("Check file %s", url) return requests.head(url).status_code == 200 def put(self, fp, remote_path): url = urljoin(self.url, remote_path) - LOGGER.debug('Put file %s', url) - return requests.post(url, files={'file': fp}) + LOGGER.debug("Put file %s", url) + return requests.post(url, files={"file": fp}) def delete(self, remote_path): url = urljoin(self.url, remote_path) - LOGGER.debug('Delete file %s', url) + LOGGER.debug("Delete file %s", url) return requests.delete(url) def list(self, dir, last_file_name=None, limit=DEFAULT_LIMIT): - '''list sub folders and files of @dir. show a better look if you turn on + """list sub folders and files of @dir. 
show a better look if you turn on returns a dict of "sub-folders and files" - ''' - d = dir if dir.endswith('/') else (dir + '/') + """ + d = dir if dir.endswith("/") else (dir + "/") url = urljoin(self.url, d) - headers = {'Accept': 'application/json'} - params = {'limit': limit} + headers = {"Accept": "application/json"} + params = {"limit": limit} if last_file_name: - params['lastFileName'] = last_file_name + params["lastFileName"] = last_file_name - LOGGER.debug('List directory %s', url) + LOGGER.debug("List directory %s", url) rsp = requests.get(url, params=params, headers=headers) if rsp.ok: return rsp.json() else: - LOGGER.error('Error listing "%s". [HTTP %d]' % ( - url, rsp.status_code)) + LOGGER.error('Error listing "%s". [HTTP %d]' % (url, rsp.status_code)) class WeedObjStorage(ObjStorage): """ObjStorage with seaweedfs abilities, using the Filer API. https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API """ - def __init__(self, url='http://127.0.0.1:8888/swh', - compression=None, **kwargs): + + def __init__(self, url="http://127.0.0.1:8888/swh", compression=None, **kwargs): super().__init__(**kwargs) self.wf = WeedFiler(url) self.root_path = urlparse(url).path self.compression = compression def check_config(self, *, check_write): """Check the configuration for this object storage""" # FIXME: hopefully this blew up during instantiation return True def __contains__(self, obj_id): return self.wf.exists(self._path(obj_id)) def __iter__(self): """ Iterate over the objects present in the storage Warning: Iteration over the contents of a cloud-based object storage may have bad efficiency: due to the very high amount of objects in it and the fact that it is remote, get all the contents of the current object storage may result in a lot of network requests. You almost certainly don't want to use this method in production. """ obj_id = last_obj_id = None while True: for obj_id in self.list_content(last_obj_id=last_obj_id): yield obj_id if last_obj_id == obj_id: break last_obj_id = obj_id def __len__(self): """Compute the number of objects in the current object storage. Warning: this currently uses `__iter__`, its warning about bad performance applies. Returns: number of objects contained in the storage. """ return sum(1 for i in self) def add(self, content, obj_id=None, check_presence=True): if obj_id is None: # Checksum is missing, compute it on the fly. obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id def compressor(data): comp = compressors[self.compression]() for chunk in data: yield comp.compress(chunk) yield comp.flush() if isinstance(content, bytes): content = [content] # XXX should handle streaming correctly... 
- self.wf.put(io.BytesIO(b''.join(compressor(content))), - self._path(obj_id)) + self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id)) return obj_id def restore(self, content, obj_id=None): return self.add(content, obj_id, check_presence=False) def get(self, obj_id): try: obj = self.wf.get(self._path(obj_id)) except Exception: raise ObjNotFoundError(obj_id) d = decompressors[self.compression]() ret = d.decompress(obj) if d.unused_data: hex_obj_id = hashutil.hash_to_hex(obj_id) - raise Error('Corrupt object %s: trailing data found' % hex_obj_id) + raise Error("Corrupt object %s: trailing data found" % hex_obj_id) return ret def check(self, obj_id): # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) self.wf.delete(self._path(obj_id)) return True def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): if last_obj_id: last_obj_id = hashutil.hash_to_hex(last_obj_id) resp = self.wf.list(self.root_path, last_obj_id, limit) if resp is not None: - entries = resp['Entries'] + entries = resp["Entries"] if entries: for obj in entries: if obj is not None: - bytehex = obj['FullPath'].rsplit('/', 1)[-1] + bytehex = obj["FullPath"].rsplit("/", 1)[-1] yield hashutil.bytehex_to_hash(bytehex.encode()) # internal methods def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. """ + def compressor(data): comp = compressors[self.compression]() for chunk in data: yield comp.compress(chunk) yield comp.flush() if isinstance(content, bytes): content = [content] - self.wf.put(io.BytesIO(b''.join(compressor(content))), - self._path(obj_id)) + self.wf.put(io.BytesIO(b"".join(compressor(content))), self._path(obj_id)) def _path(self, obj_id): return hashutil.hash_to_hex(obj_id) diff --git a/swh/objstorage/cli.py b/swh/objstorage/cli.py index 53810a2..3aa3514 100644 --- a/swh/objstorage/cli.py +++ b/swh/objstorage/cli.py @@ -1,91 +1,107 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import logging import time import click import aiohttp.web from swh.core.cli import CONTEXT_SETTINGS from swh.objstorage import get_objstorage from swh.objstorage.api.server import load_and_check_config, make_app -@click.group(name='objstorage', context_settings=CONTEXT_SETTINGS) -@click.option('--config-file', '-C', default=None, - type=click.Path(exists=True, dir_okay=False,), - help="Configuration file.") +@click.group(name="objstorage", context_settings=CONTEXT_SETTINGS) +@click.option( + "--config-file", + "-C", + default=None, + type=click.Path(exists=True, dir_okay=False,), + help="Configuration file.", +) @click.pass_context def cli(ctx, config_file): - '''Software Heritage Objstorage tools. - ''' + """Software Heritage Objstorage tools. 
+ """ ctx.ensure_object(dict) cfg = load_and_check_config(config_file) - ctx.obj['config'] = cfg - - -@cli.command('rpc-serve') -@click.option('--host', default='0.0.0.0', - metavar='IP', show_default=True, - help="Host ip address to bind the server on") -@click.option('--port', '-p', default=5003, type=click.INT, - metavar='PORT', show_default=True, - help="Binding port of the server") + ctx.obj["config"] = cfg + + +@cli.command("rpc-serve") +@click.option( + "--host", + default="0.0.0.0", + metavar="IP", + show_default=True, + help="Host ip address to bind the server on", +) +@click.option( + "--port", + "-p", + default=5003, + type=click.INT, + metavar="PORT", + show_default=True, + help="Binding port of the server", +) @click.pass_context def serve(ctx, host, port): - '''Run a standalone objstorage server. + """Run a standalone objstorage server. This is not meant to be run on production systems. - ''' - app = make_app(ctx.obj['config']) - if ctx.obj['log_level'] == 'DEBUG': + """ + app = make_app(ctx.obj["config"]) + if ctx.obj["log_level"] == "DEBUG": app.update(debug=True) aiohttp.web.run_app(app, host=host, port=int(port)) -@cli.command('import') -@click.argument('directory', required=True, nargs=-1) +@cli.command("import") +@click.argument("directory", required=True, nargs=-1) @click.pass_context def import_directories(ctx, directory): - '''Import a local directory in an existing objstorage. - ''' - objstorage = get_objstorage(**ctx.obj['config']['objstorage']) + """Import a local directory in an existing objstorage. + """ + objstorage = get_objstorage(**ctx.obj["config"]["objstorage"]) nobj = 0 volume = 0 t0 = time.time() for dirname in directory: for root, _dirs, files in os.walk(dirname): for name in files: path = os.path.join(root, name) - with open(path, 'rb') as f: + with open(path, "rb") as f: objstorage.add(f.read()) volume += os.stat(path).st_size nobj += 1 - click.echo('Imported %d files for a volume of %s bytes in %d seconds' % - (nobj, volume, time.time()-t0)) + click.echo( + "Imported %d files for a volume of %s bytes in %d seconds" + % (nobj, volume, time.time() - t0) + ) -@cli.command('fsck') +@cli.command("fsck") @click.pass_context def fsck(ctx): - '''Check the objstorage is not corrupted. - ''' - objstorage = get_objstorage(**ctx.obj['config']['objstorage']) + """Check the objstorage is not corrupted. + """ + objstorage = get_objstorage(**ctx.obj["config"]["objstorage"]) for obj_id in objstorage: try: objstorage.check(obj_id) except objstorage.Error as err: logging.error(err) def main(): - return cli(auto_envvar_prefix='SWH_OBJSTORAGE') + return cli(auto_envvar_prefix="SWH_OBJSTORAGE") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/swh/objstorage/exc.py b/swh/objstorage/exc.py index c90cdde..12ce578 100644 --- a/swh/objstorage/exc.py +++ b/swh/objstorage/exc.py @@ -1,25 +1,23 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information class Error(Exception): - def __str__(self): - return 'storage error on object: %s' % self.args + return "storage error on object: %s" % self.args class ObjNotFoundError(Error): - def __str__(self): - return 'object not found: %s' % self.args + return "object not found: %s" % self.args class ObjStorageAPIError(Exception): """ Specific internal exception of an object storage (mainly connection). 
""" def __str__(self): args = self.args - return 'An unexpected error occurred in the api backend: %s' % args + return "An unexpected error occurred in the api backend: %s" % args diff --git a/swh/objstorage/multiplexer/__init__.py b/swh/objstorage/multiplexer/__init__.py index 33f21ff..82cc913 100644 --- a/swh/objstorage/multiplexer/__init__.py +++ b/swh/objstorage/multiplexer/__init__.py @@ -1,5 +1,5 @@ from .multiplexer_objstorage import MultiplexerObjStorage from .striping_objstorage import StripingObjStorage -__all__ = ['MultiplexerObjStorage', 'StripingObjStorage'] +__all__ = ["MultiplexerObjStorage", "StripingObjStorage"] diff --git a/swh/objstorage/multiplexer/filter/__init__.py b/swh/objstorage/multiplexer/filter/__init__.py index ec9ca16..f991761 100644 --- a/swh/objstorage/multiplexer/filter/__init__.py +++ b/swh/objstorage/multiplexer/filter/__init__.py @@ -1,99 +1,94 @@ from .read_write_filter import ReadObjStorageFilter from .id_filter import RegexIdObjStorageFilter, PrefixIdObjStorageFilter _FILTERS_CLASSES = { - 'readonly': ReadObjStorageFilter, - 'regex': RegexIdObjStorageFilter, - 'prefix': PrefixIdObjStorageFilter + "readonly": ReadObjStorageFilter, + "regex": RegexIdObjStorageFilter, + "prefix": PrefixIdObjStorageFilter, } -_FILTERS_PRIORITY = { - 'readonly': 0, - 'prefix': 1, - 'regex': 2 -} +_FILTERS_PRIORITY = {"readonly": 0, "prefix": 1, "regex": 2} def read_only(): - return {'type': 'readonly'} + return {"type": "readonly"} def id_prefix(prefix): - return {'type': 'prefix', 'prefix': prefix} + return {"type": "prefix", "prefix": prefix} def id_regex(regex): - return {'type': 'regex', 'regex': regex} + return {"type": "regex", "regex": regex} def _filter_priority(filter_type): """Get the priority of this filter. Priority is a value that indicates if the operation of the filter is time-consuming (smaller values means quick execution), or very likely to be almost always the same value (False being small, and True high). In case the filters are chained, they will be ordered in a way that small priorities (quick execution or instantly break the chain) are executed first. Default value is 1. Value 0 is recommended for storages that change behavior only by disabling some operations (making the method return None). """ return _FILTERS_PRIORITY.get(filter_type, 1) def add_filter(storage, filter_conf): """Add a filter to the given storage. Args: storage (swh.objstorage.ObjStorage): storage which will be filtered. filter_conf (dict): configuration of an ObjStorageFilter, given as a dictionary that contains the keys: - type: which represent the type of filter, one of the keys of _FILTERS_CLASSES - Every arguments that this type of filter requires. Returns: A filtered storage that perform only the valid operations. """ - type = filter_conf['type'] - args = {k: v for k, v in filter_conf.items() if k != 'type'} + type = filter_conf["type"] + args = {k: v for k, v in filter_conf.items() if k != "type"} filtered_storage = _FILTERS_CLASSES[type](storage=storage, **args) return filtered_storage def add_filters(storage, filter_confs): """ Add multiple filters to the given storage. (See filter.add_filter) Args: storage (swh.objstorage.ObjStorage): storage which will be filtered. filter_confs (list): any number of filter conf, as a dict with: - type: which represent the type of filter, one of the keys of FILTERS. - Every arguments that this type of filter require. Returns: A filtered storage that fulfill the requirement of all the given filters. 
""" # Reverse sorting in order to put the filter with biggest priority first. - filter_confs.sort(key=lambda conf: _filter_priority(conf['type']), - reverse=True) + filter_confs.sort(key=lambda conf: _filter_priority(conf["type"]), reverse=True) # Add the bigest filter to the storage, and reduce it to accumulate filters # on top of it, until the smallest (fastest, see filter.filter_priority) is # added. for filter_conf in filter_confs: storage = add_filter(storage, filter_conf) return storage diff --git a/swh/objstorage/multiplexer/filter/filter.py b/swh/objstorage/multiplexer/filter/filter.py index 739655e..61523ce 100644 --- a/swh/objstorage/multiplexer/filter/filter.py +++ b/swh/objstorage/multiplexer/filter/filter.py @@ -1,78 +1,77 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from ...objstorage import ObjStorage class ObjStorageFilter(ObjStorage): """Base implementation of a filter that allow inputs on ObjStorage or not. This class copy the API of ...objstorage in order to filter the inputs of this class. If the operation is allowed, return the result of this operation applied to the destination implementation. Otherwise, just return without any operation. This class is an abstract base class for a classic read/write storage. Filters can inherit from it and only redefine some methods in order to change behavior. """ def __init__(self, storage): self.storage = storage def check_config(self, *, check_write): """Check the object storage for proper configuration. Args: check_write: check whether writes to the objstorage will succeed Returns: True if the storage is properly configured """ return self.storage.check_config(check_write=check_write) def __contains__(self, *args, **kwargs): return self.storage.__contains__(*args, **kwargs) def __iter__(self): """ Iterates over the content of each storages Warning: The `__iter__` methods frequently have bad performance. You almost certainly don't want to use this method in production as the wrapped storage may cause performance issues. """ return self.storage.__iter__() def __len__(self): """ Compute the number of objects in the current object storage. Warning: performance issue in `__iter__` also applies here. Returns: number of objects contained in the storage. 
""" return self.storage.__len__() def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): - return self.storage.add(content, obj_id, check_presence, - *args, **kwargs) + return self.storage.add(content, obj_id, check_presence, *args, **kwargs) def restore(self, content, obj_id=None, *args, **kwargs): return self.storage.restore(content, obj_id, *args, **kwargs) def get(self, obj_id, *args, **kwargs): return self.storage.get(obj_id, *args, **kwargs) def check(self, obj_id, *args, **kwargs): return self.storage.check(obj_id, *args, **kwargs) def delete(self, obj_id, *args, **kwargs): return self.storage.delete(obj_id, *args, **kwargs) def get_random(self, batch_size, *args, **kwargs): return self.storage.get_random(batch_size, *args, **kwargs) diff --git a/swh/objstorage/multiplexer/filter/id_filter.py b/swh/objstorage/multiplexer/filter/id_filter.py index 9285f9a..e01daf2 100644 --- a/swh/objstorage/multiplexer/filter/id_filter.py +++ b/swh/objstorage/multiplexer/filter/id_filter.py @@ -1,90 +1,93 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import re import abc from swh.model import hashutil from .filter import ObjStorageFilter from ...objstorage import compute_hash from ...exc import ObjNotFoundError class IdObjStorageFilter(ObjStorageFilter, metaclass=abc.ABCMeta): """ Filter that only allow operations if the object id match a requirement. Even for read operations, check before if the id match the requirements. This may prevent for unnecessary disk access. """ @abc.abstractmethod def is_valid(self, obj_id): """ Indicates if the given id is valid. """ - raise NotImplementedError('Implementations of an IdObjStorageFilter ' - 'must have a "is_valid" method') + raise NotImplementedError( + "Implementations of an IdObjStorageFilter " 'must have a "is_valid" method' + ) def __contains__(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.__contains__(*args, obj_id=obj_id, **kwargs) return False def __len__(self): return sum(1 for i in [id for id in self.storage if self.is_valid(id)]) def __iter__(self): yield from filter(lambda id: self.is_valid(id), iter(self.storage)) def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): if obj_id is None: obj_id = compute_hash(content) if self.is_valid(obj_id): return self.storage.add(content, *args, obj_id=obj_id, **kwargs) def restore(self, content, obj_id=None, *args, **kwargs): if obj_id is None: obj_id = compute_hash(content) if self.is_valid(obj_id): - return self.storage.restore(content, *args, - obj_id=obj_id, **kwargs) + return self.storage.restore(content, *args, obj_id=obj_id, **kwargs) def get(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.get(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def check(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.check(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def get_random(self, *args, **kwargs): - yield from filter(lambda id: self.is_valid(id), - self.storage.get_random(*args, **kwargs)) + yield from filter( + lambda id: self.is_valid(id), self.storage.get_random(*args, **kwargs) + ) class RegexIdObjStorageFilter(IdObjStorageFilter): """ Filter that allow operations if the content's id as hex match a regex. 
""" + def __init__(self, storage, regex): super().__init__(storage) self.regex = re.compile(regex) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.regex.match(hex_obj_id) is not None class PrefixIdObjStorageFilter(IdObjStorageFilter): """ Filter that allow operations if the hexlified id have a given prefix. """ + def __init__(self, storage, prefix): super().__init__(storage) self.prefix = str(prefix) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return str(hex_obj_id).startswith(self.prefix) diff --git a/swh/objstorage/multiplexer/filter/read_write_filter.py b/swh/objstorage/multiplexer/filter/read_write_filter.py index 1536815..5e24289 100644 --- a/swh/objstorage/multiplexer/filter/read_write_filter.py +++ b/swh/objstorage/multiplexer/filter/read_write_filter.py @@ -1,24 +1,25 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from .filter import ObjStorageFilter class ReadObjStorageFilter(ObjStorageFilter): """ Filter that disable write operation of the storage. Writes will always succeed without doing any actual write operations. """ + def check_config(self, *, check_write): return self.storage.check_config(check_write=False) def add(self, *args, **kwargs): return def restore(self, *args, **kwargs): return def delete(self, *args, **kwargs): return True diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py index 3ecd39f..f21bec4 100644 --- a/swh/objstorage/multiplexer/multiplexer_objstorage.py +++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py @@ -1,315 +1,314 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import queue import random import threading from ..objstorage import ObjStorage from ..exc import ObjNotFoundError class ObjStorageThread(threading.Thread): def __init__(self, storage): super().__init__(daemon=True) self.storage = storage self.commands = queue.Queue() def run(self): while True: try: mailbox, command, args, kwargs = self.commands.get(True, 0.05) except queue.Empty: continue try: ret = getattr(self.storage, command)(*args, **kwargs) except Exception as exc: - self.queue_result(mailbox, 'exception', exc) + self.queue_result(mailbox, "exception", exc) else: - self.queue_result(mailbox, 'result', ret) + self.queue_result(mailbox, "result", ret) def queue_command(self, command, *args, mailbox=None, **kwargs): """Enqueue a new command to be processed by the thread. Args: command (str): one of the method names for the underlying storage. mailbox (queue.Queue): explicit mailbox if the calling thread wants to override it. args, kwargs: arguments for the command. Returns: queue.Queue The mailbox you can read the response from """ if not mailbox: mailbox = queue.Queue() self.commands.put((mailbox, command, args, kwargs)) return mailbox def queue_result(self, mailbox, result_type, result): """Enqueue a new result in the mailbox This also provides a reference to the storage, which can be useful when an exceptional condition arises. 
Args: mailbox (queue.Queue): the mailbox to which we need to enqueue the result result_type (str): one of 'result', 'exception' result: the result to pass back to the calling thread """ - mailbox.put({ - 'type': result_type, - 'result': result, - }) + mailbox.put( + {"type": result_type, "result": result,} + ) @staticmethod def get_result_from_mailbox(mailbox, *args, **kwargs): """Unpack the result from the mailbox. Arguments: mailbox (queue.Queue): A mailbox to unpack a result from args, kwargs: arguments to :func:`mailbox.get` Returns: the next result unpacked from the queue Raises: either the exception we got back from the underlying storage, or :exc:`queue.Empty` if :func:`mailbox.get` raises that. """ result = mailbox.get(*args, **kwargs) - if result['type'] == 'exception': - raise result['result'] from None + if result["type"] == "exception": + raise result["result"] from None else: - return result['result'] + return result["result"] @staticmethod def collect_results(mailbox, num_results): """Collect num_results from the mailbox""" collected = 0 ret = [] while collected < num_results: try: - ret.append(ObjStorageThread.get_result_from_mailbox( - mailbox, True, 0.05 - )) + ret.append( + ObjStorageThread.get_result_from_mailbox(mailbox, True, 0.05) + ) except queue.Empty: continue collected += 1 return ret def __getattr__(self, attr): def call(*args, **kwargs): mailbox = self.queue_command(attr, *args, **kwargs) return self.get_result_from_mailbox(mailbox) + return call def __contains__(self, *args, **kwargs): - mailbox = self.queue_command('__contains__', *args, **kwargs) + mailbox = self.queue_command("__contains__", *args, **kwargs) return self.get_result_from_mailbox(mailbox) class MultiplexerObjStorage(ObjStorage): """Implementation of ObjStorage that distributes between multiple storages. The multiplexer object storage allows an input to be demultiplexed among multiple storages that will or will not accept it by themselves (see .filter package). As the ids can be different, no pre-computed ids should be submitted. Also, there are no guarantees that the returned ids can be used directly into the storages that the multiplexer manage. Use case examples follow. Example 1:: storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1', '0:2/2:4/4:6')) storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5') storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using 'storage', all the new contents will only be added to the v2 storage, while it will be retrievable from both. Example 2:: storage_v1 = filter.id_regex( PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'), r'[^012].*' ) storage_v2 = filter.if_regex( PathSlicingObjStorage('/dir2', '0:1/0:5'), r'[012]/*' ) storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using this storage, the contents with a sha1 starting with 0, 1 or 2 will be redirected (read AND write) to the storage_v2, while the others will be redirected to the storage_v1. If a content starting with 0, 1 or 2 is present in the storage_v1, it would be ignored anyway. 
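Behind the examples above, every wrapped storage runs in its own ObjStorageThread and each method call becomes a message on a queue. A rough sketch of the proxying, where backing_storage and obj_id are placeholders:

thread = ObjStorageThread(backing_storage)
thread.start()

# __getattr__ builds a proxy: this enqueues ("get", obj_id) and then blocks on
# a private mailbox until the worker thread posts either a result or an
# exception (which is re-raised in the calling thread).
content = thread.get(obj_id)

# Fan-out is the same trick with a shared mailbox: queue_command() on every
# thread, then collect_results() once per thread, which is what wrap_call()
# does just below.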
""" def __init__(self, storages, **kwargs): super().__init__(**kwargs) self.storages = storages - self.storage_threads = [ - ObjStorageThread(storage) for storage in storages - ] + self.storage_threads = [ObjStorageThread(storage) for storage in storages] for thread in self.storage_threads: thread.start() def wrap_call(self, threads, call, *args, **kwargs): threads = list(threads) mailbox = queue.Queue() for thread in threads: thread.queue_command(call, *args, mailbox=mailbox, **kwargs) return ObjStorageThread.collect_results(mailbox, len(threads)) def get_read_threads(self, obj_id=None): yield from self.storage_threads def get_write_threads(self, obj_id=None): yield from self.storage_threads def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ return all( - self.wrap_call(self.storage_threads, 'check_config', - check_write=check_write) + self.wrap_call( + self.storage_threads, "check_config", check_write=check_write + ) ) def __contains__(self, obj_id): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True if and only if the object is present in the current object storage. """ for storage in self.get_read_threads(obj_id): if obj_id in storage: return True return False def __iter__(self): def obj_iterator(): for storage in self.storages: yield from storage + return obj_iterator() def add(self, content, obj_id=None, check_presence=True): """ Add a new object to the object storage. If the adding step works in all the storages that accept this content, this is a success. Otherwise, the full adding step is an error even if it succeed in some of the storages. Args: content: content of the object to be added to the storage. obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence: indicate if the presence of the content should be verified before adding the file. Returns: an id of the object into the storage. As the write-storages are always readable as well, any id will be valid to retrieve a content. """ return self.wrap_call( - self.get_write_threads(obj_id), 'add', content, - obj_id=obj_id, check_presence=check_presence, + self.get_write_threads(obj_id), + "add", + content, + obj_id=obj_id, + check_presence=check_presence, ).pop() def add_batch(self, contents, check_presence=True): """Add a batch of new objects to the object storage. 
""" write_threads = list(self.get_write_threads()) results = self.wrap_call( - write_threads, 'add_batch', contents, - check_presence=check_presence, + write_threads, "add_batch", contents, check_presence=check_presence, ) - summed = {'object:add': 0, 'object:add:bytes': 0} + summed = {"object:add": 0, "object:add:bytes": 0} for result in results: - summed['object:add'] += result['object:add'] - summed['object:add:bytes'] += result['object:add:bytes'] + summed["object:add"] += result["object:add"] + summed["object:add:bytes"] += result["object:add:bytes"] return { - 'object:add': summed['object:add'] // len(results), - 'object:add:bytes': summed['object:add:bytes'] // len(results), + "object:add": summed["object:add"] // len(results), + "object:add:bytes": summed["object:add:bytes"] // len(results), } def restore(self, content, obj_id=None): return self.wrap_call( - self.get_write_threads(obj_id), 'restore', content, obj_id=obj_id, + self.get_write_threads(obj_id), "restore", content, obj_id=obj_id, ).pop() def get(self, obj_id): for storage in self.get_read_threads(obj_id): try: return storage.get(obj_id) except ObjNotFoundError: continue # If no storage contains this content, raise the error raise ObjNotFoundError(obj_id) def check(self, obj_id): nb_present = 0 for storage in self.get_read_threads(obj_id): try: storage.check(obj_id) except ObjNotFoundError: continue else: nb_present += 1 # If there is an Error because of a corrupted file, then let it pass. # Raise the ObjNotFoundError only if the content couldn't be found in # all the storages. if nb_present == 0: raise ObjNotFoundError(obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission - return all( - self.wrap_call(self.get_write_threads(obj_id), 'delete', obj_id) - ) + return all(self.wrap_call(self.get_write_threads(obj_id), "delete", obj_id)) def get_random(self, batch_size): - storages_set = [storage for storage in self.storages - if len(storage) > 0] + storages_set = [storage for storage in self.storages if len(storage) > 0] if len(storages_set) <= 0: return [] while storages_set: storage = random.choice(storages_set) try: return storage.get_random(batch_size) except NotImplementedError: storages_set.remove(storage) # There is no storage that allow the get_random operation raise NotImplementedError( "There is no storage implementation into the multiplexer that " "support the 'get_random' operation" ) diff --git a/swh/objstorage/multiplexer/striping_objstorage.py b/swh/objstorage/multiplexer/striping_objstorage.py index 8f063d2..ad06d27 100644 --- a/swh/objstorage/multiplexer/striping_objstorage.py +++ b/swh/objstorage/multiplexer/striping_objstorage.py @@ -1,75 +1,71 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import queue from .multiplexer_objstorage import ObjStorageThread, MultiplexerObjStorage class StripingObjStorage(MultiplexerObjStorage): """Stripes objects across multiple objstorages This objstorage implementation will write objects to objstorages in a predictable way: it takes the modulo of the last 8 bytes of the object identifier with the number of object storages passed, which will yield an (almost) even distribution. Objects are read from all storages in turn until it succeeds. 
""" + MOD_BYTES = 8 def __init__(self, storages, **kwargs): super().__init__(storages, **kwargs) self.num_storages = len(storages) def get_storage_index(self, obj_id): if obj_id is None: - raise ValueError( - 'StripingObjStorage always needs obj_id to be set' - ) + raise ValueError("StripingObjStorage always needs obj_id to be set") - index = int.from_bytes(obj_id[:-self.MOD_BYTES], 'big') + index = int.from_bytes(obj_id[: -self.MOD_BYTES], "big") return index % self.num_storages def get_write_threads(self, obj_id): idx = self.get_storage_index(obj_id) yield self.storage_threads[idx] def get_read_threads(self, obj_id=None): if obj_id: idx = self.get_storage_index(obj_id) else: idx = 0 for i in range(self.num_storages): yield self.storage_threads[(idx + i) % self.num_storages] def add_batch(self, contents, check_presence=True): """Add a batch of new objects to the object storage. """ content_by_storage_index = defaultdict(dict) for obj_id, content in contents.items(): storage_index = self.get_storage_index(obj_id) content_by_storage_index[storage_index][obj_id] = content mailbox = queue.Queue() for storage_index, contents in content_by_storage_index.items(): self.storage_threads[storage_index].queue_command( - 'add_batch', - contents, - check_presence=check_presence, - mailbox=mailbox, + "add_batch", contents, check_presence=check_presence, mailbox=mailbox, ) results = ObjStorageThread.collect_results( mailbox, len(content_by_storage_index) ) - summed = {'object:add': 0, 'object:add:bytes': 0} + summed = {"object:add": 0, "object:add:bytes": 0} for result in results: - summed['object:add'] += result['object:add'] - summed['object:add:bytes'] += result['object:add:bytes'] + summed["object:add"] += result["object:add"] + summed["object:add:bytes"] += result["object:add:bytes"] return summed diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index 62d5a90..3df1ab6 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,350 +1,352 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from itertools import dropwhile, islice import bz2 import lzma import zlib from swh.model import hashutil from typing import Dict from .exc import ObjNotFoundError -ID_HASH_ALGO = 'sha1' +ID_HASH_ALGO = "sha1" ID_HASH_LENGTH = 40 # Size in bytes of the hash hexadecimal representation. DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024 # Size in bytes of the streaming chunks DEFAULT_LIMIT = 10000 def compute_hash(content): """Compute the content's hash. 
Args: content (bytes): The raw content to hash hash_name (str): Hash's name (default to ID_HASH_ALGO) Returns: The ID_HASH_ALGO for the content """ - return hashutil.MultiHash.from_data( - content, - hash_names=[ID_HASH_ALGO], - ).digest().get(ID_HASH_ALGO) + return ( + hashutil.MultiHash.from_data(content, hash_names=[ID_HASH_ALGO],) + .digest() + .get(ID_HASH_ALGO) + ) class NullCompressor: def compress(self, data): return data def flush(self): - return b'' + return b"" class NullDecompressor: def decompress(self, data): return data @property def unused_data(self): - return b'' + return b"" decompressors = { - 'bz2': bz2.BZ2Decompressor, - 'lzma': lzma.LZMADecompressor, - 'gzip': lambda: zlib.decompressobj(wbits=31), - 'zlib': zlib.decompressobj, - 'none': NullDecompressor, + "bz2": bz2.BZ2Decompressor, + "lzma": lzma.LZMADecompressor, + "gzip": lambda: zlib.decompressobj(wbits=31), + "zlib": zlib.decompressobj, + "none": NullDecompressor, } compressors = { - 'bz2': bz2.BZ2Compressor, - 'lzma': lzma.LZMACompressor, - 'gzip': lambda: zlib.compressobj(wbits=31), - 'zlib': zlib.compressobj, - 'none': NullCompressor, + "bz2": bz2.BZ2Compressor, + "lzma": lzma.LZMACompressor, + "gzip": lambda: zlib.compressobj(wbits=31), + "zlib": zlib.compressobj, + "none": NullCompressor, } class ObjStorage(metaclass=abc.ABCMeta): """ High-level API to manipulate the Software Heritage object storage. Conceptually, the object storage offers the following methods: - check_config() check if the object storage is properly configured - __contains__() check if an object is present, by object id - add() add a new object, returning an object id - restore() same as add() but erase an already existed content - get() retrieve the content of an object, by object id - check() check the integrity of an object, by object id - delete() remove an object And some management methods: - get_random() get random object id of existing contents (used for the content integrity checker). Some of the methods have available streaming equivalents: - add_stream() same as add() but with a chunked iterator - restore_stream() same as add_stream() but erase already existing content - get_stream() same as get() but returns a chunked iterator Each implementation of this interface can have a different behavior and its own way to store the contents. """ + def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed self.allow_delete = allow_delete @abc.abstractmethod def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ pass @abc.abstractmethod def __contains__(self, obj_id, *args, **kwargs): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True if and only if the object is present in the current object storage. """ pass @abc.abstractmethod def add(self, content, obj_id=None, check_presence=True, *args, **kwargs): """Add a new object to the object storage. Args: content (bytes): object's raw content to add in storage. obj_id (bytes): checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence (bool): indicate if the presence of the content should be verified before adding the file. 
Returns: the id (bytes) of the object into the storage. """ pass def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage. Args: contents: mapping from obj_id to object contents Returns: the summary of objects added to the storage (count of object, count of bytes object) """ - summary = {'object:add': 0, 'object:add:bytes': 0} + summary = {"object:add": 0, "object:add:bytes": 0} for obj_id, content in contents.items(): if check_presence and obj_id in self: continue self.add(content, obj_id, check_presence=False) - summary['object:add'] += 1 - summary['object:add:bytes'] += len(content) + summary["object:add"] += 1 + summary["object:add:bytes"] += len(content) return summary def restore(self, content, obj_id=None, *args, **kwargs): """Restore a content that have been corrupted. This function is identical to add but does not check if the object id is already in the file system. The default implementation provided by the current class is suitable for most cases. Args: content (bytes): object's raw content to add in storage obj_id (bytes): checksum of `bytes` as computed by ID_HASH_ALGO. When given, obj_id will be trusted to match bytes. If missing, obj_id will be computed on the fly. """ # check_presence to false will erase the potential previous content. return self.add(content, obj_id, check_presence=False) @abc.abstractmethod def get(self, obj_id, *args, **kwargs): """Retrieve the content of a given object. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ pass def get_batch(self, obj_ids, *args, **kwargs): """Retrieve objects' raw content in bulk from storage. Note: This function does have a default implementation in ObjStorage that is suitable for most cases. For object storages that needs to do the minimal number of requests possible (ex: remote object storages), that method can be overridden to perform a more efficient operation. Args: obj_ids ([bytes]: list of object ids. Returns: list of resulting contents, or None if the content could not be retrieved. Do not raise any exception as a fail for one content will not cancel the whole request. """ for obj_id in obj_ids: try: yield self.get(obj_id) except ObjNotFoundError: yield None @abc.abstractmethod def check(self, obj_id, *args, **kwargs): """Perform an integrity check for a given object. Verify that the file object is in place and that the content matches the object id. Args: obj_id (bytes): object identifier. Raises: ObjNotFoundError: if the requested object is missing. Error: if the request object is corrupted. """ pass @abc.abstractmethod def delete(self, obj_id, *args, **kwargs): """Delete an object. Args: obj_id (bytes): object identifier. Raises: ObjNotFoundError: if the requested object is missing. """ if not self.allow_delete: raise PermissionError("Delete is not allowed.") # Management methods def get_random(self, batch_size, *args, **kwargs): """Get random ids of existing contents. This method is used in order to get random ids to perform content integrity verifications on random contents. Args: batch_size (int): Number of ids that will be given Yields: An iterable of ids (bytes) of contents that are in the current object storage. """ pass # Streaming methods def add_stream(self, content_iter, obj_id, check_presence=True): """Add a new object to the object storage using streaming. 
This function is identical to add() except it takes a generator that yields the chunked content instead of the whole content at once. Args: content (bytes): chunked generator that yields the object's raw content to add in storage. obj_id (bytes): object identifier check_presence (bool): indicate if the presence of the content should be verified before adding the file. Returns: the id (bytes) of the object into the storage. """ raise NotImplementedError def restore_stream(self, content_iter, obj_id=None): """Restore a content that have been corrupted using streaming. This function is identical to restore() except it takes a generator that yields the chunked content instead of the whole content at once. The default implementation provided by the current class is suitable for most cases. Args: content (bytes): chunked generator that yields the object's raw content to add in storage. obj_id (bytes): object identifier """ # check_presence to false will erase the potential previous content. return self.add_stream(content_iter, obj_id, check_presence=False) def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): """Retrieve the content of a given object as a chunked iterator. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ raise NotImplementedError def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): """Generates known object ids. Args: last_obj_id (bytes): object id from which to iterate from (excluded). limit (int): max number of object ids to generate. Generates: obj_id (bytes): object ids. """ it = iter(self) if last_obj_id: it = dropwhile(lambda x: x <= last_obj_id, it) return islice(it, limit) diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py index 2eecdd3..df28879 100644 --- a/swh/objstorage/tests/objstorage_testing.py +++ b/swh/objstorage/tests/objstorage_testing.py @@ -1,219 +1,221 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import time import collections from swh.objstorage import exc from swh.objstorage.objstorage import compute_hash class ObjStorageTestFixture: - def hash_content(self, content): obj_id = compute_hash(content) return content, obj_id def assertContentMatch(self, obj_id, expected_content): # noqa content = self.storage.get(obj_id) self.assertEqual(content, expected_content) def test_check_config(self): self.assertTrue(self.storage.check_config(check_write=False)) self.assertTrue(self.storage.check_config(check_write=True)) def test_contains(self): - content_p, obj_id_p = self.hash_content(b'contains_present') - content_m, obj_id_m = self.hash_content(b'contains_missing') + content_p, obj_id_p = self.hash_content(b"contains_present") + content_m, obj_id_m = self.hash_content(b"contains_missing") self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_add_get_w_id(self): - content, obj_id = self.hash_content(b'add_get_w_id') + content, obj_id = self.hash_content(b"add_get_w_id") r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) def test_add_big(self): - content, obj_id = self.hash_content(b'add_big' * 1024 * 1024) + content, obj_id = 
self.hash_content(b"add_big" * 1024 * 1024) r = self.storage.add(content, obj_id=obj_id) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) def test_add_get_wo_id(self): - content, obj_id = self.hash_content(b'add_get_wo_id') + content, obj_id = self.hash_content(b"add_get_wo_id") r = self.storage.add(content) self.assertEqual(obj_id, r) self.assertContentMatch(obj_id, content) def test_add_get_batch(self): - content1, obj_id1 = self.hash_content(b'add_get_batch_1') - content2, obj_id2 = self.hash_content(b'add_get_batch_2') + content1, obj_id1 = self.hash_content(b"add_get_batch_1") + content2, obj_id2 = self.hash_content(b"add_get_batch_2") self.storage.add(content1, obj_id1) self.storage.add(content2, obj_id2) cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2]) self.assertEqual(cr1, content1) self.assertEqual(cr2, content2) def test_get_batch_unexisting_content(self): - content, obj_id = self.hash_content(b'get_batch_unexisting_content') + content, obj_id = self.hash_content(b"get_batch_unexisting_content") result = list(self.storage.get_batch([obj_id])) self.assertTrue(len(result) == 1) self.assertIsNone(result[0]) def test_restore_content(self): - valid_content, valid_obj_id = self.hash_content(b'restore_content') - invalid_content = b'unexpected content' + valid_content, valid_obj_id = self.hash_content(b"restore_content") + invalid_content = b"unexpected content" id_adding = self.storage.add(invalid_content, valid_obj_id) self.assertEqual(id_adding, valid_obj_id) with self.assertRaises(exc.Error): self.storage.check(id_adding) id_restore = self.storage.restore(valid_content, valid_obj_id) self.assertEqual(id_restore, valid_obj_id) self.assertContentMatch(valid_obj_id, valid_content) def test_get_missing(self): - content, obj_id = self.hash_content(b'get_missing') + content, obj_id = self.hash_content(b"get_missing") with self.assertRaises(exc.ObjNotFoundError) as e: self.storage.get(obj_id) self.assertIn(obj_id, e.exception.args) def test_check_missing(self): - content, obj_id = self.hash_content(b'check_missing') + content, obj_id = self.hash_content(b"check_missing") with self.assertRaises(exc.Error): self.storage.check(obj_id) def test_check_present(self): - content, obj_id = self.hash_content(b'check_present') + content, obj_id = self.hash_content(b"check_present") self.storage.add(content, obj_id) try: self.storage.check(obj_id) except exc.Error: - self.fail('Integrity check failed') + self.fail("Integrity check failed") def test_delete_missing(self): self.storage.allow_delete = True - content, obj_id = self.hash_content(b'missing_content_to_delete') + content, obj_id = self.hash_content(b"missing_content_to_delete") with self.assertRaises(exc.Error): self.storage.delete(obj_id) def test_delete_present(self): self.storage.allow_delete = True - content, obj_id = self.hash_content(b'content_to_delete') + content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) self.assertTrue(self.storage.delete(obj_id)) with self.assertRaises(exc.Error): self.storage.get(obj_id) def test_delete_not_allowed(self): self.storage.allow_delete = False - content, obj_id = self.hash_content(b'content_to_delete') + content, obj_id = self.hash_content(b"content_to_delete") self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.storage.delete(obj_id) def test_delete_not_allowed_by_default(self): - content, obj_id = self.hash_content(b'content_to_delete') + content, obj_id = self.hash_content(b"content_to_delete") 
self.storage.add(content, obj_id=obj_id) with self.assertRaises(PermissionError): self.assertTrue(self.storage.delete(obj_id)) def test_add_stream(self): - content = [b'chunk1', b'chunk2'] - _, obj_id = self.hash_content(b''.join(content)) + content = [b"chunk1", b"chunk2"] + _, obj_id = self.hash_content(b"".join(content)) try: self.storage.add_stream(iter(content), obj_id=obj_id) except NotImplementedError: return - self.assertContentMatch(obj_id, b''.join(content)) + self.assertContentMatch(obj_id, b"".join(content)) def test_add_stream_sleep(self): def gen_content(): - yield b'chunk1' + yield b"chunk1" time.sleep(0.5) - yield b'chunk42' - _, obj_id = self.hash_content(b'placeholder_id') + yield b"chunk42" + + _, obj_id = self.hash_content(b"placeholder_id") try: self.storage.add_stream(gen_content(), obj_id=obj_id) except NotImplementedError: return - self.assertContentMatch(obj_id, b'chunk1chunk42') + self.assertContentMatch(obj_id, b"chunk1chunk42") def test_get_stream(self): - content = b'123456789' + content = b"123456789" _, obj_id = self.hash_content(content) self.storage.add(content, obj_id=obj_id) r = self.storage.get(obj_id) self.assertEqual(r, content) try: r = self.storage.get_stream(obj_id, chunk_size=1) except NotImplementedError: return self.assertTrue(isinstance(r, collections.Iterator)) r = list(r) - self.assertEqual(b''.join(r), content) + self.assertEqual(b"".join(r), content) def test_add_batch(self): contents = {} expected_content_add = 0 expected_content_add_bytes = 0 for i in range(50): - content = b'Test content %02d' % i + content = b"Test content %02d" % i content, obj_id = self.hash_content(content) contents[obj_id] = content expected_content_add_bytes += len(content) expected_content_add += 1 ret = self.storage.add_batch(contents) - self.assertEqual(ret, { - 'object:add': expected_content_add, - 'object:add:bytes': expected_content_add_bytes, - }) + self.assertEqual( + ret, + { + "object:add": expected_content_add, + "object:add:bytes": expected_content_add_bytes, + }, + ) for obj_id in contents: self.assertIn(obj_id, self.storage) def test_content_iterator(self): sto_obj_ids = iter(self.storage) sto_obj_ids = list(sto_obj_ids) self.assertFalse(sto_obj_ids) obj_ids = set() for i in range(100): - content, obj_id = self.hash_content(b'content %d' % i) + content, obj_id = self.hash_content(b"content %d" % i) self.storage.add(content, obj_id=obj_id) obj_ids.add(obj_id) sto_obj_ids = set(self.storage) self.assertEqual(sto_obj_ids, obj_ids) def test_list_content(self): all_ids = [] for i in range(1200): - content = b'example %d' % i + content = b"example %d" % i obj_id = compute_hash(content) self.storage.add(content, obj_id) all_ids.append(obj_id) all_ids.sort() ids = list(self.storage.list_content()) self.assertEqual(len(ids), 1200) self.assertEqual(ids[0], all_ids[0]) self.assertEqual(ids[100], all_ids[100]) self.assertEqual(ids[999], all_ids[999]) ids = list(self.storage.list_content(limit=10)) self.assertEqual(len(ids), 10) self.assertEqual(ids[0], all_ids[0]) self.assertEqual(ids[9], all_ids[9]) - ids = list(self.storage.list_content( - last_obj_id=all_ids[999], limit=100)) + ids = list(self.storage.list_content(last_obj_id=all_ids[999], limit=100)) self.assertEqual(len(ids), 100) self.assertEqual(ids[0], all_ids[1000]) self.assertEqual(ids[9], all_ids[1009]) diff --git a/swh/objstorage/tests/test_multiplexer_filter.py b/swh/objstorage/tests/test_multiplexer_filter.py index d292d2f..fbeb23a 100644 --- a/swh/objstorage/tests/test_multiplexer_filter.py +++ 
b/swh/objstorage/tests/test_multiplexer_filter.py @@ -1,324 +1,331 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import random import shutil import tempfile import unittest from string import ascii_lowercase from swh.model import hashutil from swh.objstorage import get_objstorage from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.multiplexer.filter import id_prefix, id_regex, read_only from swh.objstorage.objstorage import compute_hash def get_random_content(): - return bytes(''.join(random.sample(ascii_lowercase, 10)), 'utf8') + return bytes("".join(random.sample(ascii_lowercase, 10)), "utf8") class MixinTestReadFilter(unittest.TestCase): # Read only filter should not allow writing def setUp(self): super().setUp() self.tmpdir = tempfile.mkdtemp() - pstorage = {'cls': 'pathslicing', - 'args': {'root': self.tmpdir, - 'slicing': '0:5'}} + pstorage = { + "cls": "pathslicing", + "args": {"root": self.tmpdir, "slicing": "0:5"}, + } base_storage = get_objstorage(**pstorage) base_storage.id = compute_hash - self.storage = get_objstorage('filtered', - {'storage_conf': pstorage, - 'filters_conf': [read_only()]}) - self.valid_content = b'pre-existing content' - self.invalid_content = b'invalid_content' - self.true_invalid_content = b'Anything that is not correct' - self.absent_content = b'non-existent content' + self.storage = get_objstorage( + "filtered", {"storage_conf": pstorage, "filters_conf": [read_only()]} + ) + self.valid_content = b"pre-existing content" + self.invalid_content = b"invalid_content" + self.true_invalid_content = b"Anything that is not correct" + self.absent_content = b"non-existent content" # Create a valid content. self.valid_id = base_storage.add(self.valid_content) # Create an invalid id and add a content with it. self.invalid_id = base_storage.id(self.true_invalid_content) base_storage.add(self.invalid_content, obj_id=self.invalid_id) # Compute an id for a non-existing content. 
self.absent_id = base_storage.id(self.absent_content) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_can_contains(self): self.assertTrue(self.valid_id in self.storage) self.assertTrue(self.invalid_id in self.storage) self.assertFalse(self.absent_id in self.storage) def test_can_iter(self): self.assertIn(self.valid_id, iter(self.storage)) self.assertIn(self.invalid_id, iter(self.storage)) def test_can_len(self): self.assertEqual(2, len(self.storage)) def test_can_get(self): self.assertEqual(self.valid_content, self.storage.get(self.valid_id)) - self.assertEqual(self.invalid_content, - self.storage.get(self.invalid_id)) + self.assertEqual(self.invalid_content, self.storage.get(self.invalid_id)) def test_can_check(self): with self.assertRaises(ObjNotFoundError): self.storage.check(self.absent_id) with self.assertRaises(Error): self.storage.check(self.invalid_id) self.storage.check(self.valid_id) def test_can_get_random(self): self.assertEqual(1, len(list(self.storage.get_random(1)))) - self.assertEqual(len(list(self.storage)), - len(set(self.storage.get_random(1000)))) + self.assertEqual( + len(list(self.storage)), len(set(self.storage.get_random(1000))) + ) def test_cannot_add(self): - new_id = self.storage.add(b'New content') + new_id = self.storage.add(b"New content") result = self.storage.add(self.valid_content, self.valid_id) self.assertIsNone(new_id, self.storage) self.assertIsNone(result) def test_cannot_restore(self): result = self.storage.restore(self.valid_content, self.valid_id) self.assertIsNone(result) -class MixinTestIdFilter(): +class MixinTestIdFilter: """ Mixin class that tests the filters based on filter.IdFilter Methods "make_valid", "make_invalid" and "filter_storage" must be implemented by subclasses. """ def setUp(self): super().setUp() # Use a hack here : as the mock uses the content as id, it is easy to # create contents that are filtered or not. 
- self.prefix = '71' + self.prefix = "71" self.tmpdir = tempfile.mkdtemp() # Make the storage filtered - self.sconf = {'cls': 'pathslicing', - 'args': {'root': self.tmpdir, - 'slicing': '0:5'}} + self.sconf = { + "cls": "pathslicing", + "args": {"root": self.tmpdir, "slicing": "0:5"}, + } storage = get_objstorage(**self.sconf) self.base_storage = storage self.storage = self.filter_storage(self.sconf) # Set the id calculators storage.id = compute_hash # Present content with valid id - self.present_valid_content = self.ensure_valid(b'yroqdtotji') + self.present_valid_content = self.ensure_valid(b"yroqdtotji") self.present_valid_id = storage.id(self.present_valid_content) # Present content with invalid id - self.present_invalid_content = self.ensure_invalid(b'glxddlmmzb') + self.present_invalid_content = self.ensure_invalid(b"glxddlmmzb") self.present_invalid_id = storage.id(self.present_invalid_content) # Missing content with valid id - self.missing_valid_content = self.ensure_valid(b'rmzkdclkez') + self.missing_valid_content = self.ensure_valid(b"rmzkdclkez") self.missing_valid_id = storage.id(self.missing_valid_content) # Missing content with invalid id - self.missing_invalid_content = self.ensure_invalid(b'hlejfuginh') + self.missing_invalid_content = self.ensure_invalid(b"hlejfuginh") self.missing_invalid_id = storage.id(self.missing_invalid_content) # Present corrupted content with valid id - self.present_corrupted_valid_content = self.ensure_valid(b'cdsjwnpaij') - self.true_present_corrupted_valid_content = self.ensure_valid( - b'mgsdpawcrr') + self.present_corrupted_valid_content = self.ensure_valid(b"cdsjwnpaij") + self.true_present_corrupted_valid_content = self.ensure_valid(b"mgsdpawcrr") self.present_corrupted_valid_id = storage.id( - self.true_present_corrupted_valid_content) + self.true_present_corrupted_valid_content + ) # Present corrupted content with invalid id - self.present_corrupted_invalid_content = self.ensure_invalid( - b'pspjljnrco') - self.true_present_corrupted_invalid_content = self.ensure_invalid( - b'rjocbnnbso') + self.present_corrupted_invalid_content = self.ensure_invalid(b"pspjljnrco") + self.true_present_corrupted_invalid_content = self.ensure_invalid(b"rjocbnnbso") self.present_corrupted_invalid_id = storage.id( - self.true_present_corrupted_invalid_content) + self.true_present_corrupted_invalid_content + ) # Missing (potentially) corrupted content with valid id - self.missing_corrupted_valid_content = self.ensure_valid( - b'zxkokfgtou') - self.true_missing_corrupted_valid_content = self.ensure_valid( - b'royoncooqa') + self.missing_corrupted_valid_content = self.ensure_valid(b"zxkokfgtou") + self.true_missing_corrupted_valid_content = self.ensure_valid(b"royoncooqa") self.missing_corrupted_valid_id = storage.id( - self.true_missing_corrupted_valid_content) + self.true_missing_corrupted_valid_content + ) # Missing (potentially) corrupted content with invalid id - self.missing_corrupted_invalid_content = self.ensure_invalid( - b'hxaxnrmnyk') - self.true_missing_corrupted_invalid_content = self.ensure_invalid( - b'qhbolyuifr') + self.missing_corrupted_invalid_content = self.ensure_invalid(b"hxaxnrmnyk") + self.true_missing_corrupted_invalid_content = self.ensure_invalid(b"qhbolyuifr") self.missing_corrupted_invalid_id = storage.id( - self.true_missing_corrupted_invalid_content) + self.true_missing_corrupted_invalid_content + ) # Add the content that are supposed to be present self.storage.add(self.present_valid_content) 
self.storage.add(self.present_invalid_content) - self.storage.add(self.present_corrupted_valid_content, - obj_id=self.present_corrupted_valid_id) - self.storage.add(self.present_corrupted_invalid_content, - obj_id=self.present_corrupted_invalid_id) + self.storage.add( + self.present_corrupted_valid_content, obj_id=self.present_corrupted_valid_id + ) + self.storage.add( + self.present_corrupted_invalid_content, + obj_id=self.present_corrupted_invalid_id, + ) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def filter_storage(self, sconf): raise NotImplementedError( - 'Id_filter test class must have a filter_storage method') + "Id_filter test class must have a filter_storage method" + ) def ensure_valid(self, content=None): if content is None: content = get_random_content() while not self.storage.is_valid(self.base_storage.id(content)): content = get_random_content() return content def ensure_invalid(self, content=None): if content is None: content = get_random_content() while self.storage.is_valid(self.base_storage.id(content)): content = get_random_content() return content def test_contains(self): # Both contents are present, but the invalid one should be ignored. self.assertTrue(self.present_valid_id in self.storage) self.assertFalse(self.present_invalid_id in self.storage) self.assertFalse(self.missing_valid_id in self.storage) self.assertFalse(self.missing_invalid_id in self.storage) self.assertTrue(self.present_corrupted_valid_id in self.storage) self.assertFalse(self.present_corrupted_invalid_id in self.storage) self.assertFalse(self.missing_corrupted_valid_id in self.storage) self.assertFalse(self.missing_corrupted_invalid_id in self.storage) def test_iter(self): self.assertIn(self.present_valid_id, iter(self.storage)) self.assertNotIn(self.present_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_valid_id, iter(self.storage)) self.assertNotIn(self.missing_invalid_id, iter(self.storage)) self.assertIn(self.present_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.present_corrupted_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_invalid_id, iter(self.storage)) def test_len(self): # Four contents are present, but only two should be valid. 
self.assertEqual(2, len(self.storage)) def test_get(self): - self.assertEqual(self.present_valid_content, - self.storage.get(self.present_valid_id)) + self.assertEqual( + self.present_valid_content, self.storage.get(self.present_valid_id) + ) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_invalid_id) - self.assertEqual(self.present_corrupted_valid_content, - self.storage.get(self.present_corrupted_valid_id)) + self.assertEqual( + self.present_corrupted_valid_content, + self.storage.get(self.present_corrupted_valid_id), + ) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_invalid_id) def test_check(self): self.storage.check(self.present_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_invalid_id) with self.assertRaises(Error): self.storage.check(self.present_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_invalid_id) def test_get_random(self): self.assertEqual(0, len(list(self.storage.get_random(0)))) random_content = list(self.storage.get_random(1000)) self.assertIn(self.present_valid_id, random_content) self.assertNotIn(self.present_invalid_id, random_content) self.assertNotIn(self.missing_valid_id, random_content) self.assertNotIn(self.missing_invalid_id, random_content) self.assertIn(self.present_corrupted_valid_id, random_content) self.assertNotIn(self.present_corrupted_invalid_id, random_content) self.assertNotIn(self.missing_corrupted_valid_id, random_content) self.assertNotIn(self.missing_corrupted_invalid_id, random_content) def test_add(self): # Add valid and invalid contents to the storage and check their # presence with the unfiltered storage. 
- valid_content = self.ensure_valid(b'ulepsrjbgt') + valid_content = self.ensure_valid(b"ulepsrjbgt") valid_id = self.base_storage.id(valid_content) - invalid_content = self.ensure_invalid(b'znvghkjked') + invalid_content = self.ensure_invalid(b"znvghkjked") invalid_id = self.base_storage.id(invalid_content) self.storage.add(valid_content) self.storage.add(invalid_content) self.assertTrue(valid_id in self.base_storage) self.assertFalse(invalid_id in self.base_storage) def test_restore(self): # Add corrupted content to the storage and the try to restore it - valid_content = self.ensure_valid(b'ulepsrjbgt') + valid_content = self.ensure_valid(b"ulepsrjbgt") valid_id = self.base_storage.id(valid_content) - corrupted_content = self.ensure_valid(b'ltjkjsloyb') + corrupted_content = self.ensure_valid(b"ltjkjsloyb") corrupted_id = self.base_storage.id(corrupted_content) self.storage.add(corrupted_content, obj_id=valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(corrupted_id) with self.assertRaises(Error): self.storage.check(valid_id) self.storage.restore(valid_content) self.storage.check(valid_id) class TestPrefixFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): - self.prefix = b'71' + self.prefix = b"71" super().setUp() def ensure_valid(self, content): obj_id = compute_hash(content) hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertTrue(hex_obj_id.startswith(self.prefix)) return content def ensure_invalid(self, content): obj_id = compute_hash(content) hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertFalse(hex_obj_id.startswith(self.prefix)) return content def filter_storage(self, sconf): - return get_objstorage('filtered', - {'storage_conf': sconf, - 'filters_conf': [id_prefix(self.prefix)]}) + return get_objstorage( + "filtered", + {"storage_conf": sconf, "filters_conf": [id_prefix(self.prefix)]}, + ) class TestRegexFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): - self.regex = r'[a-f][0-9].*' + self.regex = r"[a-f][0-9].*" super().setUp() def filter_storage(self, sconf): - return get_objstorage('filtered', - {'storage_conf': sconf, - 'filters_conf': [id_regex(self.regex)]}) + return get_objstorage( + "filtered", {"storage_conf": sconf, "filters_conf": [id_regex(self.regex)]} + ) diff --git a/swh/objstorage/tests/test_objstorage_api.py b/swh/objstorage/tests/test_objstorage_api.py index 59ddb5d..c77d2ee 100644 --- a/swh/objstorage/tests/test_objstorage_api.py +++ b/swh/objstorage/tests/test_objstorage_api.py @@ -1,52 +1,51 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import shutil import tempfile import unittest import pytest from swh.core.api.tests.server_testing import ServerTestFixtureAsync from swh.objstorage import get_objstorage from swh.objstorage.api.server import make_app from swh.objstorage.tests.objstorage_testing import ObjStorageTestFixture -class TestRemoteObjStorage(ServerTestFixtureAsync, ObjStorageTestFixture, - unittest.TestCase): +class TestRemoteObjStorage( + ServerTestFixtureAsync, ObjStorageTestFixture, unittest.TestCase +): """ Test the remote archive API. 
""" def setUp(self): self.tmpdir = tempfile.mkdtemp() self.config = { - 'objstorage': { - 'cls': 'pathslicing', - 'args': { - 'root': self.tmpdir, - 'slicing': '0:1/0:5', - 'allow_delete': True, - } + "objstorage": { + "cls": "pathslicing", + "args": { + "root": self.tmpdir, + "slicing": "0:1/0:5", + "allow_delete": True, + }, }, - 'client_max_size': 8 * 1024 * 1024, + "client_max_size": 8 * 1024 * 1024, } self.app = make_app(self.config) super().setUp() - self.storage = get_objstorage('remote', { - 'url': self.url() - }) + self.storage = get_objstorage("remote", {"url": self.url()}) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) - @pytest.mark.skip('makes no sense to test this for the remote api') + @pytest.mark.skip("makes no sense to test this for the remote api") def test_delete_not_allowed(self): pass - @pytest.mark.skip('makes no sense to test this for the remote api') + @pytest.mark.skip("makes no sense to test this for the remote api") def test_delete_not_allowed_by_default(self): pass diff --git a/swh/objstorage/tests/test_objstorage_azure.py b/swh/objstorage/tests/test_objstorage_azure.py index fc9bfd5..629dc1a 100644 --- a/swh/objstorage/tests/test_objstorage_azure.py +++ b/swh/objstorage/tests/test_objstorage_azure.py @@ -1,186 +1,179 @@ # Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from collections import defaultdict from unittest.mock import patch from typing import Any, Dict from azure.common import AzureMissingResourceHttpError from swh.model.hashutil import hash_to_hex from swh.objstorage import get_objstorage from swh.objstorage.objstorage import decompressors from swh.objstorage.exc import Error from .objstorage_testing import ObjStorageTestFixture -class MockBlob(): +class MockBlob: """ Libcloud object mock that replicates its API """ + def __init__(self, name, content): self.name = name self.content = content -class MockBlockBlobService(): +class MockBlockBlobService: """Mock internal azure library which AzureCloudObjStorage depends upon. 
""" + _data: Dict[str, Any] = {} def __init__(self, account_name, account_key, **kwargs): # do not care for the account_name and the api_secret_key here self._data = defaultdict(dict) def get_container_properties(self, container_name): self._data[container_name] return container_name in self._data def create_blob_from_bytes(self, container_name, blob_name, blob): self._data[container_name][blob_name] = blob def get_blob_to_bytes(self, container_name, blob_name): if blob_name not in self._data[container_name]: - raise AzureMissingResourceHttpError( - 'Blob %s not found' % blob_name, - 404) - return MockBlob(name=blob_name, - content=self._data[container_name][blob_name]) + raise AzureMissingResourceHttpError("Blob %s not found" % blob_name, 404) + return MockBlob(name=blob_name, content=self._data[container_name][blob_name]) def delete_blob(self, container_name, blob_name): try: self._data[container_name].pop(blob_name) except KeyError: - raise AzureMissingResourceHttpError( - 'Blob %s not found' % blob_name, 404) + raise AzureMissingResourceHttpError("Blob %s not found" % blob_name, 404) return True def exists(self, container_name, blob_name): return blob_name in self._data[container_name] def list_blobs(self, container_name, marker=None, maxresults=None): for blob_name, content in sorted(self._data[container_name].items()): if marker is None or blob_name > marker: yield MockBlob(name=blob_name, content=content) class TestAzureCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): - compression = 'none' + compression = "none" def setUp(self): super().setUp() patcher = patch( - 'swh.objstorage.backends.azure.BlockBlobService', - MockBlockBlobService, + "swh.objstorage.backends.azure.BlockBlobService", MockBlockBlobService, ) patcher.start() self.addCleanup(patcher.stop) - self.storage = get_objstorage('azure', { - 'account_name': 'account-name', - 'api_secret_key': 'api-secret-key', - 'container_name': 'container-name', - 'compression': self.compression, - }) + self.storage = get_objstorage( + "azure", + { + "account_name": "account-name", + "api_secret_key": "api-secret-key", + "container_name": "container-name", + "compression": self.compression, + }, + ) def test_compression(self): - content, obj_id = self.hash_content(b'test content is compressed') + content, obj_id = self.hash_content(b"test content is compressed") self.storage.add(content, obj_id=obj_id) blob_service, container = self.storage.get_blob_service(obj_id) internal_id = self.storage._internal_id(obj_id) raw_blob = blob_service.get_blob_to_bytes(container, internal_id) d = decompressors[self.compression]() assert d.decompress(raw_blob.content) == content - assert d.unused_data == b'' + assert d.unused_data == b"" def test_trailing_data_on_stored_blob(self): - content, obj_id = self.hash_content(b'test content without garbage') + content, obj_id = self.hash_content(b"test content without garbage") self.storage.add(content, obj_id=obj_id) blob_service, container = self.storage.get_blob_service(obj_id) internal_id = self.storage._internal_id(obj_id) - blob_service._data[container][internal_id] += b'trailing garbage' + blob_service._data[container][internal_id] += b"trailing garbage" - if self.compression == 'none': + if self.compression == "none": with self.assertRaises(Error) as e: self.storage.check(obj_id) else: with self.assertRaises(Error) as e: self.storage.get(obj_id) - assert 'trailing data' in e.exception.args[0] + assert "trailing data" in e.exception.args[0] class 
TestAzureCloudObjStorageGzip(TestAzureCloudObjStorage): - compression = 'gzip' + compression = "gzip" class TestAzureCloudObjStorageZlib(TestAzureCloudObjStorage): - compression = 'zlib' + compression = "zlib" class TestAzureCloudObjStorageLzma(TestAzureCloudObjStorage): - compression = 'lzma' + compression = "lzma" class TestAzureCloudObjStorageBz2(TestAzureCloudObjStorage): - compression = 'bz2' + compression = "bz2" -class TestPrefixedAzureCloudObjStorage(ObjStorageTestFixture, - unittest.TestCase): +class TestPrefixedAzureCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() patcher = patch( - 'swh.objstorage.backends.azure.BlockBlobService', - MockBlockBlobService, + "swh.objstorage.backends.azure.BlockBlobService", MockBlockBlobService, ) patcher.start() self.addCleanup(patcher.stop) self.accounts = {} - for prefix in '0123456789abcdef': + for prefix in "0123456789abcdef": self.accounts[prefix] = { - 'account_name': 'account_%s' % prefix, - 'api_secret_key': 'secret_key_%s' % prefix, - 'container_name': 'container_%s' % prefix, + "account_name": "account_%s" % prefix, + "api_secret_key": "secret_key_%s" % prefix, + "container_name": "container_%s" % prefix, } - self.storage = get_objstorage('azure-prefixed', { - 'accounts': self.accounts - }) + self.storage = get_objstorage("azure-prefixed", {"accounts": self.accounts}) def test_prefixedazure_instantiation_missing_prefixes(self): - del self.accounts['d'] - del self.accounts['e'] + del self.accounts["d"] + del self.accounts["e"] - with self.assertRaisesRegex(ValueError, 'Missing prefixes'): - get_objstorage('azure-prefixed', { - 'accounts': self.accounts - }) + with self.assertRaisesRegex(ValueError, "Missing prefixes"): + get_objstorage("azure-prefixed", {"accounts": self.accounts}) def test_prefixedazure_instantiation_inconsistent_prefixes(self): - self.accounts['00'] = self.accounts['0'] + self.accounts["00"] = self.accounts["0"] - with self.assertRaisesRegex(ValueError, 'Inconsistent prefixes'): - get_objstorage('azure-prefixed', { - 'accounts': self.accounts - }) + with self.assertRaisesRegex(ValueError, "Inconsistent prefixes"): + get_objstorage("azure-prefixed", {"accounts": self.accounts}) def test_prefixedazure_sharding_behavior(self): for i in range(100): - content, obj_id = self.hash_content(b'test_content_%02d' % i) + content, obj_id = self.hash_content(b"test_content_%02d" % i) self.storage.add(content, obj_id=obj_id) hex_obj_id = hash_to_hex(obj_id) prefix = hex_obj_id[0] self.assertTrue( self.storage.prefixes[prefix][0].exists( - self.accounts[prefix]['container_name'], hex_obj_id - )) + self.accounts[prefix]["container_name"], hex_obj_id + ) + ) diff --git a/swh/objstorage/tests/test_objstorage_cloud.py b/swh/objstorage/tests/test_objstorage_cloud.py index 1a5c04b..f6d175b 100644 --- a/swh/objstorage/tests/test_objstorage_cloud.py +++ b/swh/objstorage/tests/test_objstorage_cloud.py @@ -1,162 +1,164 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Optional import unittest from libcloud.common.types import InvalidCredsError -from libcloud.storage.types import (ContainerDoesNotExistError, - ObjectDoesNotExistError) +from libcloud.storage.types import ContainerDoesNotExistError, ObjectDoesNotExistError from swh.objstorage.objstorage import decompressors from 
swh.objstorage.exc import Error from swh.objstorage.backends.libcloud import CloudObjStorage from .objstorage_testing import ObjStorageTestFixture -API_KEY = 'API_KEY' -API_SECRET_KEY = 'API SECRET KEY' -CONTAINER_NAME = 'test_container' +API_KEY = "API_KEY" +API_SECRET_KEY = "API SECRET KEY" +CONTAINER_NAME = "test_container" -class MockLibcloudObject(): +class MockLibcloudObject: """ Libcloud object mock that replicates its API """ + def __init__(self, name, content): self.name = name self.content = list(content) def as_stream(self): yield from iter(self.content) -class MockLibcloudDriver(): +class MockLibcloudDriver: """ Mock driver that replicates the used LibCloud API """ + def __init__(self, api_key, api_secret_key): self.containers = {CONTAINER_NAME: {}} # Storage is initialized self.api_key = api_key self.api_secret_key = api_secret_key def _check_credentials(self): # Private method may be known as another name in Libcloud but is used # to replicate libcloud behavior (i.e. check credential at each # request) if self.api_key != API_KEY or self.api_secret_key != API_SECRET_KEY: raise InvalidCredsError() def get_container(self, container_name): try: return self.containers[container_name] except KeyError: - raise ContainerDoesNotExistError(container_name=container_name, - driver=self, value=None) + raise ContainerDoesNotExistError( + container_name=container_name, driver=self, value=None + ) def iterate_container_objects(self, container): self._check_credentials() yield from (v for k, v in sorted(container.items())) def get_object(self, container_name, obj_id): self._check_credentials() try: container = self.get_container(container_name) return container[obj_id] except KeyError: - raise ObjectDoesNotExistError(object_name=obj_id, - driver=self, value=None) + raise ObjectDoesNotExistError(object_name=obj_id, driver=self, value=None) def delete_object(self, obj): self._check_credentials() try: container = self.get_container(CONTAINER_NAME) container.pop(obj.name) return True except KeyError: - raise ObjectDoesNotExistError(object_name=obj.name, - driver=self, value=None) + raise ObjectDoesNotExistError(object_name=obj.name, driver=self, value=None) def upload_object_via_stream(self, content, container, obj_id): self._check_credentials() obj = MockLibcloudObject(obj_id, content) container[obj_id] = obj class MockCloudObjStorage(CloudObjStorage): """ Cloud object storage that uses a mocked driver """ + def _get_driver(self, **kwargs): return MockLibcloudDriver(**kwargs) def _get_provider(self): # Implement this for the abc requirement, but behavior is defined in # _get_driver. 
pass class TestCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): - compression = 'none' + compression = "none" path_prefix: Optional[str] = None def setUp(self): super().setUp() self.storage = MockCloudObjStorage( CONTAINER_NAME, - api_key=API_KEY, api_secret_key=API_SECRET_KEY, + api_key=API_KEY, + api_secret_key=API_SECRET_KEY, compression=self.compression, path_prefix=self.path_prefix, ) def test_compression(self): - content, obj_id = self.hash_content(b'add_get_w_id') + content, obj_id = self.hash_content(b"add_get_w_id") self.storage.add(content, obj_id=obj_id) libcloud_object = self.storage._get_object(obj_id) - raw_content = b''.join(libcloud_object.content) + raw_content = b"".join(libcloud_object.content) d = decompressors[self.compression]() assert d.decompress(raw_content) == content - assert d.unused_data == b'' + assert d.unused_data == b"" def test_trailing_data_on_stored_blob(self): - content, obj_id = self.hash_content(b'test content without garbage') + content, obj_id = self.hash_content(b"test content without garbage") self.storage.add(content, obj_id=obj_id) libcloud_object = self.storage._get_object(obj_id) - libcloud_object.content.append(b'trailing garbage') + libcloud_object.content.append(b"trailing garbage") - if self.compression == 'none': + if self.compression == "none": with self.assertRaises(Error) as e: self.storage.check(obj_id) else: with self.assertRaises(Error) as e: self.storage.get(obj_id) - assert 'trailing data' in e.exception.args[0] + assert "trailing data" in e.exception.args[0] class TestCloudObjStorageBz2(TestCloudObjStorage): - compression = 'bz2' + compression = "bz2" class TestCloudObjStorageGzip(TestCloudObjStorage): - compression = 'gzip' + compression = "gzip" class TestCloudObjStorageLzma(TestCloudObjStorage): - compression = 'lzma' + compression = "lzma" class TestCloudObjStorageZlib(TestCloudObjStorage): - compression = 'zlib' + compression = "zlib" class TestCloudObjStoragePrefix(TestCloudObjStorage): - path_prefix = 'contents' + path_prefix = "contents" def test_path_prefix(self): - content, obj_id = self.hash_content(b'test content') + content, obj_id = self.hash_content(b"test content") self.storage.add(content, obj_id=obj_id) container = self.storage.driver.containers[CONTAINER_NAME] object_path = self.storage._object_path(obj_id) - assert object_path.startswith(self.path_prefix + '/') + assert object_path.startswith(self.path_prefix + "/") assert object_path in container diff --git a/swh/objstorage/tests/test_objstorage_in_memory.py b/swh/objstorage/tests/test_objstorage_in_memory.py index e0ba1ca..2931b9b 100644 --- a/swh/objstorage/tests/test_objstorage_in_memory.py +++ b/swh/objstorage/tests/test_objstorage_in_memory.py @@ -1,16 +1,16 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.objstorage import get_objstorage from .objstorage_testing import ObjStorageTestFixture class TestInMemoryObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() - self.storage = get_objstorage(cls='memory', args={}) + self.storage = get_objstorage(cls="memory", args={}) diff --git a/swh/objstorage/tests/test_objstorage_instantiation.py b/swh/objstorage/tests/test_objstorage_instantiation.py index 6c80779..e15ee4e 100644 --- a/swh/objstorage/tests/test_objstorage_instantiation.py +++ 
b/swh/objstorage/tests/test_objstorage_instantiation.py @@ -1,49 +1,40 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import shutil import tempfile import unittest from swh.objstorage import get_objstorage from swh.objstorage.api.client import RemoteObjStorage from swh.objstorage.backends.pathslicing import PathSlicingObjStorage class TestObjStorageInitialization(unittest.TestCase): """ Test that the methods for ObjStorage initializations with `get_objstorage` works properly. """ def setUp(self): self.path = tempfile.mkdtemp() self.path2 = tempfile.mkdtemp() # Server is launched at self.url() - self.config = {'storage_base': self.path2, - 'storage_slicing': '0:1/0:5'} + self.config = {"storage_base": self.path2, "storage_slicing": "0:1/0:5"} super().setUp() def tearDown(self): super().tearDown() shutil.rmtree(self.path) shutil.rmtree(self.path2) def test_pathslicing_objstorage(self): - conf = { - 'cls': 'pathslicing', - 'args': {'root': self.path, 'slicing': '0:2/0:5'} - } + conf = {"cls": "pathslicing", "args": {"root": self.path, "slicing": "0:2/0:5"}} st = get_objstorage(**conf) self.assertTrue(isinstance(st, PathSlicingObjStorage)) def test_remote_objstorage(self): - conf = { - 'cls': 'remote', - 'args': { - 'url': 'http://127.0.0.1:4242/' - } - } + conf = {"cls": "remote", "args": {"url": "http://127.0.0.1:4242/"}} st = get_objstorage(**conf) self.assertTrue(isinstance(st, RemoteObjStorage)) diff --git a/swh/objstorage/tests/test_objstorage_multiplexer.py b/swh/objstorage/tests/test_objstorage_multiplexer.py index 843d07e..cec4beb 100644 --- a/swh/objstorage/tests/test_objstorage_multiplexer.py +++ b/swh/objstorage/tests/test_objstorage_multiplexer.py @@ -1,67 +1,68 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import shutil import tempfile import unittest from swh.objstorage import PathSlicingObjStorage from swh.objstorage.multiplexer import MultiplexerObjStorage from swh.objstorage.multiplexer.filter import add_filter, read_only from .objstorage_testing import ObjStorageTestFixture class TestMultiplexerObjStorage(ObjStorageTestFixture, unittest.TestCase): - def setUp(self): super().setUp() self.tmpdir = tempfile.mkdtemp() - os.mkdir(os.path.join(self.tmpdir, 'root1')) - os.mkdir(os.path.join(self.tmpdir, 'root2')) + os.mkdir(os.path.join(self.tmpdir, "root1")) + os.mkdir(os.path.join(self.tmpdir, "root2")) self.storage_v1 = PathSlicingObjStorage( - os.path.join(self.tmpdir, 'root1'), '0:2/2:4') + os.path.join(self.tmpdir, "root1"), "0:2/2:4" + ) self.storage_v2 = PathSlicingObjStorage( - os.path.join(self.tmpdir, 'root2'), '0:1/0:5') + os.path.join(self.tmpdir, "root2"), "0:1/0:5" + ) self.r_storage = add_filter(self.storage_v1, read_only()) self.w_storage = self.storage_v2 self.storage = MultiplexerObjStorage([self.r_storage, self.w_storage]) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_contains(self): - content_p, obj_id_p = self.hash_content(b'contains_present') - content_m, obj_id_m = self.hash_content(b'contains_missing') + content_p, obj_id_p = self.hash_content(b"contains_present") + content_m, obj_id_m = 
self.hash_content(b"contains_missing") self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_delete_missing(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().test_delete_missing() def test_delete_present(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().test_delete_present() def test_get_random_contents(self): - content, obj_id = self.hash_content(b'get_random_content') + content, obj_id = self.hash_content(b"get_random_content") self.storage.add(content) random_contents = list(self.storage.get_random(1)) self.assertEqual(1, len(random_contents)) self.assertIn(obj_id, random_contents) def test_access_readonly(self): # Add a content to the readonly storage - content, obj_id = self.hash_content(b'content in read-only') + content, obj_id = self.hash_content(b"content in read-only") self.storage_v1.add(content) # Try to retrieve it on the main storage self.assertIn(obj_id, self.storage) diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py index 1cbc34c..ddcf01a 100644 --- a/swh/objstorage/tests/test_objstorage_pathslicing.py +++ b/swh/objstorage/tests/test_objstorage_pathslicing.py @@ -1,157 +1,161 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import shutil import tempfile import unittest from unittest.mock import patch, DEFAULT from swh.model import hashutil from swh.objstorage import exc, get_objstorage, ID_HASH_LENGTH from .objstorage_testing import ObjStorageTestFixture class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase): - compression = 'none' + compression = "none" def setUp(self): super().setUp() - self.slicing = '0:2/2:4/4:6' + self.slicing = "0:2/2:4/4:6" self.tmpdir = tempfile.mkdtemp() self.storage = get_objstorage( - 'pathslicing', { - 'root': self.tmpdir, - 'slicing': self.slicing, - 'compression': self.compression, - } + "pathslicing", + { + "root": self.tmpdir, + "slicing": self.slicing, + "compression": self.compression, + }, ) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def content_path(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.storage._obj_path(hex_obj_id) def test_iter(self): - content, obj_id = self.hash_content(b'iter') + content, obj_id = self.hash_content(b"iter") self.assertEqual(list(iter(self.storage)), []) self.storage.add(content, obj_id=obj_id) self.assertEqual(list(iter(self.storage)), [obj_id]) def test_len(self): - content, obj_id = self.hash_content(b'len') + content, obj_id = self.hash_content(b"len") self.assertEqual(len(self.storage), 0) self.storage.add(content, obj_id=obj_id) self.assertEqual(len(self.storage), 1) def test_check_ok(self): - content, obj_id = self.hash_content(b'check_ok') + content, obj_id = self.hash_content(b"check_ok") self.storage.add(content, obj_id=obj_id) assert self.storage.check(obj_id) is None assert self.storage.check(obj_id.hex()) is None def test_check_id_mismatch(self): - content, obj_id = self.hash_content(b'check_id_mismatch') - self.storage.add(b'unexpected content', obj_id=obj_id) + content, obj_id = self.hash_content(b"check_id_mismatch") + self.storage.add(b"unexpected content", obj_id=obj_id) with self.assertRaises(exc.Error) as 
error: self.storage.check(obj_id) - self.assertEqual(( - 'Corrupt object %s should have id ' - '12ebb2d6c81395bcc5cab965bdff640110cb67ff' % obj_id.hex(),), - error.exception.args) + self.assertEqual( + ( + "Corrupt object %s should have id " + "12ebb2d6c81395bcc5cab965bdff640110cb67ff" % obj_id.hex(), + ), + error.exception.args, + ) def test_get_random_contents(self): - content, obj_id = self.hash_content(b'get_random_content') + content, obj_id = self.hash_content(b"get_random_content") self.storage.add(content, obj_id=obj_id) random_contents = list(self.storage.get_random(1)) self.assertEqual(1, len(random_contents)) self.assertIn(obj_id, random_contents) def test_iterate_from(self): all_ids = [] for i in range(100): - content, obj_id = self.hash_content(b'content %d' % i) + content, obj_id = self.hash_content(b"content %d" % i) self.storage.add(content, obj_id=obj_id) all_ids.append(obj_id) all_ids.sort() - ids = list(self.storage.iter_from(b'\x00' * (ID_HASH_LENGTH // 2))) + ids = list(self.storage.iter_from(b"\x00" * (ID_HASH_LENGTH // 2))) self.assertEqual(len(ids), len(all_ids)) self.assertEqual(ids, all_ids) ids = list(self.storage.iter_from(all_ids[0])) - self.assertEqual(len(ids), len(all_ids)-1) + self.assertEqual(len(ids), len(all_ids) - 1) self.assertEqual(ids, all_ids[1:]) ids = list(self.storage.iter_from(all_ids[-1], n_leaf=True)) n_leaf = ids[-1] ids = ids[:-1] self.assertEqual(n_leaf, 1) self.assertEqual(len(ids), 0) ids = list(self.storage.iter_from(all_ids[-2], n_leaf=True)) n_leaf = ids[-1] ids = ids[:-1] self.assertEqual(n_leaf, 2) # beware, this depends on the hash algo self.assertEqual(len(ids), 1) self.assertEqual(ids, all_ids[-1:]) def test_fdatasync_default(self): - content, obj_id = self.hash_content(b'check_fdatasync') - with patch.multiple('os', fsync=DEFAULT, fdatasync=DEFAULT) as patched: + content, obj_id = self.hash_content(b"check_fdatasync") + with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched: self.storage.add(content, obj_id=obj_id) if self.storage.use_fdatasync: - assert patched['fdatasync'].call_count == 1 - assert patched['fsync'].call_count == 0 + assert patched["fdatasync"].call_count == 1 + assert patched["fsync"].call_count == 0 else: - assert patched['fdatasync'].call_count == 0 - assert patched['fsync'].call_count == 1 + assert patched["fdatasync"].call_count == 0 + assert patched["fsync"].call_count == 1 def test_fdatasync_forced_on(self): self.storage.use_fdatasync = True - content, obj_id = self.hash_content(b'check_fdatasync') - with patch.multiple('os', fsync=DEFAULT, fdatasync=DEFAULT) as patched: + content, obj_id = self.hash_content(b"check_fdatasync") + with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched: self.storage.add(content, obj_id=obj_id) - assert patched['fdatasync'].call_count == 1 - assert patched['fsync'].call_count == 0 + assert patched["fdatasync"].call_count == 1 + assert patched["fsync"].call_count == 0 def test_fdatasync_forced_off(self): self.storage.use_fdatasync = False - content, obj_id = self.hash_content(b'check_fdatasync') - with patch.multiple('os', fsync=DEFAULT, fdatasync=DEFAULT) as patched: + content, obj_id = self.hash_content(b"check_fdatasync") + with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched: self.storage.add(content, obj_id=obj_id) - assert patched['fdatasync'].call_count == 0 - assert patched['fsync'].call_count == 1 + assert patched["fdatasync"].call_count == 0 + assert patched["fsync"].call_count == 1 def 
test_check_not_compressed(self): - content, obj_id = self.hash_content(b'check_not_compressed') + content, obj_id = self.hash_content(b"check_not_compressed") self.storage.add(content, obj_id=obj_id) - with open(self.content_path(obj_id), 'ab') as f: # Add garbage. - f.write(b'garbage') + with open(self.content_path(obj_id), "ab") as f: # Add garbage. + f.write(b"garbage") with self.assertRaises(exc.Error) as error: self.storage.check(obj_id) - if self.compression == 'none': - self.assertIn('Corrupt object', error.exception.args[0]) + if self.compression == "none": + self.assertIn("Corrupt object", error.exception.args[0]) else: - self.assertIn('trailing data found', error.exception.args[0]) + self.assertIn("trailing data found", error.exception.args[0]) class TestPathSlicingObjStorageGzip(TestPathSlicingObjStorage): - compression = 'gzip' + compression = "gzip" class TestPathSlicingObjStorageZlib(TestPathSlicingObjStorage): - compression = 'zlib' + compression = "zlib" class TestPathSlicingObjStorageBz2(TestPathSlicingObjStorage): - compression = 'bz2' + compression = "bz2" class TestPathSlicingObjStorageLzma(TestPathSlicingObjStorage): - compression = 'lzma' + compression = "lzma" diff --git a/swh/objstorage/tests/test_objstorage_random_generator.py b/swh/objstorage/tests/test_objstorage_random_generator.py index 9b0b17d..699ff4a 100644 --- a/swh/objstorage/tests/test_objstorage_random_generator.py +++ b/swh/objstorage/tests/test_objstorage_random_generator.py @@ -1,46 +1,45 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import Iterator from swh.objstorage import get_objstorage def test_random_generator_objstorage(): - sto = get_objstorage('random', {}) + sto = get_objstorage("random", {}) assert sto blobs = [sto.get(None) for i in range(100)] lengths = [len(x) for x in blobs] assert max(lengths) <= 55056238 def test_random_generator_objstorage_get_stream(): - sto = get_objstorage('random', {}) + sto = get_objstorage("random", {}) gen = sto.get_stream(None) assert isinstance(gen, Iterator) assert list(gen) # ensure the iterator can be consumed def test_random_generator_objstorage_list_content(): - sto = get_objstorage('random', {'total': 100}) + sto = get_objstorage("random", {"total": 100}) assert isinstance(sto.list_content(), Iterator) - assert list(sto.list_content()) == \ - [b'%d' % i for i in range(1, 101)] - assert list(sto.list_content(limit=10)) == \ - [b'%d' % i for i in range(1, 11)] - assert list(sto.list_content(last_obj_id=b'10', limit=10)) == \ - [b'%d' % i for i in range(11, 21)] + assert list(sto.list_content()) == [b"%d" % i for i in range(1, 101)] + assert list(sto.list_content(limit=10)) == [b"%d" % i for i in range(1, 11)] + assert list(sto.list_content(last_obj_id=b"10", limit=10)) == [ + b"%d" % i for i in range(11, 21) + ] def test_random_generator_objstorage_total(): - sto = get_objstorage('random', {'total': 5}) + sto = get_objstorage("random", {"total": 5}) assert len([x for x in sto]) == 5 def test_random_generator_objstorage_size(): - sto = get_objstorage('random', {'filesize': 10}) + sto = get_objstorage("random", {"filesize": 10}) for i in range(10): assert len(sto.get(None)) == 10 diff --git a/swh/objstorage/tests/test_objstorage_seaweedfs.py b/swh/objstorage/tests/test_objstorage_seaweedfs.py index c344e06..005eaca 100644 --- 
a/swh/objstorage/tests/test_objstorage_seaweedfs.py +++ b/swh/objstorage/tests/test_objstorage_seaweedfs.py @@ -1,91 +1,91 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.objstorage.objstorage import decompressors from swh.objstorage.exc import Error from swh.objstorage.backends.seaweed import WeedObjStorage, DEFAULT_LIMIT from swh.objstorage.tests.objstorage_testing import ObjStorageTestFixture class MockWeedFiler: """ WeedFiler mock that replicates its API """ + def __init__(self, url): self.url = url self.content = {} def get(self, remote_path): return self.content[remote_path] def put(self, fp, remote_path): self.content[remote_path] = fp.read() def exists(self, remote_path): return remote_path in self.content def delete(self, remote_path): del self.content[remote_path] def list(self, dir, last_file_name=None, limit=DEFAULT_LIMIT): keys = sorted(self.content.keys()) if last_file_name is None: idx = 0 else: idx = keys.index(last_file_name) + 1 - return {'Entries': [{'FullPath': x} for x in keys[idx:idx+limit]]} + return {"Entries": [{"FullPath": x} for x in keys[idx : idx + limit]]} class TestWeedObjStorage(ObjStorageTestFixture, unittest.TestCase): - compression = 'none' + compression = "none" def setUp(self): super().setUp() - self.url = 'http://127.0.0.1/test' - self.storage = WeedObjStorage(url=self.url, - compression=self.compression) + self.url = "http://127.0.0.1/test" + self.storage = WeedObjStorage(url=self.url, compression=self.compression) self.storage.wf = MockWeedFiler(self.url) def test_compression(self): - content, obj_id = self.hash_content(b'test compression') + content, obj_id = self.hash_content(b"test compression") self.storage.add(content, obj_id=obj_id) raw_content = self.storage.wf.get(self.storage._path(obj_id)) d = decompressors[self.compression]() assert d.decompress(raw_content) == content - assert d.unused_data == b'' + assert d.unused_data == b"" def test_trailing_data_on_stored_blob(self): - content, obj_id = self.hash_content(b'test content without garbage') + content, obj_id = self.hash_content(b"test content without garbage") self.storage.add(content, obj_id=obj_id) path = self.storage._path(obj_id) - self.storage.wf.content[path] += b'trailing garbage' + self.storage.wf.content[path] += b"trailing garbage" - if self.compression == 'none': + if self.compression == "none": with self.assertRaises(Error) as e: self.storage.check(obj_id) else: with self.assertRaises(Error) as e: self.storage.get(obj_id) - assert 'trailing data' in e.exception.args[0] + assert "trailing data" in e.exception.args[0] class TestWeedObjStorageBz2(TestWeedObjStorage): - compression = 'bz2' + compression = "bz2" class TestWeedObjStorageGzip(TestWeedObjStorage): - compression = 'gzip' + compression = "gzip" class TestWeedObjStorageLzma(TestWeedObjStorage): - compression = 'lzma' + compression = "lzma" class TestWeedObjStorageZlib(TestWeedObjStorage): - compression = 'zlib' + compression = "zlib" diff --git a/swh/objstorage/tests/test_objstorage_striping.py b/swh/objstorage/tests/test_objstorage_striping.py index 36ab874..2e34550 100644 --- a/swh/objstorage/tests/test_objstorage_striping.py +++ b/swh/objstorage/tests/test_objstorage_striping.py @@ -1,83 +1,80 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level 
directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import shutil import tempfile import unittest from swh.objstorage import get_objstorage from .objstorage_testing import ObjStorageTestFixture class TestStripingObjStorage(ObjStorageTestFixture, unittest.TestCase): - def setUp(self): super().setUp() self.base_dir = tempfile.mkdtemp() - os.mkdir(os.path.join(self.base_dir, 'root1')) - os.mkdir(os.path.join(self.base_dir, 'root2')) + os.mkdir(os.path.join(self.base_dir, "root1")) + os.mkdir(os.path.join(self.base_dir, "root2")) storage_config = { - 'cls': 'striping', - 'args': { - 'objstorages': [ + "cls": "striping", + "args": { + "objstorages": [ { - 'cls': 'pathslicing', - 'args': { - 'root': os.path.join(self.base_dir, 'root1'), - 'slicing': '0:2', - 'allow_delete': True, - } + "cls": "pathslicing", + "args": { + "root": os.path.join(self.base_dir, "root1"), + "slicing": "0:2", + "allow_delete": True, + }, }, { - 'cls': 'pathslicing', - 'args': { - 'root': os.path.join(self.base_dir, 'root2'), - 'slicing': '0:2', - 'allow_delete': True, - } + "cls": "pathslicing", + "args": { + "root": os.path.join(self.base_dir, "root2"), + "slicing": "0:2", + "allow_delete": True, + }, }, ] - } + }, } self.storage = get_objstorage(**storage_config) def tearDown(self): shutil.rmtree(self.base_dir) def test_add_get_wo_id(self): self.skipTest("can't add without id in the multiplexer storage") def test_add_striping_behavior(self): exp_storage_counts = [0, 0] storage_counts = [0, 0] for i in range(100): - content, obj_id = self.hash_content( - b'striping_behavior_test%02d' % i - ) + content, obj_id = self.hash_content(b"striping_behavior_test%02d" % i) self.storage.add(content, obj_id) exp_storage_counts[self.storage.get_storage_index(obj_id)] += 1 count = 0 for i, storage in enumerate(self.storage.storages): if obj_id not in storage: continue count += 1 storage_counts[i] += 1 self.assertEqual(count, 1) self.assertEqual(storage_counts, exp_storage_counts) def test_get_striping_behavior(self): # Make sure we can read objects that are available in any backend # storage - content, obj_id = self.hash_content(b'striping_behavior_test') + content, obj_id = self.hash_content(b"striping_behavior_test") for storage in self.storage.storages: storage.add(content, obj_id) self.assertIn(obj_id, self.storage) storage.delete(obj_id) self.assertNotIn(obj_id, self.storage) def test_list_content(self): - self.skipTest('Quite a chellenge to make it work') + self.skipTest("Quite a chellenge to make it work") diff --git a/swh/objstorage/tests/test_server.py b/swh/objstorage/tests/test_server.py index e234379..882faf3 100644 --- a/swh/objstorage/tests/test_server.py +++ b/swh/objstorage/tests/test_server.py @@ -1,134 +1,114 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import pytest import yaml from swh.objstorage.api.server import load_and_check_config -def prepare_config_file(tmpdir, content, name='config.yml'): +def prepare_config_file(tmpdir, content, name="config.yml"): """Prepare configuration file in `$tmpdir/name` with content `content`. Args: tmpdir (LocalPath): root directory content (str/dict): Content of the file either as string or as a dict. If a dict, converts the dict into a yaml string. 
name (str): configuration filename Returns path (str) of the configuration file prepared. """ config_path = tmpdir / name if isinstance(content, dict): # convert if needed content = yaml.dump(content) - config_path.write_text(content, encoding='utf-8') + config_path.write_text(content, encoding="utf-8") # pytest on python3.5 does not support LocalPath manipulation, so # convert path to string return str(config_path) def test_load_and_check_config_no_configuration(): """Inexistent configuration files raises""" with pytest.raises(EnvironmentError) as e: load_and_check_config(None) - assert e.value.args[0] == 'Configuration file must be defined' + assert e.value.args[0] == "Configuration file must be defined" - config_path = '/indexer/inexistent/config.yml' + config_path = "/indexer/inexistent/config.yml" with pytest.raises(FileNotFoundError) as e: load_and_check_config(config_path) - assert e.value.args[0] == 'Configuration file %s does not exist' % ( - config_path, ) + assert e.value.args[0] == "Configuration file %s does not exist" % (config_path,) def test_load_and_check_config_invalid_configuration_toplevel(tmpdir): """Invalid configuration raises""" - config = { - 'something': 'useless' - } + config = {"something": "useless"} config_path = prepare_config_file(tmpdir, content=config) with pytest.raises(KeyError) as e: load_and_check_config(config_path) - assert ( - e.value.args[0] == - 'Invalid configuration; missing objstorage config entry' - ) + assert e.value.args[0] == "Invalid configuration; missing objstorage config entry" def test_load_and_check_config_invalid_configuration(tmpdir): """Invalid configuration raises""" for data, missing_keys in [ - ({'objstorage': {'something': 'useless'}}, ['cls', 'args']), - ({'objstorage': {'cls': 'something'}}, ['args']), + ({"objstorage": {"something": "useless"}}, ["cls", "args"]), + ({"objstorage": {"cls": "something"}}, ["args"]), ]: config_path = prepare_config_file(tmpdir, content=data) with pytest.raises(KeyError) as e: load_and_check_config(config_path) - assert ( - e.value.args[0] == - 'Invalid configuration; missing %s config entry' % ( - ', '.join(missing_keys), ) + assert e.value.args[0] == "Invalid configuration; missing %s config entry" % ( + ", ".join(missing_keys), ) def test_load_and_check_config_invalid_configuration_level2(tmpdir): """Invalid configuration at 2nd level raises""" config = { - 'objstorage': { - 'cls': 'pathslicing', - 'args': { - 'root': 'root', - 'slicing': 'slicing', - }, - 'client_max_size': '10', + "objstorage": { + "cls": "pathslicing", + "args": {"root": "root", "slicing": "slicing",}, + "client_max_size": "10", } } - for key in ('root', 'slicing'): + for key in ("root", "slicing"): c = copy.deepcopy(config) - c['objstorage']['args'].pop(key) + c["objstorage"]["args"].pop(key) config_path = prepare_config_file(tmpdir, c) with pytest.raises(KeyError) as e: load_and_check_config(config_path) assert ( - e.value.args[0] == - "Invalid configuration; missing args.%s config entry" % key + e.value.args[0] + == "Invalid configuration; missing args.%s config entry" % key ) def test_load_and_check_config_fine(tmpdir): """pathslicing configuration fine loads ok""" config = { - 'objstorage': { - 'cls': 'pathslicing', - 'args': { - 'root': 'root', - 'slicing': 'slicing', - } + "objstorage": { + "cls": "pathslicing", + "args": {"root": "root", "slicing": "slicing",}, } } config_path = prepare_config_file(tmpdir, config) cfg = load_and_check_config(config_path) assert cfg == config def 
test_load_and_check_config_fine2(tmpdir): - config = { - 'client_max_size': '10', - 'objstorage': { - 'cls': 'remote', - 'args': {} - } - } + config = {"client_max_size": "10", "objstorage": {"cls": "remote", "args": {}}} config_path = prepare_config_file(tmpdir, config) cfg = load_and_check_config(config_path) assert cfg == config diff --git a/tox.ini b/tox.ini index 6366716..b4b54bc 100644 --- a/tox.ini +++ b/tox.ini @@ -1,28 +1,35 @@ [tox] envlist=flake8,py3,mypy [testenv] extras = testing deps = pytest-cov dev: pdbpp commands = pytest --cov={envsitepackagesdir}/swh/objstorage \ {envsitepackagesdir}/swh/objstorage \ --cov-branch {posargs} +[testenv:black] +skip_install = true +deps = + black +commands = + {envpython} -m black --check swh + [testenv:flake8] skip_install = true deps = flake8 commands = {envpython} -m flake8 [testenv:mypy] extras = testing deps = mypy commands = mypy swh
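With the black hook enabled in .pre-commit-config.yaml and the [testenv:black] environment added to tox.ini above, the same formatting check can be run locally before sending a diff. The invocations below are a sketch based on the standard tox, pre-commit and black command lines (they assume all three tools are installed); the tox environment simply wraps the "black --check swh" command declared above:

    # same check as the tox environment: fail if anything under swh/ is not black-formatted
    $ tox -e black

    # or run the newly enabled pre-commit hook over the whole tree
    $ pre-commit run black --all-files

    # to rewrite the files in place rather than only check them
    $ black swh

Either entry point exercises the identical black 19.10b0 configuration, so a tree that passes one will pass the other.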