diff --git a/swh/objstorage/__init__.py b/swh/objstorage/__init__.py
index 0682dcb..d88d589 100644
--- a/swh/objstorage/__init__.py
+++ b/swh/objstorage/__init__.py
@@ -1,105 +1,105 @@
 # Copyright (C) 2016  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

-from .objstorage import ObjStorage
-from .objstorage_pathslicing import PathSlicingObjStorage
-from .objstorage_in_memory import InMemoryObjStorage
-from .api.client import RemoteObjStorage
-from .multiplexer import MultiplexerObjStorage, StripingObjStorage
-from .multiplexer.filter import add_filters
-
-from swh.objstorage.objstorage_weed import WeedObjStorage
+from swh.objstorage.objstorage import ObjStorage, ID_HASH_LENGTH  # noqa
+from swh.objstorage.backends.pathslicing import PathSlicingObjStorage
+from swh.objstorage.backends.in_memory import InMemoryObjStorage
+from swh.objstorage.api.client import RemoteObjStorage
+from swh.objstorage.multiplexer import (
+    MultiplexerObjStorage, StripingObjStorage)
+from swh.objstorage.multiplexer.filter import add_filters
+from swh.objstorage.backends.seaweed import WeedObjStorage

 __all__ = ['get_objstorage', 'ObjStorage']

 _STORAGE_CLASSES = {
     'pathslicing': PathSlicingObjStorage,
     'remote': RemoteObjStorage,
     'memory': InMemoryObjStorage,
     'weed': WeedObjStorage,
 }

 _STORAGE_CLASSES_MISSING = {
 }

 try:
-    from swh.objstorage.cloud.objstorage_azure import (
+    from swh.objstorage.backends.azure import (
         AzureCloudObjStorage,
         PrefixedAzureCloudObjStorage,
     )
     _STORAGE_CLASSES['azure'] = AzureCloudObjStorage
     _STORAGE_CLASSES['azure-prefixed'] = PrefixedAzureCloudObjStorage
 except ImportError as e:
     _STORAGE_CLASSES_MISSING['azure'] = e.args[0]
     _STORAGE_CLASSES_MISSING['azure-prefixed'] = e.args[0]

 try:
-    from swh.objstorage.objstorage_rados import RADOSObjStorage
+    from swh.objstorage.backends.rados import RADOSObjStorage
     _STORAGE_CLASSES['rados'] = RADOSObjStorage
 except ImportError as e:
     _STORAGE_CLASSES_MISSING['rados'] = e.args[0]

 try:
-    from swh.objstorage.cloud.objstorage_cloud import (
+    from swh.objstorage.backends.libcloud import (
         AwsCloudObjStorage,
         OpenStackCloudObjStorage,
     )
     _STORAGE_CLASSES['s3'] = AwsCloudObjStorage
     _STORAGE_CLASSES['swift'] = OpenStackCloudObjStorage
 except ImportError as e:
     _STORAGE_CLASSES_MISSING['s3'] = e.args[0]
     _STORAGE_CLASSES_MISSING['swift'] = e.args[0]


 def get_objstorage(cls, args):
     """ Create an ObjStorage using the given implementation class.

     Args:
         cls (str): objstorage class unique key contained in the
             _STORAGE_CLASSES dict.
         args (dict): arguments for the required class of objstorage
             that must match exactly the one in the `__init__` method of the
             class.

     Returns:
         subclass of ObjStorage that match the given `storage_class` argument.

     Raises:
         ValueError: if the given storage class is not a valid objstorage
             key.
""" if cls in _STORAGE_CLASSES: return _STORAGE_CLASSES[cls](**args) else: raise ValueError('Storage class {} is not available: {}'.format( cls, _STORAGE_CLASSES_MISSING.get(cls, 'unknown name'))) def _construct_filtered_objstorage(storage_conf, filters_conf): return add_filters( get_objstorage(**storage_conf), filters_conf ) _STORAGE_CLASSES['filtered'] = _construct_filtered_objstorage def _construct_multiplexer_objstorage(objstorages): storages = [get_objstorage(**conf) for conf in objstorages] return MultiplexerObjStorage(storages) _STORAGE_CLASSES['multiplexer'] = _construct_multiplexer_objstorage def _construct_striping_objstorage(objstorages): storages = [get_objstorage(**conf) for conf in objstorages] return StripingObjStorage(storages) _STORAGE_CLASSES['striping'] = _construct_striping_objstorage diff --git a/swh/objstorage/backends/__init__.py b/swh/objstorage/backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swh/objstorage/cloud/objstorage_azure.py b/swh/objstorage/backends/azure.py similarity index 100% rename from swh/objstorage/cloud/objstorage_azure.py rename to swh/objstorage/backends/azure.py diff --git a/swh/objstorage/objstorage_in_memory.py b/swh/objstorage/backends/in_memory.py similarity index 100% rename from swh/objstorage/objstorage_in_memory.py rename to swh/objstorage/backends/in_memory.py diff --git a/swh/objstorage/cloud/objstorage_cloud.py b/swh/objstorage/backends/libcloud.py similarity index 100% rename from swh/objstorage/cloud/objstorage_cloud.py rename to swh/objstorage/backends/libcloud.py diff --git a/swh/objstorage/objstorage_pathslicing.py b/swh/objstorage/backends/pathslicing.py similarity index 98% rename from swh/objstorage/objstorage_pathslicing.py rename to swh/objstorage/backends/pathslicing.py index 799e5b5..f71acfb 100644 --- a/swh/objstorage/objstorage_pathslicing.py +++ b/swh/objstorage/backends/pathslicing.py @@ -1,373 +1,374 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import os import gzip import tempfile import random import collections from itertools import islice from contextlib import contextmanager from swh.model import hashutil -from .objstorage import (ObjStorage, compute_hash, ID_HASH_ALGO, - ID_HASH_LENGTH, DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT) -from .exc import ObjNotFoundError, Error +from swh.objstorage.objstorage import ( + ObjStorage, compute_hash, ID_HASH_ALGO, + ID_HASH_LENGTH, DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT) +from swh.objstorage.exc import ObjNotFoundError, Error GZIP_BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 @contextmanager def _write_obj_file(hex_obj_id, objstorage): """ Context manager for writing object files to the object storage. During writing, data are written to a temporary file, which is atomically renamed to the right file name after closing. This context manager also takes care of (gzip) compressing the data on the fly. Usage sample: with _write_obj_file(hex_obj_id, objstorage): f.write(obj_data) Yields: a file-like object open for writing bytes. """ # Get the final paths and create the directory if absent. dir = objstorage._obj_dir(hex_obj_id) if not os.path.isdir(dir): os.makedirs(dir, DIR_MODE, exist_ok=True) path = os.path.join(dir, hex_obj_id) # Create a temporary file. 
     (tmp, tmp_path) = tempfile.mkstemp(suffix='.tmp',
                                        prefix='hex_obj_id.', dir=dir)

     # Open the file and yield it for writing.
     tmp_f = os.fdopen(tmp, 'wb')
     with gzip.GzipFile(filename=tmp_path, fileobj=tmp_f) as f:
         yield f

     # Then close the temporary file and move it to the right directory.
     tmp_f.close()
     os.chmod(tmp_path, FILE_MODE)
     os.rename(tmp_path, path)


 @contextmanager
 def _read_obj_file(hex_obj_id, objstorage):
     """ Context manager for reading object file in the object storage.

     Usage sample:
         with _read_obj_file(hex_obj_id, objstorage) as f:
             b = f.read()

     Yields:
         a file-like object open for reading bytes.
     """
     path = objstorage._obj_path(hex_obj_id)
     with gzip.GzipFile(path, 'rb') as f:
         yield f


 class PathSlicingObjStorage(ObjStorage):
     """Implementation of the ObjStorage API based on the hash of the content.

     On disk, an object storage is a directory tree containing files
     named after their object IDs. An object ID is a checksum of its
     content, depending on the value of the ID_HASH_ALGO constant (see
     swh.model.hashutil for its meaning).

     To avoid directories that contain too many files, the object storage has
     a given slicing. Each slicing correspond to a directory that is named
     according to the hash of its content.

     So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689
     will be stored in the given object storages :

     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689

     The files in the storage are stored in gzipped compressed format.

     Attributes:
         root (string): path to the root directory of the storage on
             the disk.
         bounds: list of tuples that indicates the beginning and the end
             of each subdirectory for a content.
     """

     def __init__(self, root, slicing, **kwargs):
         """ Create an object to access a hash-slicing based object storage.

         Args:
             root (string): path to the root directory of the storage on
                 the disk.
             slicing (string): string that indicates the slicing to perform
                 on the hash of the content to know the path where it should
                 be stored.
         """
         super().__init__(**kwargs)
         self.root = root
         # Make a list of tuples where each tuple contains the beginning
         # and the end of each slicing.
         self.bounds = [
             slice(*map(int, sbounds.split(':')))
             for sbounds in slicing.split('/')
             if sbounds
         ]

         self.check_config(check_write=False)

     def check_config(self, *, check_write):
         """Check whether this object storage is properly configured"""
         root = self.root

         if not os.path.isdir(root):
             raise ValueError(
                 'PathSlicingObjStorage root "%s" is not a directory' % root
             )

         max_endchar = max(map(lambda bound: bound.stop, self.bounds))
         if ID_HASH_LENGTH < max_endchar:
             raise ValueError(
                 'Algorithm %s has too short hash for slicing to char %d'
                 % (ID_HASH_ALGO, max_endchar)
             )

         if check_write:
             if not os.access(self.root, os.W_OK):
                 raise PermissionError(
                     'PathSlicingObjStorage root "%s" is not writable' % root
                 )

         return True

     def __contains__(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return os.path.isfile(self._obj_path(hex_obj_id))

     def __iter__(self):
         """Iterate over the object identifiers currently available in the
         storage.

         Warning: with the current implementation of the object
         storage, this method will walk the filesystem to list objects,
         meaning that listing all objects will be very slow for large
         storages. You almost certainly don't want to use this method
         in production.
         Return:
             Iterator over object IDs
         """
         def obj_iterator():
             # XXX hackish: it does not verify that the depth of found files
             # matches the slicing depth of the storage
             for root, _dirs, files in os.walk(self.root):
                 _dirs.sort()
                 for f in sorted(files):
                     yield bytes.fromhex(f)

         return obj_iterator()

     def __len__(self):
         """Compute the number of objects available in the storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performances applies

         Return:
             number of objects contained in the storage
         """
         return sum(1 for i in self)

     def _obj_dir(self, hex_obj_id):
         """ Compute the storage directory of an object.

         See also: PathSlicingObjStorage::_obj_path

         Args:
             hex_obj_id: object id as hexlified string.

         Returns:
             Path to the directory that contains the required object.
         """
         slices = [hex_obj_id[bound] for bound in self.bounds]
         return os.path.join(self.root, *slices)

     def _obj_path(self, hex_obj_id):
         """ Compute the full path to an object into the current storage.

         See also: PathSlicingObjStorage::_obj_dir

         Args:
             hex_obj_id: object id as hexlified string.

         Returns:
             Path to the actual object corresponding to the given id.
         """
         return os.path.join(self._obj_dir(hex_obj_id), hex_obj_id)

     def add(self, content, obj_id=None, check_presence=True):
         if obj_id is None:
             obj_id = compute_hash(content)

         if check_presence and obj_id in self:
             # If the object is already present, return immediately.
             return obj_id

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         if isinstance(content, collections.Iterator):
             content = b''.join(content)
         with _write_obj_file(hex_obj_id, self) as f:
             f.write(content)

         return obj_id

     def get(self, obj_id):
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         # Open the file and return its content as bytes
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         with _read_obj_file(hex_obj_id, self) as f:
             return f.read()

     def check(self, obj_id):
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         hex_obj_id = hashutil.hash_to_hex(obj_id)

         try:
             with gzip.open(self._obj_path(hex_obj_id)) as f:
                 length = None
                 if ID_HASH_ALGO.endswith('_git'):
                     # if the hashing algorithm is git-like, we need to know
                     # the content size to hash on the fly. Do a first pass
                     # here to compute the size
                     length = 0
                     while True:
                         chunk = f.read(GZIP_BUFSIZ)
                         length += len(chunk)
                         if not chunk:
                             break
                     f.rewind()

                 checksums = hashutil.MultiHash.from_file(
                     f, hash_names=[ID_HASH_ALGO], length=length).digest()
                 actual_obj_id = checksums[ID_HASH_ALGO]
                 if obj_id != actual_obj_id:
                     raise Error(
                         'Corrupt object %s should have id %s'
                         % (hashutil.hash_to_hex(obj_id),
                            hashutil.hash_to_hex(actual_obj_id))
                     )
         except (OSError, IOError):
             # IOError is for compatibility with older python versions
             raise Error('Corrupt object %s is not a gzip file' % obj_id)

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         try:
             os.remove(self._obj_path(hex_obj_id))
         except FileNotFoundError:
             raise ObjNotFoundError(obj_id)
         return True

     # Management methods

     def get_random(self, batch_size):
         def get_random_content(self, batch_size):
             """ Get a batch of content inside a single directory.

             Returns:
                 a tuple (batch size, batch).
""" dirs = [] for level in range(len(self.bounds)): path = os.path.join(self.root, *dirs) dir_list = next(os.walk(path))[1] if 'tmp' in dir_list: dir_list.remove('tmp') dirs.append(random.choice(dir_list)) path = os.path.join(self.root, *dirs) content_list = next(os.walk(path))[2] length = min(batch_size, len(content_list)) return length, map(hashutil.hash_to_bytes, random.sample(content_list, length)) while batch_size: length, it = get_random_content(self, batch_size) batch_size = batch_size - length yield from it # Streaming methods @contextmanager def chunk_writer(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) with _write_obj_file(hex_obj_id, self) as f: yield f.write def add_stream(self, content_iter, obj_id, check_presence=True): if check_presence and obj_id in self: return obj_id with self.chunk_writer(obj_id) as writer: for chunk in content_iter: writer(chunk) return obj_id def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) with _read_obj_file(hex_obj_id, self) as f: reader = functools.partial(f.read, chunk_size) yield from iter(reader, b'') def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): if last_obj_id: it = self.iter_from(last_obj_id) else: it = iter(self) return islice(it, limit) def iter_from(self, obj_id, n_leaf=False): hex_obj_id = hashutil.hash_to_hex(obj_id) slices = [hex_obj_id[bound] for bound in self.bounds] rlen = len(self.root.split('/')) i = 0 for root, dirs, files in os.walk(self.root): if not dirs: i += 1 level = len(root.split('/')) - rlen dirs.sort() if dirs and root == os.path.join(self.root, *slices[:level]): cslice = slices[level] for d in dirs[:]: if d < cslice: dirs.remove(d) for f in sorted(files): if f > hex_obj_id: yield bytes.fromhex(f) if n_leaf: yield i diff --git a/swh/objstorage/objstorage_rados.py b/swh/objstorage/backends/rados.py similarity index 100% rename from swh/objstorage/objstorage_rados.py rename to swh/objstorage/backends/rados.py diff --git a/swh/objstorage/objstorage_weed.py b/swh/objstorage/backends/seaweed.py similarity index 100% rename from swh/objstorage/objstorage_weed.py rename to swh/objstorage/backends/seaweed.py diff --git a/swh/objstorage/cloud/__init__.py b/swh/objstorage/cloud/__init__.py deleted file mode 100644 index 03ffe16..0000000 --- a/swh/objstorage/cloud/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .objstorage_cloud import AwsCloudObjStorage, OpenStackCloudObjStorage -from .objstorage_azure import AzureCloudObjStorage - - -__all__ = [ - 'AwsCloudObjStorage', - 'OpenStackCloudObjStorage', - 'AzureCloudObjStorage', -] diff --git a/swh/objstorage/tests/test_objstorage_azure.py b/swh/objstorage/tests/test_objstorage_azure.py index 69699f8..8adccb2 100644 --- a/swh/objstorage/tests/test_objstorage_azure.py +++ b/swh/objstorage/tests/test_objstorage_azure.py @@ -1,133 +1,133 @@ # Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from collections import defaultdict from unittest.mock import patch from azure.common import AzureMissingResourceHttpError from swh.model.hashutil import hash_to_hex from swh.objstorage import get_objstorage from .objstorage_testing import ObjStorageTestFixture class MockBlob(): """ Libcloud object mock that replicates its API """ def __init__(self, name, 
         self.name = name
         self.content = content


 class MockBlockBlobService():
     """Mock internal azure library which AzureCloudObjStorage depends upon.

     """
     data = {}

     def __init__(self, account_name, account_key, **kwargs):
         # do not care for the account_name and the api_secret_key here
         self.data = defaultdict(dict)

     def get_container_properties(self, container_name):
         self.data[container_name]
         return container_name in self.data

     def create_blob_from_bytes(self, container_name, blob_name, blob):
         self.data[container_name][blob_name] = blob

     def get_blob_to_bytes(self, container_name, blob_name):
         if blob_name not in self.data[container_name]:
             raise AzureMissingResourceHttpError(
                 'Blob %s not found' % blob_name,
                 404)
         return MockBlob(name=blob_name,
                         content=self.data[container_name][blob_name])

     def delete_blob(self, container_name, blob_name):
         try:
             self.data[container_name].pop(blob_name)
         except KeyError:
             raise AzureMissingResourceHttpError(
                 'Blob %s not found' % blob_name, 404)
         return True

     def exists(self, container_name, blob_name):
         return blob_name in self.data[container_name]

     def list_blobs(self, container_name, marker=None, maxresults=None):
         for blob_name, content in sorted(self.data[container_name].items()):
             if marker is None or blob_name > marker:
                 yield MockBlob(name=blob_name, content=content)


 class TestAzureCloudObjStorage(ObjStorageTestFixture, unittest.TestCase):

     def setUp(self):
         super().setUp()
         patcher = patch(
-            'swh.objstorage.cloud.objstorage_azure.BlockBlobService',
+            'swh.objstorage.backends.azure.BlockBlobService',
             MockBlockBlobService,
         )
         patcher.start()
         self.addCleanup(patcher.stop)

         self.storage = get_objstorage('azure', {
             'account_name': 'account-name',
             'api_secret_key': 'api-secret-key',
             'container_name': 'container-name',
         })


 class TestPrefixedAzureCloudObjStorage(ObjStorageTestFixture,
                                        unittest.TestCase):
     def setUp(self):
         super().setUp()
         patcher = patch(
-            'swh.objstorage.cloud.objstorage_azure.BlockBlobService',
+            'swh.objstorage.backends.azure.BlockBlobService',
             MockBlockBlobService,
         )
         patcher.start()
         self.addCleanup(patcher.stop)

         self.accounts = {}
         for prefix in '0123456789abcdef':
             self.accounts[prefix] = {
                 'account_name': 'account_%s' % prefix,
                 'api_secret_key': 'secret_key_%s' % prefix,
                 'container_name': 'container_%s' % prefix,
             }

         self.storage = get_objstorage('azure-prefixed', {
             'accounts': self.accounts
         })

     def test_prefixedazure_instantiation_missing_prefixes(self):
         del self.accounts['d']
         del self.accounts['e']

         with self.assertRaisesRegex(ValueError, 'Missing prefixes'):
             get_objstorage('azure-prefixed', {
                 'accounts': self.accounts
             })

     def test_prefixedazure_instantiation_inconsistent_prefixes(self):
         self.accounts['00'] = self.accounts['0']

         with self.assertRaisesRegex(ValueError, 'Inconsistent prefixes'):
             get_objstorage('azure-prefixed', {
                 'accounts': self.accounts
             })

     def test_prefixedazure_sharding_behavior(self):
         for i in range(100):
             content, obj_id = self.hash_content(b'test_content_%02d' % i)
             self.storage.add(content, obj_id=obj_id)
             hex_obj_id = hash_to_hex(obj_id)
             prefix = hex_obj_id[0]
             self.assertTrue(
                 self.storage.prefixes[prefix][0].exists(
                     self.accounts[prefix]['container_name'],
                     hex_obj_id
                 ))
diff --git a/swh/objstorage/tests/test_objstorage_cloud.py b/swh/objstorage/tests/test_objstorage_cloud.py
index f8f79ea..086aa6e 100644
--- a/swh/objstorage/tests/test_objstorage_cloud.py
+++ b/swh/objstorage/tests/test_objstorage_cloud.py
@@ -1,166 +1,166 @@
 # Copyright (C) 2016  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import unittest

 import bz2
 import lzma
 import zlib

 from libcloud.common.types import InvalidCredsError
 from libcloud.storage.types import (ContainerDoesNotExistError,
                                     ObjectDoesNotExistError)
 from swh.model import hashutil

-from swh.objstorage.cloud.objstorage_cloud import CloudObjStorage
+from swh.objstorage.backends.libcloud import CloudObjStorage

 from .objstorage_testing import ObjStorageTestFixture

 API_KEY = 'API_KEY'
 API_SECRET_KEY = 'API SECRET KEY'
 CONTAINER_NAME = 'test_container'


 class MockLibcloudObject():
     """ Libcloud object mock that replicates its API """
     def __init__(self, name, content):
         self.name = name
         self.content = list(content)

     def as_stream(self):
         yield from iter(self.content)


 class MockLibcloudDriver():
     """ Mock driver that replicates the used LibCloud API """
     def __init__(self, api_key, api_secret_key):
         self.containers = {CONTAINER_NAME: {}}  # Storage is initialized
         self.api_key = api_key
         self.api_secret_key = api_secret_key

     def _check_credentials(self):
         # Private method may be known as another name in Libcloud but is
         # used to replicate libcloud behavior (i.e. check credential at
         # each request)
         if self.api_key != API_KEY or self.api_secret_key != API_SECRET_KEY:
             raise InvalidCredsError()

     def get_container(self, container_name):
         try:
             return self.containers[container_name]
         except KeyError:
             raise ContainerDoesNotExistError(container_name=container_name,
                                              driver=self, value=None)

     def iterate_container_objects(self, container):
         self._check_credentials()
         yield from (v for k, v in sorted(container.items()))

     def get_object(self, container_name, obj_id):
         self._check_credentials()
         try:
             container = self.get_container(container_name)
             return container[obj_id]
         except KeyError:
             raise ObjectDoesNotExistError(object_name=obj_id,
                                           driver=self, value=None)

     def delete_object(self, obj):
         self._check_credentials()
         try:
             container = self.get_container(CONTAINER_NAME)
             container.pop(obj.name)
             return True
         except KeyError:
             raise ObjectDoesNotExistError(object_name=obj.name,
                                           driver=self, value=None)

     def upload_object_via_stream(self, content, container, obj_id):
         self._check_credentials()
         obj = MockLibcloudObject(obj_id, content)
         container[obj_id] = obj


 class MockCloudObjStorage(CloudObjStorage):
     """ Cloud object storage that uses a mocked driver """
     def _get_driver(self, **kwargs):
         return MockLibcloudDriver(**kwargs)

     def _get_provider(self):
         # Implement this for the abc requirement, but behavior is defined
         # in _get_driver.
         pass


 class TestCloudObjStorage(ObjStorageTestFixture, unittest.TestCase):

     def setUp(self):
         super().setUp()
         self.storage = MockCloudObjStorage(
             CONTAINER_NAME,
             api_key=API_KEY, api_secret_key=API_SECRET_KEY,
         )

     def test_compression(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         self.storage.add(content, obj_id=obj_id)

         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)

         self.assertEqual(b''.join(data[obj_id].content), content)


 class TestCloudObjStorageBz2(ObjStorageTestFixture, unittest.TestCase):

     def setUp(self):
         super().setUp()
         self.storage = MockCloudObjStorage(
             CONTAINER_NAME, compression='bz2',
             api_key=API_KEY, api_secret_key=API_SECRET_KEY,
         )

     def test_compression(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         self.storage.add(content, obj_id=obj_id)

         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)

         self.assertEqual(bz2.decompress(b''.join(data[obj_id].content)),
                          content)


 class TestCloudObjStorageLzma(ObjStorageTestFixture, unittest.TestCase):

     def setUp(self):
         super().setUp()
         self.storage = MockCloudObjStorage(
             CONTAINER_NAME, compression='lzma',
             api_key=API_KEY, api_secret_key=API_SECRET_KEY,
         )

     def test_compression(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         self.storage.add(content, obj_id=obj_id)

         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)

         self.assertEqual(lzma.decompress(b''.join(data[obj_id].content)),
                          content)


 class TestCloudObjStorageZlib(ObjStorageTestFixture, unittest.TestCase):

     def setUp(self):
         super().setUp()
         self.storage = MockCloudObjStorage(
             CONTAINER_NAME, compression='zlib',
             api_key=API_KEY, api_secret_key=API_SECRET_KEY,
         )

     def test_compression(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         self.storage.add(content, obj_id=obj_id)

         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)

         self.assertEqual(zlib.decompress(b''.join(data[obj_id].content)),
                          content)
diff --git a/swh/objstorage/tests/test_objstorage_instantiation.py b/swh/objstorage/tests/test_objstorage_instantiation.py
index 674f9be..6c80779 100644
--- a/swh/objstorage/tests/test_objstorage_instantiation.py
+++ b/swh/objstorage/tests/test_objstorage_instantiation.py
@@ -1,49 +1,49 @@
 # Copyright (C) 2015-2016  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import shutil
 import tempfile
 import unittest

 from swh.objstorage import get_objstorage
 from swh.objstorage.api.client import RemoteObjStorage
-from swh.objstorage.objstorage_pathslicing import PathSlicingObjStorage
+from swh.objstorage.backends.pathslicing import PathSlicingObjStorage


 class TestObjStorageInitialization(unittest.TestCase):
     """ Test that the methods for ObjStorage initializations with
     `get_objstorage` works properly.
""" def setUp(self): self.path = tempfile.mkdtemp() self.path2 = tempfile.mkdtemp() # Server is launched at self.url() self.config = {'storage_base': self.path2, 'storage_slicing': '0:1/0:5'} super().setUp() def tearDown(self): super().tearDown() shutil.rmtree(self.path) shutil.rmtree(self.path2) def test_pathslicing_objstorage(self): conf = { 'cls': 'pathslicing', 'args': {'root': self.path, 'slicing': '0:2/0:5'} } st = get_objstorage(**conf) self.assertTrue(isinstance(st, PathSlicingObjStorage)) def test_remote_objstorage(self): conf = { 'cls': 'remote', 'args': { 'url': 'http://127.0.0.1:4242/' } } st = get_objstorage(**conf) self.assertTrue(isinstance(st, RemoteObjStorage))