diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py
index 214c0c7..22a3de1 100644
--- a/swh/objstorage/objstorage.py
+++ b/swh/objstorage/objstorage.py
@@ -1,342 +1,342 @@
 # Copyright (C) 2015-2018  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import abc
 from itertools import dropwhile, islice
 
 import bz2
 import lzma
 import zlib
 
 from swh.model import hashutil
 
 from .exc import ObjNotFoundError
 
 
 ID_HASH_ALGO = 'sha1'
 
 ID_HASH_LENGTH = 40  # Size in bytes of the hash hexadecimal representation.
 
 DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024  # Size in bytes of the streaming chunks
 
 DEFAULT_LIMIT = 10000
 
 
 def compute_hash(content):
     """Compute the content's hash.
 
     Args:
         content (bytes): The raw content to hash
 
     Returns:
         The ID_HASH_ALGO checksum of the content
 
     """
     return hashutil.MultiHash.from_data(
         content,
         hash_names=[ID_HASH_ALGO],
     ).digest().get(ID_HASH_ALGO)
 
 
 class NullCompressor:
     def compress(self, data):
         return data
 
     def flush(self):
         return b''
 
 
 class NullDecompressor:
     def decompress(self, data):
         return data
 
     @property
     def unused_data(self):
         return b''
 
 
 decompressors = {
     'bz2': bz2.BZ2Decompressor,
     'lzma': lzma.LZMADecompressor,
     'gzip': lambda: zlib.decompressobj(wbits=31),
     'zlib': zlib.decompressobj,
-    None: NullDecompressor,
+    'none': NullDecompressor,
 }
 
 compressors = {
     'bz2': bz2.BZ2Compressor,
     'lzma': lzma.LZMACompressor,
     'gzip': lambda: zlib.compressobj(wbits=31),
     'zlib': zlib.compressobj,
-    None: NullCompressor,
-    }
+    'none': NullCompressor,
+}
 
 
 class ObjStorage(metaclass=abc.ABCMeta):
     """ High-level API to manipulate the Software Heritage object storage.
 
     Conceptually, the object storage offers the following methods:
 
     - check_config()  check if the object storage is properly configured
     - __contains__()  check if an object is present, by object id
     - add()           add a new object, returning an object id
     - restore()       same as add() but erases already existing content
     - get()           retrieve the content of an object, by object id
     - check()         check the integrity of an object, by object id
     - delete()        remove an object
 
     And some management methods:
 
     - get_random()    get random object ids of existing contents (used for
                       the content integrity checker).
 
     Some of the methods have available streaming equivalents:
 
     - add_stream()     same as add() but with a chunked iterator
     - restore_stream() same as add_stream() but erases already existing
                        content
     - get_stream()     same as get() but returns a chunked iterator
 
     Each implementation of this interface can have a different behavior and
     its own way to store the contents.
     """
 
     def __init__(self, *, allow_delete=False, **kwargs):
         # A more complete permission system could be used in place of that
         # if it becomes needed
         self.allow_delete = allow_delete
 
     @abc.abstractmethod
     def check_config(self, *, check_write):
         """Check whether the object storage is properly configured.
 
         Args:
             check_write (bool): if True, check if writes to the object
                 storage can succeed.
 
         Returns:
             True if the configuration check worked; an exception is raised
             otherwise.
 
         """
         pass
 
     @abc.abstractmethod
     def __contains__(self, obj_id, *args, **kwargs):
         """Indicate if the given object is present in the storage.
 
         Args:
             obj_id (bytes): object identifier.
 
         Returns:
             True if and only if the object is present in the current object
             storage.
 
         """
         pass
 
     @abc.abstractmethod
     def add(self, content, obj_id=None, check_presence=True, *args,
             **kwargs):
         """Add a new object to the object storage.
 
         Args:
             content (bytes): object's raw content to add in storage.
             obj_id (bytes): checksum of `content` computed with the
                 ID_HASH_ALGO algorithm. When given, obj_id will be trusted
                 to match the bytes. If missing, obj_id will be computed on
                 the fly.
             check_presence (bool): indicate if the presence of the
                 content should be verified before adding the file.
 
         Returns:
             the id (bytes) of the object into the storage.
 
         """
         pass
 
     def add_batch(self, contents, check_presence=True):
         """Add a batch of new objects to the object storage.
 
         Args:
             contents (dict): mapping from obj_id to object contents
 
         Returns:
             the number of objects added to the storage
 
         """
         ctr = 0
         for obj_id, content in contents.items():
             self.add(content, obj_id, check_presence=check_presence)
             ctr += 1
         return ctr
 
     def restore(self, content, obj_id=None, *args, **kwargs):
         """Restore a content that has been corrupted.
 
         This function is identical to add() but does not check if the
         object id is already in the file system.
         The default implementation provided by the current class is
         suitable for most cases.
 
         Args:
             content (bytes): object's raw content to add in storage
             obj_id (bytes): checksum of `content` as computed by
                 ID_HASH_ALGO. When given, obj_id will be trusted to match
                 the bytes. If missing, obj_id will be computed on the fly.
 
         """
         # check_presence to false will erase the potential previous content.
         return self.add(content, obj_id, check_presence=False)
 
     @abc.abstractmethod
     def get(self, obj_id, *args, **kwargs):
         """Retrieve the content of a given object.
 
         Args:
             obj_id (bytes): object id.
 
         Returns:
             the content of the requested object as bytes.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
 
         """
         pass
 
     def get_batch(self, obj_ids, *args, **kwargs):
         """Retrieve objects' raw content in bulk from storage.
 
         Note: This function does have a default implementation in ObjStorage
         that is suitable for most cases.
 
         For object storages that need to do the minimal number of requests
         possible (ex: remote object storages), that method can be overridden
         to perform a more efficient operation.
 
         Args:
             obj_ids ([bytes]): list of object ids.
 
         Returns:
             list of resulting contents, or None if the content could not be
             retrieved. No exception is raised, as a failure to retrieve one
             content does not cancel the whole request.
 
         """
         for obj_id in obj_ids:
             try:
                 yield self.get(obj_id)
             except ObjNotFoundError:
                 yield None
 
     @abc.abstractmethod
     def check(self, obj_id, *args, **kwargs):
         """Perform an integrity check for a given object.
 
         Verify that the file object is in place and that the content matches
         the object id.
 
         Args:
             obj_id (bytes): object identifier.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
             Error: if the requested object is corrupted.
 
         """
         pass
 
     @abc.abstractmethod
     def delete(self, obj_id, *args, **kwargs):
         """Delete an object.
 
         Args:
             obj_id (bytes): object identifier.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
 
         """
         if not self.allow_delete:
             raise PermissionError("Delete is not allowed.")
 
     # Management methods
 
     def get_random(self, batch_size, *args, **kwargs):
         """Get random ids of existing contents.
 
         This method is used in order to get random ids to perform
         content integrity verifications on random contents.
 
         Args:
             batch_size (int): Number of ids that will be given
 
         Yields:
             An iterable of ids (bytes) of contents that are in the
             current object storage.
 
         """
         pass
 
     # Streaming methods
 
     def add_stream(self, content_iter, obj_id, check_presence=True):
         """Add a new object to the object storage using streaming.
 
         This function is identical to add() except it takes a generator
         that yields the chunked content instead of the whole content at
         once.
 
         Args:
             content_iter (bytes iterator): generator that yields the
                 object's raw content to add in storage, chunk by chunk.
             obj_id (bytes): object identifier
             check_presence (bool): indicate if the presence of the
                 content should be verified before adding the file.
 
         Returns:
             the id (bytes) of the object into the storage.
 
         """
         raise NotImplementedError
 
     def restore_stream(self, content_iter, obj_id=None):
         """Restore a content that has been corrupted, using streaming.
 
         This function is identical to restore() except it takes a generator
         that yields the chunked content instead of the whole content at
         once.
         The default implementation provided by the current class is
         suitable for most cases.
 
         Args:
             content_iter (bytes iterator): generator that yields the
                 object's raw content to add in storage, chunk by chunk.
             obj_id (bytes): object identifier
 
         """
         # check_presence to false will erase the potential previous content.
         return self.add_stream(content_iter, obj_id, check_presence=False)
 
     def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
         """Retrieve the content of a given object as a chunked iterator.
 
         Args:
             obj_id (bytes): object id.
 
         Returns:
             the content of the requested object, as chunks of bytes.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
 
         """
         raise NotImplementedError
 
     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         """Generates known object ids.
 
         Args:
             last_obj_id (bytes): object id from which to start iterating
                 (excluded).
             limit (int): max number of object ids to generate.
 
         Yields:
             obj_id (bytes): object ids.
 
         """
         it = iter(self)
         if last_obj_id:
             it = dropwhile(lambda x: x <= last_obj_id, it)
         return islice(it, limit)
""" _data = {} # type: Dict[str, Any] def __init__(self, account_name, account_key, **kwargs): # do not care for the account_name and the api_secret_key here self._data = defaultdict(dict) def get_container_properties(self, container_name): self._data[container_name] return container_name in self._data def create_blob_from_bytes(self, container_name, blob_name, blob): self._data[container_name][blob_name] = blob def get_blob_to_bytes(self, container_name, blob_name): if blob_name not in self._data[container_name]: raise AzureMissingResourceHttpError( 'Blob %s not found' % blob_name, 404) return MockBlob(name=blob_name, content=self._data[container_name][blob_name]) def delete_blob(self, container_name, blob_name): try: self._data[container_name].pop(blob_name) except KeyError: raise AzureMissingResourceHttpError( 'Blob %s not found' % blob_name, 404) return True def exists(self, container_name, blob_name): return blob_name in self._data[container_name] def list_blobs(self, container_name, marker=None, maxresults=None): for blob_name, content in sorted(self._data[container_name].items()): if marker is None or blob_name > marker: yield MockBlob(name=blob_name, content=content) class TestAzureCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): - compression = None # type: Optional[str] + compression = 'none' def setUp(self): super().setUp() patcher = patch( 'swh.objstorage.backends.azure.BlockBlobService', MockBlockBlobService, ) patcher.start() self.addCleanup(patcher.stop) self.storage = get_objstorage('azure', { 'account_name': 'account-name', 'api_secret_key': 'api-secret-key', 'container_name': 'container-name', 'compression': self.compression, }) def test_compression(self): content, obj_id = self.hash_content(b'test content is compressed') self.storage.add(content, obj_id=obj_id) blob_service, container = self.storage.get_blob_service(obj_id) internal_id = self.storage._internal_id(obj_id) raw_blob = blob_service.get_blob_to_bytes(container, internal_id) d = decompressors[self.compression]() assert d.decompress(raw_blob.content) == content assert d.unused_data == b'' def test_trailing_data_on_stored_blob(self): content, obj_id = self.hash_content(b'test content without garbage') self.storage.add(content, obj_id=obj_id) blob_service, container = self.storage.get_blob_service(obj_id) internal_id = self.storage._internal_id(obj_id) blob_service._data[container][internal_id] += b'trailing garbage' - if self.compression is not None: + if self.compression == 'none': with self.assertRaises(Error) as e: - self.storage.get(obj_id) - assert 'trailing data' in e.exception.args[0] + self.storage.check(obj_id) else: with self.assertRaises(Error) as e: - self.storage.check(obj_id) + self.storage.get(obj_id) + assert 'trailing data' in e.exception.args[0] class TestAzureCloudObjStorageGzip(TestAzureCloudObjStorage): compression = 'gzip' class TestAzureCloudObjStorageZlib(TestAzureCloudObjStorage): compression = 'zlib' class TestAzureCloudObjStorageLzma(TestAzureCloudObjStorage): compression = 'lzma' class TestAzureCloudObjStorageBz2(TestAzureCloudObjStorage): compression = 'bz2' class TestPrefixedAzureCloudObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() patcher = patch( 'swh.objstorage.backends.azure.BlockBlobService', MockBlockBlobService, ) patcher.start() self.addCleanup(patcher.stop) self.accounts = {} for prefix in '0123456789abcdef': self.accounts[prefix] = { 'account_name': 'account_%s' % prefix, 'api_secret_key': 'secret_key_%s' % prefix, 
'container_name': 'container_%s' % prefix, } self.storage = get_objstorage('azure-prefixed', { 'accounts': self.accounts }) def test_prefixedazure_instantiation_missing_prefixes(self): del self.accounts['d'] del self.accounts['e'] with self.assertRaisesRegex(ValueError, 'Missing prefixes'): get_objstorage('azure-prefixed', { 'accounts': self.accounts }) def test_prefixedazure_instantiation_inconsistent_prefixes(self): self.accounts['00'] = self.accounts['0'] with self.assertRaisesRegex(ValueError, 'Inconsistent prefixes'): get_objstorage('azure-prefixed', { 'accounts': self.accounts }) def test_prefixedazure_sharding_behavior(self): for i in range(100): content, obj_id = self.hash_content(b'test_content_%02d' % i) self.storage.add(content, obj_id=obj_id) hex_obj_id = hash_to_hex(obj_id) prefix = hex_obj_id[0] self.assertTrue( self.storage.prefixes[prefix][0].exists( self.accounts[prefix]['container_name'], hex_obj_id )) diff --git a/swh/objstorage/tests/test_objstorage_cloud.py b/swh/objstorage/tests/test_objstorage_cloud.py index 7cab501..2f25996 100644 --- a/swh/objstorage/tests/test_objstorage_cloud.py +++ b/swh/objstorage/tests/test_objstorage_cloud.py @@ -1,150 +1,149 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from libcloud.common.types import InvalidCredsError from libcloud.storage.types import (ContainerDoesNotExistError, ObjectDoesNotExistError) -from typing import Optional from swh.model import hashutil from swh.objstorage.objstorage import decompressors from swh.objstorage.exc import Error from swh.objstorage.backends.libcloud import CloudObjStorage from .objstorage_testing import ObjStorageTestFixture API_KEY = 'API_KEY' API_SECRET_KEY = 'API SECRET KEY' CONTAINER_NAME = 'test_container' class MockLibcloudObject(): """ Libcloud object mock that replicates its API """ def __init__(self, name, content): self.name = name self.content = list(content) def as_stream(self): yield from iter(self.content) class MockLibcloudDriver(): """ Mock driver that replicates the used LibCloud API """ def __init__(self, api_key, api_secret_key): self.containers = {CONTAINER_NAME: {}} # Storage is initialized self.api_key = api_key self.api_secret_key = api_secret_key def _check_credentials(self): # Private method may be known as another name in Libcloud but is used # to replicate libcloud behavior (i.e. 
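
The swapped assertions in test_trailing_data_on_stored_blob above encode the behavioral core of this patch: with compression 'none' there is no codec framing, so get() cannot notice appended garbage and only check(), which re-hashes the content, fails; with a real codec the decompressor reports leftover bytes in unused_data and get() itself raises. A hedged sketch of that read path (the function name and error message are illustrative, not the backend's actual code):

    from swh.objstorage.objstorage import decompressors
    from swh.objstorage.exc import Error

    def read_blob(raw_blob, compression):
        d = decompressors[compression]()
        content = d.decompress(raw_blob)
        # NullDecompressor always reports b'' here, so the 'none' case
        # sails through and corruption is only caught later by check().
        if d.unused_data:
            raise Error('trailing data found when reading blob')
        return content
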
diff --git a/swh/objstorage/tests/test_objstorage_cloud.py b/swh/objstorage/tests/test_objstorage_cloud.py
index 7cab501..2f25996 100644
--- a/swh/objstorage/tests/test_objstorage_cloud.py
+++ b/swh/objstorage/tests/test_objstorage_cloud.py
@@ -1,150 +1,149 @@
 # Copyright (C) 2016  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import unittest
 
 from libcloud.common.types import InvalidCredsError
 from libcloud.storage.types import (ContainerDoesNotExistError,
                                     ObjectDoesNotExistError)
-from typing import Optional
 
 from swh.model import hashutil
 from swh.objstorage.objstorage import decompressors
 from swh.objstorage.exc import Error
 from swh.objstorage.backends.libcloud import CloudObjStorage
 
 from .objstorage_testing import ObjStorageTestFixture
 
 
 API_KEY = 'API_KEY'
 API_SECRET_KEY = 'API SECRET KEY'
 CONTAINER_NAME = 'test_container'
 
 
 class MockLibcloudObject():
     """ Libcloud object mock that replicates its API """
     def __init__(self, name, content):
         self.name = name
         self.content = list(content)
 
     def as_stream(self):
         yield from iter(self.content)
 
 
 class MockLibcloudDriver():
     """ Mock driver that replicates the used LibCloud API """
     def __init__(self, api_key, api_secret_key):
         self.containers = {CONTAINER_NAME: {}}  # Storage is initialized
         self.api_key = api_key
         self.api_secret_key = api_secret_key
 
     def _check_credentials(self):
         # Such a private method may exist under another name in Libcloud;
         # it is used here to replicate libcloud behavior (i.e. credentials
         # are checked on each request)
         if self.api_key != API_KEY or self.api_secret_key != API_SECRET_KEY:
             raise InvalidCredsError()
 
     def get_container(self, container_name):
         try:
             return self.containers[container_name]
         except KeyError:
             raise ContainerDoesNotExistError(container_name=container_name,
                                              driver=self, value=None)
 
     def iterate_container_objects(self, container):
         self._check_credentials()
         yield from (v for k, v in sorted(container.items()))
 
     def get_object(self, container_name, obj_id):
         self._check_credentials()
         try:
             container = self.get_container(container_name)
             return container[obj_id]
         except KeyError:
             raise ObjectDoesNotExistError(object_name=obj_id,
                                           driver=self, value=None)
 
     def delete_object(self, obj):
         self._check_credentials()
         try:
             container = self.get_container(CONTAINER_NAME)
             container.pop(obj.name)
             return True
         except KeyError:
             raise ObjectDoesNotExistError(object_name=obj.name,
                                           driver=self, value=None)
 
     def upload_object_via_stream(self, content, container, obj_id):
         self._check_credentials()
         obj = MockLibcloudObject(obj_id, content)
         container[obj_id] = obj
 
 
 class MockCloudObjStorage(CloudObjStorage):
     """ Cloud object storage that uses a mocked driver """
     def _get_driver(self, **kwargs):
         return MockLibcloudDriver(**kwargs)
 
     def _get_provider(self):
         # Implement this for the abc requirement, but behavior is defined
         # in _get_driver.
         pass
 
 
 class TestCloudObjStorage(ObjStorageTestFixture, unittest.TestCase):
-    compression = None  # type: Optional[str]
+    compression = 'none'
 
     def setUp(self):
         super().setUp()
         self.storage = MockCloudObjStorage(
             CONTAINER_NAME,
             api_key=API_KEY, api_secret_key=API_SECRET_KEY,
             compression=self.compression,
         )
 
     def test_compression(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         self.storage.add(content, obj_id=obj_id)
 
         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)
 
         raw_content = b''.join(data[obj_id].content)
 
         d = decompressors[self.compression]()
         assert d.decompress(raw_content) == content
         assert d.unused_data == b''
 
     def test_trailing_data_on_stored_blob(self):
         content, obj_id = self.hash_content(b'test content without garbage')
         self.storage.add(content, obj_id=obj_id)
 
         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)
 
         data[obj_id].content.append(b'trailing garbage')
 
-        if self.compression is not None:
+        if self.compression == 'none':
             with self.assertRaises(Error) as e:
-                self.storage.get(obj_id)
-            assert 'trailing data' in e.exception.args[0]
+                self.storage.check(obj_id)
         else:
             with self.assertRaises(Error) as e:
-                self.storage.check(obj_id)
+                self.storage.get(obj_id)
+            assert 'trailing data' in e.exception.args[0]
 
 
 class TestCloudObjStorageBz2(TestCloudObjStorage):
     compression = 'bz2'
 
 
 class TestCloudObjStorageGzip(TestCloudObjStorage):
     compression = 'gzip'
 
 
 class TestCloudObjStorageLzma(TestCloudObjStorage):
     compression = 'lzma'
 
 
 class TestCloudObjStorageZlib(TestCloudObjStorage):
     compression = 'zlib'
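
A detail the trailing-data test above leans on: MockLibcloudObject stores its content as a list of chunks, so appending to .content is enough to simulate a corrupted stored object. A quick illustration (values made up):

    obj = MockLibcloudObject('some-object', [b'chunk1', b'chunk2'])
    obj.content.append(b'trailing garbage')
    # as_stream() replays the chunks in order, garbage included:
    assert b''.join(obj.as_stream()) == b'chunk1chunk2trailing garbage'
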
diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py
index e5df992..1cbc34c 100644
--- a/swh/objstorage/tests/test_objstorage_pathslicing.py
+++ b/swh/objstorage/tests/test_objstorage_pathslicing.py
@@ -1,159 +1,157 @@
 # Copyright (C) 2015-2017  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import shutil
 import tempfile
 import unittest
 
 from unittest.mock import patch, DEFAULT
 
-from typing import Optional
-
 from swh.model import hashutil
 from swh.objstorage import exc, get_objstorage, ID_HASH_LENGTH
 
 from .objstorage_testing import ObjStorageTestFixture
 
 
 class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase):
-    compression = None  # type: Optional[str]
+    compression = 'none'
 
     def setUp(self):
         super().setUp()
         self.slicing = '0:2/2:4/4:6'
         self.tmpdir = tempfile.mkdtemp()
         self.storage = get_objstorage(
             'pathslicing', {
                 'root': self.tmpdir,
                 'slicing': self.slicing,
                 'compression': self.compression,
             }
         )
 
     def tearDown(self):
         super().tearDown()
         shutil.rmtree(self.tmpdir)
 
     def content_path(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return self.storage._obj_path(hex_obj_id)
 
     def test_iter(self):
         content, obj_id = self.hash_content(b'iter')
         self.assertEqual(list(iter(self.storage)), [])
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(list(iter(self.storage)), [obj_id])
 
     def test_len(self):
         content, obj_id = self.hash_content(b'len')
         self.assertEqual(len(self.storage), 0)
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(len(self.storage), 1)
 
     def test_check_ok(self):
         content, obj_id = self.hash_content(b'check_ok')
         self.storage.add(content, obj_id=obj_id)
         assert self.storage.check(obj_id) is None
         assert self.storage.check(obj_id.hex()) is None
 
     def test_check_id_mismatch(self):
         content, obj_id = self.hash_content(b'check_id_mismatch')
         self.storage.add(b'unexpected content', obj_id=obj_id)
 
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
         self.assertEqual((
             'Corrupt object %s should have id '
             '12ebb2d6c81395bcc5cab965bdff640110cb67ff' % obj_id.hex(),),
             error.exception.args)
 
     def test_get_random_contents(self):
         content, obj_id = self.hash_content(b'get_random_content')
         self.storage.add(content, obj_id=obj_id)
         random_contents = list(self.storage.get_random(1))
         self.assertEqual(1, len(random_contents))
         self.assertIn(obj_id, random_contents)
 
     def test_iterate_from(self):
         all_ids = []
         for i in range(100):
             content, obj_id = self.hash_content(b'content %d' % i)
             self.storage.add(content, obj_id=obj_id)
             all_ids.append(obj_id)
         all_ids.sort()
 
         ids = list(self.storage.iter_from(b'\x00' * (ID_HASH_LENGTH // 2)))
         self.assertEqual(len(ids), len(all_ids))
         self.assertEqual(ids, all_ids)
 
         ids = list(self.storage.iter_from(all_ids[0]))
         self.assertEqual(len(ids), len(all_ids)-1)
         self.assertEqual(ids, all_ids[1:])
 
         ids = list(self.storage.iter_from(all_ids[-1], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 1)
         self.assertEqual(len(ids), 0)
 
         ids = list(self.storage.iter_from(all_ids[-2], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 2)  # beware, this depends on the hash algo
         self.assertEqual(len(ids), 1)
         self.assertEqual(ids, all_ids[-1:])
 
     def test_fdatasync_default(self):
         content, obj_id = self.hash_content(b'check_fdatasync')
         with patch.multiple('os', fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         if self.storage.use_fdatasync:
             assert patched['fdatasync'].call_count == 1
             assert patched['fsync'].call_count == 0
         else:
             assert patched['fdatasync'].call_count == 0
             assert patched['fsync'].call_count == 1
 
     def test_fdatasync_forced_on(self):
         self.storage.use_fdatasync = True
         content, obj_id = self.hash_content(b'check_fdatasync')
         with patch.multiple('os', fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched['fdatasync'].call_count == 1
         assert patched['fsync'].call_count == 0
 
     def test_fdatasync_forced_off(self):
         self.storage.use_fdatasync = False
         content, obj_id = self.hash_content(b'check_fdatasync')
         with patch.multiple('os', fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched['fdatasync'].call_count == 0
         assert patched['fsync'].call_count == 1
 
     def test_check_not_compressed(self):
         content, obj_id = self.hash_content(b'check_not_compressed')
         self.storage.add(content, obj_id=obj_id)
 
         with open(self.content_path(obj_id), 'ab') as f:  # Add garbage.
             f.write(b'garbage')
 
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
-        if self.compression is None:
+        if self.compression == 'none':
             self.assertIn('Corrupt object', error.exception.args[0])
         else:
             self.assertIn('trailing data found', error.exception.args[0])
 
 
 class TestPathSlicingObjStorageGzip(TestPathSlicingObjStorage):
     compression = 'gzip'
 
 
 class TestPathSlicingObjStorageZlib(TestPathSlicingObjStorage):
     compression = 'zlib'
 
 
 class TestPathSlicingObjStorageBz2(TestPathSlicingObjStorage):
     compression = 'bz2'
 
 
 class TestPathSlicingObjStorageLzma(TestPathSlicingObjStorage):
     compression = 'lzma'
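
For context on the fixture above: the '0:2/2:4/4:6' slicing configured in setUp shards each hexadecimal object id into three two-character directory levels. A standalone sketch of the mapping, assuming behavior equivalent to the storage's _obj_path (not its actual implementation):

    import os

    def obj_path(root, hex_obj_id, slicing='0:2/2:4/4:6'):
        parts = []
        for bounds in slicing.split('/'):
            start, end = (int(x) for x in bounds.split(':'))
            parts.append(hex_obj_id[start:end])
        return os.path.join(root, *parts, hex_obj_id)

    assert obj_path('/srv', '34973274ccef6ab4dfaaf86599792fa9c3fe4689') == \
        '/srv/34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689'
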
diff --git a/swh/objstorage/tests/test_objstorage_seaweedfs.py b/swh/objstorage/tests/test_objstorage_seaweedfs.py
index d069676..c344e06 100644
--- a/swh/objstorage/tests/test_objstorage_seaweedfs.py
+++ b/swh/objstorage/tests/test_objstorage_seaweedfs.py
@@ -1,93 +1,91 @@
 # Copyright (C) 2019  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import unittest
 
-from typing import Optional
-
 from swh.objstorage.objstorage import decompressors
 from swh.objstorage.exc import Error
 from swh.objstorage.backends.seaweed import WeedObjStorage, DEFAULT_LIMIT
 from swh.objstorage.tests.objstorage_testing import ObjStorageTestFixture
 
 
 class MockWeedFiler:
     """ WeedFiler mock that replicates its API """
     def __init__(self, url):
         self.url = url
         self.content = {}
 
     def get(self, remote_path):
         return self.content[remote_path]
 
     def put(self, fp, remote_path):
         self.content[remote_path] = fp.read()
 
     def exists(self, remote_path):
         return remote_path in self.content
 
     def delete(self, remote_path):
         del self.content[remote_path]
 
     def list(self, dir, last_file_name=None, limit=DEFAULT_LIMIT):
         keys = sorted(self.content.keys())
         if last_file_name is None:
             idx = 0
         else:
             idx = keys.index(last_file_name) + 1
         return {'Entries': [{'FullPath': x} for x in keys[idx:idx+limit]]}
 
 
 class TestWeedObjStorage(ObjStorageTestFixture, unittest.TestCase):
-    compression = None  # type: Optional[str]
+    compression = 'none'
 
     def setUp(self):
         super().setUp()
         self.url = 'http://127.0.0.1/test'
         self.storage = WeedObjStorage(url=self.url,
                                       compression=self.compression)
         self.storage.wf = MockWeedFiler(self.url)
 
     def test_compression(self):
         content, obj_id = self.hash_content(b'test compression')
         self.storage.add(content, obj_id=obj_id)
 
         raw_content = self.storage.wf.get(self.storage._path(obj_id))
 
         d = decompressors[self.compression]()
         assert d.decompress(raw_content) == content
         assert d.unused_data == b''
 
     def test_trailing_data_on_stored_blob(self):
         content, obj_id = self.hash_content(b'test content without garbage')
         self.storage.add(content, obj_id=obj_id)
 
         path = self.storage._path(obj_id)
         self.storage.wf.content[path] += b'trailing garbage'
 
-        if self.compression is not None:
+        if self.compression == 'none':
             with self.assertRaises(Error) as e:
-                self.storage.get(obj_id)
-            assert 'trailing data' in e.exception.args[0]
+                self.storage.check(obj_id)
         else:
             with self.assertRaises(Error) as e:
-                self.storage.check(obj_id)
+                self.storage.get(obj_id)
+            assert 'trailing data' in e.exception.args[0]
 
 
 class TestWeedObjStorageBz2(TestWeedObjStorage):
     compression = 'bz2'
 
 
 class TestWeedObjStorageGzip(TestWeedObjStorage):
     compression = 'gzip'
 
 
 class TestWeedObjStorageLzma(TestWeedObjStorage):
     compression = 'lzma'
 
 
 class TestWeedObjStorageZlib(TestWeedObjStorage):
     compression = 'zlib'
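
Endnote on MockWeedFiler.list() above: it reproduces the filer's cursor-style pagination (last_file_name is exclusive), which the backend relies on when iterating over stored objects. A usage sketch against the mock, with made-up paths:

    wf = MockWeedFiler('http://127.0.0.1/test')
    for name in ('a', 'b', 'c'):
        wf.content['/test/' + name] = b'data'

    page = wf.list('/test', last_file_name=None, limit=2)
    assert [e['FullPath'] for e in page['Entries']] == ['/test/a', '/test/b']
    # Resume from the last entry of the previous page:
    page = wf.list('/test', last_file_name='/test/b', limit=2)
    assert [e['FullPath'] for e in page['Entries']] == ['/test/c']
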