diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py
index c282126..eb04dfc 100644
--- a/swh/objstorage/backends/pathslicing.py
+++ b/swh/objstorage/backends/pathslicing.py
@@ -1,383 +1,389 @@
-# Copyright (C) 2015-2018 The Software Heritage developers
+# Copyright (C) 2015-2019 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

-import functools
 import os
-import gzip
 import tempfile
 import random
 import collections

 from itertools import islice
 from contextlib import contextmanager

 from swh.model import hashutil
 from swh.objstorage.objstorage import (
+    compressors, decompressors,
     ObjStorage, compute_hash, ID_HASH_ALGO,
     ID_HASH_LENGTH, DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT)
 from swh.objstorage.exc import ObjNotFoundError, Error

-GZIP_BUFSIZ = 1048576
+BUFSIZ = 1048576

 DIR_MODE = 0o755
 FILE_MODE = 0o644


 @contextmanager
 def _write_obj_file(hex_obj_id, objstorage):
     """ Context manager for writing object files to the object storage.

     During writing, data are written to a temporary file, which is atomically
-    renamed to the right file name after closing. This context manager also
-    takes care of (gzip) compressing the data on the fly.
+    renamed to the right file name after closing.

     Usage sample:
         with _write_obj_file(hex_obj_id, objstorage):
             f.write(obj_data)

     Yields:
         a file-like object open for writing bytes.
     """
     # Get the final paths and create the directory if absent.
     dir = objstorage._obj_dir(hex_obj_id)
     if not os.path.isdir(dir):
         os.makedirs(dir, DIR_MODE, exist_ok=True)
     path = os.path.join(dir, hex_obj_id)

     # Create a temporary file.
     (tmp, tmp_path) = tempfile.mkstemp(suffix='.tmp',
                                        prefix='hex_obj_id.', dir=dir)

     # Open the file and yield it for writing.
     tmp_f = os.fdopen(tmp, 'wb')
-    with gzip.GzipFile(filename=tmp_path, fileobj=tmp_f) as f:
-        yield f
+    yield tmp_f

     # Make sure the contents of the temporary file are written to disk
     tmp_f.flush()
     if objstorage.use_fdatasync:
         os.fdatasync(tmp)
     else:
         os.fsync(tmp)

-    # Then close the temporary file and move it to the right directory.
+    # Then close the temporary file and move it to the right path.
     tmp_f.close()

     os.chmod(tmp_path, FILE_MODE)
     os.rename(tmp_path, path)


-@contextmanager
 def _read_obj_file(hex_obj_id, objstorage):
     """ Context manager for reading object file in the object storage.

     Usage sample:
         with _read_obj_file(hex_obj_id, objstorage) as f:
             b = f.read()

     Yields:
         a file-like object open for reading bytes.
     """
     path = objstorage._obj_path(hex_obj_id)
-    with gzip.GzipFile(path, 'rb') as f:
-        yield f
+
+    return open(path, 'rb')


 class PathSlicingObjStorage(ObjStorage):
     """Implementation of the ObjStorage API based on the hash of the content.

     On disk, an object storage is a directory tree containing files
     named after their object IDs. An object ID is a checksum of its
     content, depending on the value of the ID_HASH_ALGO constant (see
     swh.model.hashutil for its meaning).

     To avoid directories that contain too many files, the object storage has
     a given slicing. Each slicing correspond to a directory that is named
     according to the hash of its content.
     So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689
     will be stored in the given object storages :

     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689

     The files in the storage are stored in gzipped compressed format.

     Attributes:
         root (string): path to the root directory of the storage on
             the disk.
         bounds: list of tuples that indicates the beginning and the end
             of each subdirectory for a content.
     """

-    def __init__(self, root, slicing, **kwargs):
+    def __init__(self, root, slicing, compression='gzip', **kwargs):
         """ Create an object to access a hash-slicing based object storage.

         Args:
             root (string): path to the root directory of the storage on
                 the disk.
             slicing (string): string that indicates the slicing to perform
                 on the hash of the content to know the path where it should
                 be stored.
         """
         super().__init__(**kwargs)
         self.root = root
         # Make a list of tuples where each tuple contains the beginning
         # and the end of each slicing.
         self.bounds = [
             slice(*map(int, sbounds.split(':')))
             for sbounds in slicing.split('/')
             if sbounds
         ]

         self.use_fdatasync = hasattr(os, 'fdatasync')
+        self.compression = compression

         self.check_config(check_write=False)

     def check_config(self, *, check_write):
         """Check whether this object storage is properly configured"""

         root = self.root

         if not os.path.isdir(root):
             raise ValueError(
                 'PathSlicingObjStorage root "%s" is not a directory' % root
             )

         max_endchar = max(map(lambda bound: bound.stop, self.bounds))
         if ID_HASH_LENGTH < max_endchar:
             raise ValueError(
                 'Algorithm %s has too short hash for slicing to char %d'
                 % (ID_HASH_ALGO, max_endchar)
             )

         if check_write:
             if not os.access(self.root, os.W_OK):
                 raise PermissionError(
                     'PathSlicingObjStorage root "%s" is not writable' % root
                 )

+        if self.compression not in compressors:
+            raise ValueError('Unknown compression algorithm "%s" for '
+                             'PathSlicingObjStorage' % self.compression)
+
         return True

     def __contains__(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return os.path.isfile(self._obj_path(hex_obj_id))

     def __iter__(self):
         """Iterate over the object identifiers currently available in the
         storage.

         Warning: with the current implementation of the object
         storage, this method will walk the filesystem to list objects,
         meaning that listing all objects will be very slow for large
         storages. You almost certainly don't want to use this method
         in production.

         Return:
             Iterator over object IDs
         """
         def obj_iterator():
             # XXX hackish: it does not verify that the depth of found files
             # matches the slicing depth of the storage
             for root, _dirs, files in os.walk(self.root):
                 _dirs.sort()
                 for f in sorted(files):
                     yield bytes.fromhex(f)

         return obj_iterator()

     def __len__(self):
         """Compute the number of objects available in the storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performances applies

         Return:
             number of objects contained in the storage
         """
         return sum(1 for i in self)

     def _obj_dir(self, hex_obj_id):
         """ Compute the storage directory of an object.

         See also: PathSlicingObjStorage::_obj_path

         Args:
             hex_obj_id: object id as hexlified string.

         Returns:
             Path to the directory that contains the required object.
         """
         slices = [hex_obj_id[bound] for bound in self.bounds]
         return os.path.join(self.root, *slices)

     def _obj_path(self, hex_obj_id):
         """ Compute the full path to an object into the current storage.

         See also: PathSlicingObjStorage::_obj_dir

         Args:
             hex_obj_id: object id as hexlified string.
         Returns:
             Path to the actual object corresponding to the given id.
         """
         return os.path.join(self._obj_dir(hex_obj_id), hex_obj_id)

     def add(self, content, obj_id=None, check_presence=True):
         if obj_id is None:
             obj_id = compute_hash(content)
         if check_presence and obj_id in self:
             # If the object is already present, return immediately.
             return obj_id

         hex_obj_id = hashutil.hash_to_hex(obj_id)
-        if isinstance(content, collections.Iterator):
-            content = b''.join(content)
+        if not isinstance(content, collections.Iterator):
+            content = [content]
+        compressor = compressors[self.compression]()
         with _write_obj_file(hex_obj_id, self) as f:
-            f.write(content)
+            for chunk in content:
+                f.write(compressor.compress(chunk))
+            f.write(compressor.flush())

         return obj_id

     def get(self, obj_id):
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         # Open the file and return its content as bytes
         hex_obj_id = hashutil.hash_to_hex(obj_id)
+        d = decompressors[self.compression]()
         with _read_obj_file(hex_obj_id, self) as f:
-            return f.read()
+            out = d.decompress(f.read())
+        if d.unused_data:
+            raise Error('Corrupt object %s: trailing data found'
+                        % hex_obj_id,)
+
+        return out

     def check(self, obj_id):
-        if obj_id not in self:
-            raise ObjNotFoundError(obj_id)
+        try:
+            data = self.get(obj_id)
+        except OSError:
+            hex_obj_id = hashutil.hash_to_hex(obj_id)
+            raise Error(
+                'Corrupt object %s: not a proper compressed file'
+                % hex_obj_id,
+            )
+
+        checksums = hashutil.MultiHash.from_data(
+            data, hash_names=[ID_HASH_ALGO]).digest()
+        actual_obj_id = checksums[ID_HASH_ALGO]

         hex_obj_id = hashutil.hash_to_hex(obj_id)

-        try:
-            with gzip.open(self._obj_path(hex_obj_id)) as f:
-                length = None
-                if ID_HASH_ALGO.endswith('_git'):
-                    # if the hashing algorithm is git-like, we need to know
-                    # the content size to hash on the fly. Do a first pass
-                    # here to compute the size
-                    length = 0
-                    while True:
-                        chunk = f.read(GZIP_BUFSIZ)
-                        length += len(chunk)
-                        if not chunk:
-                            break
-                    f.rewind()
-
-                checksums = hashutil.MultiHash.from_file(
-                    f, hash_names=[ID_HASH_ALGO], length=length).digest()
-                actual_obj_id = checksums[ID_HASH_ALGO]
-                if hex_obj_id != hashutil.hash_to_hex(actual_obj_id):
-                    raise Error(
-                        'Corrupt object %s should have id %s'
-                        % (hashutil.hash_to_hex(obj_id),
-                           hashutil.hash_to_hex(actual_obj_id))
-                    )
-        except (OSError, IOError):
-            # IOError is for compatibility with older python versions
-            raise Error('Corrupt object %s is not a gzip file' % hex_obj_id)
+        if hex_obj_id != hashutil.hash_to_hex(actual_obj_id):
+            raise Error(
+                'Corrupt object %s should have id %s'
+                % (hashutil.hash_to_hex(obj_id),
+                   hashutil.hash_to_hex(actual_obj_id))
+            )

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         try:
             os.remove(self._obj_path(hex_obj_id))
         except FileNotFoundError:
             raise ObjNotFoundError(obj_id)
         return True

     # Management methods

     def get_random(self, batch_size):
         def get_random_content(self, batch_size):
             """ Get a batch of content inside a single directory.

             Returns:
                 a tuple (batch size, batch).
""" dirs = [] for level in range(len(self.bounds)): path = os.path.join(self.root, *dirs) dir_list = next(os.walk(path))[1] if 'tmp' in dir_list: dir_list.remove('tmp') dirs.append(random.choice(dir_list)) path = os.path.join(self.root, *dirs) content_list = next(os.walk(path))[2] length = min(batch_size, len(content_list)) return length, map(hashutil.hash_to_bytes, random.sample(content_list, length)) while batch_size: length, it = get_random_content(self, batch_size) batch_size = batch_size - length yield from it # Streaming methods @contextmanager def chunk_writer(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) + compressor = compressors[self.compression]() with _write_obj_file(hex_obj_id, self) as f: - yield f.write + yield lambda c: f.write(compressor.compress(c)) + f.write(compressor.flush()) def add_stream(self, content_iter, obj_id, check_presence=True): if check_presence and obj_id in self: return obj_id with self.chunk_writer(obj_id) as writer: for chunk in content_iter: writer(chunk) return obj_id def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) + decompressor = decompressors[self.compression]() with _read_obj_file(hex_obj_id, self) as f: - reader = functools.partial(f.read, chunk_size) - yield from iter(reader, b'') + while True: + raw = f.read(chunk_size) + if not raw: + break + r = decompressor.decompress(raw) + if not r: + continue + yield r def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): if last_obj_id: it = self.iter_from(last_obj_id) else: it = iter(self) return islice(it, limit) def iter_from(self, obj_id, n_leaf=False): hex_obj_id = hashutil.hash_to_hex(obj_id) slices = [hex_obj_id[bound] for bound in self.bounds] rlen = len(self.root.split('/')) i = 0 for root, dirs, files in os.walk(self.root): if not dirs: i += 1 level = len(root.split('/')) - rlen dirs.sort() if dirs and root == os.path.join(self.root, *slices[:level]): cslice = slices[level] for d in dirs[:]: if d < cslice: dirs.remove(d) for f in sorted(files): if f > hex_obj_id: yield bytes.fromhex(f) if n_leaf: yield i diff --git a/swh/objstorage/tests/objstorage_testing.py b/swh/objstorage/tests/objstorage_testing.py index f32ae0b..daa2de4 100644 --- a/swh/objstorage/tests/objstorage_testing.py +++ b/swh/objstorage/tests/objstorage_testing.py @@ -1,212 +1,211 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import time import collections from swh.objstorage import exc from swh.objstorage.objstorage import compute_hash class ObjStorageTestFixture: def hash_content(self, content): obj_id = compute_hash(content) return content, obj_id def assertContentMatch(self, obj_id, expected_content): # noqa content = self.storage.get(obj_id) self.assertEqual(content, expected_content) def test_check_config(self): self.assertTrue(self.storage.check_config(check_write=False)) self.assertTrue(self.storage.check_config(check_write=True)) def test_contains(self): content_p, obj_id_p = self.hash_content(b'contains_present') content_m, obj_id_m = self.hash_content(b'contains_missing') self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_add_get_w_id(self): content, obj_id = self.hash_content(b'add_get_w_id') r = 
         self.assertEqual(obj_id, r)
         self.assertContentMatch(obj_id, content)

     def test_add_big(self):
         content, obj_id = self.hash_content(b'add_big' * 1024 * 1024)
         r = self.storage.add(content, obj_id=obj_id)
         self.assertEqual(obj_id, r)
         self.assertContentMatch(obj_id, content)

     def test_add_get_wo_id(self):
         content, obj_id = self.hash_content(b'add_get_wo_id')
         r = self.storage.add(content)
         self.assertEqual(obj_id, r)
         self.assertContentMatch(obj_id, content)

     def test_add_get_batch(self):
         content1, obj_id1 = self.hash_content(b'add_get_batch_1')
         content2, obj_id2 = self.hash_content(b'add_get_batch_2')
         self.storage.add(content1, obj_id1)
         self.storage.add(content2, obj_id2)
         cr1, cr2 = self.storage.get_batch([obj_id1, obj_id2])
         self.assertEqual(cr1, content1)
         self.assertEqual(cr2, content2)

     def test_get_batch_unexisting_content(self):
         content, obj_id = self.hash_content(b'get_batch_unexisting_content')
         result = list(self.storage.get_batch([obj_id]))
         self.assertTrue(len(result) == 1)
         self.assertIsNone(result[0])

     def test_restore_content(self):
         valid_content, valid_obj_id = self.hash_content(b'restore_content')
         invalid_content = b'unexpected content'
         id_adding = self.storage.add(invalid_content, valid_obj_id)
         self.assertEqual(id_adding, valid_obj_id)
         with self.assertRaises(exc.Error):
             self.storage.check(id_adding)
         id_restore = self.storage.restore(valid_content, valid_obj_id)
         self.assertEqual(id_restore, valid_obj_id)
         self.assertContentMatch(valid_obj_id, valid_content)

     def test_get_missing(self):
         content, obj_id = self.hash_content(b'get_missing')
         with self.assertRaises(exc.ObjNotFoundError) as e:
             self.storage.get(obj_id)

         self.assertIn(obj_id, e.exception.args)

     def test_check_missing(self):
         content, obj_id = self.hash_content(b'check_missing')
         with self.assertRaises(exc.Error):
             self.storage.check(obj_id)

     def test_check_present(self):
         content, obj_id = self.hash_content(b'check_present')
         self.storage.add(content, obj_id)
         try:
             self.storage.check(obj_id)
         except exc.Error:
             self.fail('Integrity check failed')

     def test_delete_missing(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b'missing_content_to_delete')
         with self.assertRaises(exc.Error):
             self.storage.delete(obj_id)

     def test_delete_present(self):
         self.storage.allow_delete = True
         content, obj_id = self.hash_content(b'content_to_delete')
         self.storage.add(content, obj_id=obj_id)
         self.assertTrue(self.storage.delete(obj_id))
         with self.assertRaises(exc.Error):
             self.storage.get(obj_id)

     def test_delete_not_allowed(self):
         self.storage.allow_delete = False
         content, obj_id = self.hash_content(b'content_to_delete')
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.storage.delete(obj_id)

     def test_delete_not_allowed_by_default(self):
         content, obj_id = self.hash_content(b'content_to_delete')
         self.storage.add(content, obj_id=obj_id)
         with self.assertRaises(PermissionError):
             self.assertTrue(self.storage.delete(obj_id))

     def test_add_stream(self):
         content = [b'chunk1', b'chunk2']
         _, obj_id = self.hash_content(b''.join(content))
         try:
             self.storage.add_stream(iter(content), obj_id=obj_id)
         except NotImplementedError:
             return
         self.assertContentMatch(obj_id, b''.join(content))

     def test_add_stream_sleep(self):
         def gen_content():
             yield b'chunk1'
             time.sleep(0.5)
             yield b'chunk42'
         _, obj_id = self.hash_content(b'placeholder_id')
         try:
             self.storage.add_stream(gen_content(), obj_id=obj_id)
         except NotImplementedError:
             return
         self.assertContentMatch(obj_id, b'chunk1chunk42')
     def test_get_stream(self):
         content = b'123456789'
         _, obj_id = self.hash_content(content)
         self.storage.add(content, obj_id=obj_id)
         r = self.storage.get(obj_id)
         self.assertEqual(r, content)

         try:
             r = self.storage.get_stream(obj_id, chunk_size=1)
         except NotImplementedError:
             return
         self.assertTrue(isinstance(r, collections.Iterator))
         r = list(r)
-        self.assertEqual(len(r), 9)
         self.assertEqual(b''.join(r), content)

     def test_add_batch(self):
         contents = {}
         for i in range(50):
             content = b'Test content %02d' % i
             content, obj_id = self.hash_content(content)
             contents[obj_id] = content

         ret = self.storage.add_batch(contents)
         self.assertEqual(len(contents), ret)
         for obj_id in contents:
             self.assertIn(obj_id, self.storage)

     def test_content_iterator(self):
         sto_obj_ids = iter(self.storage)
         sto_obj_ids = list(sto_obj_ids)
         self.assertFalse(sto_obj_ids)

         obj_ids = set()
         for i in range(100):
             content, obj_id = self.hash_content(b'content %d' % i)
             self.storage.add(content, obj_id=obj_id)
             obj_ids.add(obj_id)

         sto_obj_ids = set(self.storage)
         self.assertEqual(sto_obj_ids, obj_ids)

     def test_list_content(self):
         all_ids = []
         for i in range(1200):
             content = b'example %d' % i
             obj_id = compute_hash(content)
             self.storage.add(content, obj_id)
             all_ids.append(obj_id)
         all_ids.sort()

         ids = list(self.storage.list_content())
         self.assertEqual(len(ids), 1200)
         self.assertEqual(ids[0], all_ids[0])
         self.assertEqual(ids[100], all_ids[100])
         self.assertEqual(ids[999], all_ids[999])

         ids = list(self.storage.list_content(limit=10))
         self.assertEqual(len(ids), 10)
         self.assertEqual(ids[0], all_ids[0])
         self.assertEqual(ids[9], all_ids[9])

         ids = list(self.storage.list_content(
             last_obj_id=all_ids[999], limit=100))
         self.assertEqual(len(ids), 100)
         self.assertEqual(ids[0], all_ids[1000])
         self.assertEqual(ids[9], all_ids[1009])
diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py
index 40f73d6..e5df992 100644
--- a/swh/objstorage/tests/test_objstorage_pathslicing.py
+++ b/swh/objstorage/tests/test_objstorage_pathslicing.py
@@ -1,139 +1,159 @@
 # Copyright (C) 2015-2017 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import shutil
 import tempfile
 import unittest

 from unittest.mock import patch, DEFAULT
-import gzip
+
+from typing import Optional

 from swh.model import hashutil
 from swh.objstorage import exc, get_objstorage, ID_HASH_LENGTH

 from .objstorage_testing import ObjStorageTestFixture


 class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase):
+    compression = None  # type: Optional[str]

     def setUp(self):
         super().setUp()
         self.slicing = '0:2/2:4/4:6'
         self.tmpdir = tempfile.mkdtemp()
         self.storage = get_objstorage(
-            'pathslicing',
-            {'root': self.tmpdir, 'slicing': self.slicing}
+            'pathslicing', {
+                'root': self.tmpdir,
+                'slicing': self.slicing,
+                'compression': self.compression,
+            }
         )

     def tearDown(self):
         super().tearDown()
         shutil.rmtree(self.tmpdir)

     def content_path(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return self.storage._obj_path(hex_obj_id)

     def test_iter(self):
         content, obj_id = self.hash_content(b'iter')
         self.assertEqual(list(iter(self.storage)), [])
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(list(iter(self.storage)), [obj_id])

     def test_len(self):
         content, obj_id = self.hash_content(b'len')
         self.assertEqual(len(self.storage), 0)
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(len(self.storage), 1)

     def test_check_ok(self):
         content, obj_id = self.hash_content(b'check_ok')
         self.storage.add(content, obj_id=obj_id)
-        self.storage.check(obj_id)
-        self.storage.check(obj_id.hex())
-
-    def test_check_not_gzip(self):
-        content, obj_id = self.hash_content(b'check_not_gzip')
-        self.storage.add(content, obj_id=obj_id)
-        with open(self.content_path(obj_id), 'ab') as f:  # Add garbage.
-            f.write(b'garbage')
-        with self.assertRaises(exc.Error) as error:
-            self.storage.check(obj_id)
-        self.assertEquals((
-            'Corrupt object %s is not a gzip file' % obj_id.hex(),),
-            error.exception.args)
+        assert self.storage.check(obj_id) is None
+        assert self.storage.check(obj_id.hex()) is None

     def test_check_id_mismatch(self):
         content, obj_id = self.hash_content(b'check_id_mismatch')
-        self.storage.add(content, obj_id=obj_id)
-        with gzip.open(self.content_path(obj_id), 'wb') as f:
-            f.write(b'unexpected content')
+        self.storage.add(b'unexpected content', obj_id=obj_id)
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
-        self.assertEquals((
+        self.assertEqual((
             'Corrupt object %s should have id '
             '12ebb2d6c81395bcc5cab965bdff640110cb67ff' % obj_id.hex(),),
             error.exception.args)

     def test_get_random_contents(self):
         content, obj_id = self.hash_content(b'get_random_content')
         self.storage.add(content, obj_id=obj_id)
         random_contents = list(self.storage.get_random(1))
         self.assertEqual(1, len(random_contents))
         self.assertIn(obj_id, random_contents)

     def test_iterate_from(self):
         all_ids = []
         for i in range(100):
             content, obj_id = self.hash_content(b'content %d' % i)
             self.storage.add(content, obj_id=obj_id)
             all_ids.append(obj_id)
         all_ids.sort()

         ids = list(self.storage.iter_from(b'\x00' * (ID_HASH_LENGTH // 2)))
         self.assertEqual(len(ids), len(all_ids))
         self.assertEqual(ids, all_ids)

         ids = list(self.storage.iter_from(all_ids[0]))
         self.assertEqual(len(ids), len(all_ids)-1)
         self.assertEqual(ids, all_ids[1:])

         ids = list(self.storage.iter_from(all_ids[-1], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 1)
         self.assertEqual(len(ids), 0)

         ids = list(self.storage.iter_from(all_ids[-2], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 2)  # beware, this depends on the hash algo
         self.assertEqual(len(ids), 1)
         self.assertEqual(ids, all_ids[-1:])

     def test_fdatasync_default(self):
         content, obj_id = self.hash_content(b'check_fdatasync')
         with patch.multiple('os', fsync=DEFAULT,
                             fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         if self.storage.use_fdatasync:
             assert patched['fdatasync'].call_count == 1
             assert patched['fsync'].call_count == 0
         else:
             assert patched['fdatasync'].call_count == 0
             assert patched['fsync'].call_count == 1

     def test_fdatasync_forced_on(self):
         self.storage.use_fdatasync = True
         content, obj_id = self.hash_content(b'check_fdatasync')
         with patch.multiple('os', fsync=DEFAULT,
                             fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched['fdatasync'].call_count == 1
         assert patched['fsync'].call_count == 0

     def test_fdatasync_forced_off(self):
         self.storage.use_fdatasync = False
         content, obj_id = self.hash_content(b'check_fdatasync')
         with patch.multiple('os', fsync=DEFAULT,
                             fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched['fdatasync'].call_count == 0
         assert patched['fsync'].call_count == 1
+
+    def test_check_not_compressed(self):
+        content, obj_id = self.hash_content(b'check_not_compressed')
+        self.storage.add(content, obj_id=obj_id)
+        with open(self.content_path(obj_id), 'ab') as f:  # Add garbage.
+            f.write(b'garbage')
+        with self.assertRaises(exc.Error) as error:
+            self.storage.check(obj_id)
+        if self.compression is None:
+            self.assertIn('Corrupt object', error.exception.args[0])
+        else:
+            self.assertIn('trailing data found', error.exception.args[0])
+
+
+class TestPathSlicingObjStorageGzip(TestPathSlicingObjStorage):
+    compression = 'gzip'
+
+
+class TestPathSlicingObjStorageZlib(TestPathSlicingObjStorage):
+    compression = 'zlib'
+
+
+class TestPathSlicingObjStorageBz2(TestPathSlicingObjStorage):
+    compression = 'bz2'
+
+
+class TestPathSlicingObjStorageLzma(TestPathSlicingObjStorage):
+    compression = 'lzma'
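
Usage note: this change turns the backend's compression scheme into a
configuration parameter instead of hard-wired gzip. A minimal sketch of the
new knob (the root path is a throwaway placeholder; it assumes the
`compressors`/`decompressors` registries imported from
swh.objstorage.objstorage accept 'gzip', 'zlib', 'bz2', 'lzma' and None, as
the test subclasses above suggest):

    import tempfile

    from swh.objstorage import get_objstorage

    root = tempfile.mkdtemp()  # any writable directory works

    storage = get_objstorage('pathslicing', {
        'root': root,
        'slicing': '0:2/2:4/4:6',
        'compression': 'lzma',  # new parameter; 'gzip' remains the default
    })

    obj_id = storage.add(b'some content')          # compressed on write
    assert storage.get(obj_id) == b'some content'  # decompressed on read
    storage.check(obj_id)  # returns None if intact, raises Error if corrupt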
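
On the get_stream() rewrite: the old `iter(functools.partial(f.read,
chunk_size), b'')` loop cannot survive, because each raw chunk read from disk
must now pass through an incremental decompressor, which may buffer input and
legitimately return b'' before enough compressed bytes have arrived; b'' can
therefore no longer mean end-of-stream. A self-contained illustration using
only the standard library (bz2 picked arbitrarily; names are hypothetical):

    import bz2
    import io

    payload = bz2.compress(b'123456789')
    f = io.BytesIO(payload)

    d = bz2.BZ2Decompressor()
    chunks = []
    while True:
        raw = f.read(1)          # mimics get_stream(chunk_size=1)
        if not raw:
            break
        out = d.decompress(raw)  # b'' while the decompressor buffers input
        if not out:
            continue             # the 'if not r: continue' in the new loop
        chunks.append(out)

    assert b''.join(chunks) == b'123456789'

The number of yielded chunks no longer matches the byte length of the
content, which is presumably why the diff also drops the
`assertEqual(len(r), 9)` assertion from test_get_stream.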