diff --git a/swh/objstorage/api/client.py b/swh/objstorage/api/client.py
index 6642cd6..cd767f9 100644
--- a/swh/objstorage/api/client.py
+++ b/swh/objstorage/api/client.py
@@ -1,54 +1,56 @@
 # Copyright (C) 2015-2022  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from swh.core.api import RPCClient
 from swh.core.utils import iter_chunks
 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError, ObjStorageAPIError
 from swh.objstorage.interface import ObjStorageInterface
-from swh.objstorage.objstorage import DEFAULT_CHUNK_SIZE, DEFAULT_LIMIT
-
-SHA1_SIZE = 20
+from swh.objstorage.objstorage import (
+    DEFAULT_CHUNK_SIZE,
+    DEFAULT_LIMIT,
+    ID_DIGEST_LENGTH,
+)


 class RemoteObjStorage(RPCClient):
     """Proxy to a remote object storage.

     This class allows to connect to an object storage server via
     http protocol.

     Attributes:
         url (string): The url of the server to connect. Must end
             with a '/'
         session: The session to send requests.

     """

     api_exception = ObjStorageAPIError
     reraise_exceptions = [ObjNotFoundError, Error]
     backend_class = ObjStorageInterface

     def restore(self, content, obj_id=None):
         return self.add(content, obj_id, check_presence=False)

     def add_stream(self, content_iter, obj_id, check_presence=True):
         raise NotImplementedError

     def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
         obj_id = hashutil.hash_to_hex(obj_id)
         return self._get_stream(
             "content/get_stream/{}".format(obj_id), chunk_size=chunk_size
         )

     def __iter__(self):
         yield from self.list_content()

     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         params = {"limit": limit}
         if last_obj_id:
             params["last_obj_id"] = hashutil.hash_to_hex(last_obj_id)
         yield from iter_chunks(
-            self._get_stream("content", params=params), chunk_size=SHA1_SIZE
+            self._get_stream("content", params=params), chunk_size=ID_DIGEST_LENGTH
         )
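The `list_content` change above works because the server streams raw, concatenated binary ids and `iter_chunks` regroups that stream into fixed-size pieces, so the chunk size must be the 20-byte raw digest length, not the 40-character hex length. A minimal sketch of that regrouping, using a hypothetical stand-in for `swh.core.utils.iter_chunks` (only the constant and the grouping behavior are taken from the patch):

    import hashlib

    def iter_chunks(byte_iter, chunk_size):
        # Hypothetical stand-in: regroup an iterable of byte chunks into
        # pieces of exactly chunk_size bytes (the semantics list_content
        # relies on).
        buf = b""
        for piece in byte_iter:
            buf += piece
            while len(buf) >= chunk_size:
                yield buf[:chunk_size]
                buf = buf[chunk_size:]
        if buf:
            yield buf

    ID_DIGEST_LENGTH = 20  # raw sha1 digest size, as defined in objstorage.py
    ids = [hashlib.sha1(b"%d" % i).digest() for i in range(3)]
    stream = iter([b"".join(ids)])  # network chunk boundaries are arbitrary
    assert list(iter_chunks(stream, ID_DIGEST_LENGTH)) == ids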
diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py
index f0b571d..65de5e0 100644
--- a/swh/objstorage/backends/pathslicing.py
+++ b/swh/objstorage/backends/pathslicing.py
@@ -1,445 +1,445 @@
 # Copyright (C) 2015-2019  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from collections.abc import Iterator
 from contextlib import contextmanager
 from itertools import islice
 import os
 import random
 import tempfile
 from typing import List

 from swh.model import hashutil
 from swh.objstorage.exc import Error, ObjNotFoundError
 from swh.objstorage.objstorage import (
     DEFAULT_CHUNK_SIZE,
     DEFAULT_LIMIT,
     ID_HASH_ALGO,
-    ID_HASH_LENGTH,
+    ID_HEXDIGEST_LENGTH,
     ObjStorage,
     compressors,
     compute_hash,
     decompressors,
 )

 BUFSIZ = 1048576

 DIR_MODE = 0o755
 FILE_MODE = 0o644


 class PathSlicer:
     """Helper class to compute a path based on a hash.

     Used to compute a directory path based on the object hash according to a
     given slicing. Each slicing correspond to a directory that is named
     according to the hash of its content.

     For instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689
     will have the following computed path:

     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/    : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689

     Args:
         root (str): path to the root directory of the storage on the disk.
         slicing (str): the slicing configuration.
     """

     def __init__(self, root: str, slicing: str):
         self.root = root
         # Make a list of tuples where each tuple contains the beginning
         # and the end of each slicing.
         try:
             self.bounds = [
                 slice(*(int(x) if x else None for x in sbounds.split(":")))
                 for sbounds in slicing.split("/")
                 if sbounds
             ]
         except TypeError:
             raise ValueError(
                 "Invalid slicing declaration; "
                 "it should be a of the form '<int>:<int>[/<int>:<int>]...'"
             )

     def check_config(self):
         """Check the slicing configuration is valid.

         Raises:
             ValueError: if the slicing configuration is invalid.
         """
         if len(self):
             max_char = max(
                 max(bound.start or 0, bound.stop or 0) for bound in self.bounds
             )
-            if ID_HASH_LENGTH < max_char:
+            if ID_HEXDIGEST_LENGTH < max_char:
                 raise ValueError(
                     "Algorithm %s has too short hash for slicing to char %d"
                     % (ID_HASH_ALGO, max_char)
                 )

     def get_directory(self, hex_obj_id: str) -> str:
         """ Compute the storage directory of an object.

         See also: PathSlicer::get_path

         Args:
             hex_obj_id: object id as hexlified string.

         Returns:
             Absolute path (including root) to the directory that contains
             the given object id.
         """
         return os.path.join(self.root, *self.get_slices(hex_obj_id))

     def get_path(self, hex_obj_id: str) -> str:
         """ Compute the full path to an object into the current storage.

         See also: PathSlicer::get_directory

         Args:
             hex_obj_id(str): object id as hexlified string.

         Returns:
             Absolute path (including root) to the object corresponding
             to the given object id.
         """
         return os.path.join(self.get_directory(hex_obj_id), hex_obj_id)

     def get_slices(self, hex_obj_id: str) -> List[str]:
         """Compute the path elements for the given hash.

         Args:
             hex_obj_id(str): object id as hexlified string.

         Returns:
             Relative path to the actual object corresponding to the given id
             as a list.
         """
-        assert len(hex_obj_id) == ID_HASH_LENGTH
+        assert len(hex_obj_id) == ID_HEXDIGEST_LENGTH
         return [hex_obj_id[bound] for bound in self.bounds]

     def __len__(self) -> int:
         """Number of slices of the slicer"""
         return len(self.bounds)


 class PathSlicingObjStorage(ObjStorage):
     """Implementation of the ObjStorage API based on the hash of the content.

     On disk, an object storage is a directory tree containing files
     named after their object IDs. An object ID is a checksum of its
     content, depending on the value of the ID_HASH_ALGO constant (see
     swh.model.hashutil for its meaning).

     To avoid directories that contain too many files, the object storage has
     a given slicing. Each slicing correspond to a directory that is named
     according to the hash of its content.

     So for instance a file with SHA1
     34973274ccef6ab4dfaaf86599792fa9c3fe4689 will be stored in the given
     object storages :

     - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689
     - 0:1/0:5/    : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689

     The files in the storage are stored in gzipped compressed format.

     Args:
         root (str): path to the root directory of the storage on
             the disk.
         slicing (str): string that indicates the slicing to perform
             on the hash of the content to know the path where it should
             be stored (see the documentation of the PathSlicer class).

     """

     def __init__(self, root, slicing, compression="gzip", **kwargs):
         super().__init__(**kwargs)
         self.root = root
         self.slicer = PathSlicer(root, slicing)

         self.use_fdatasync = hasattr(os, "fdatasync")
         self.compression = compression

         self.check_config(check_write=False)

     def check_config(self, *, check_write):
         """Check whether this object storage is properly configured"""

         self.slicer.check_config()

         if not os.path.isdir(self.root):
             raise ValueError(
                 'PathSlicingObjStorage root "%s" is not a directory' % self.root
             )

         if check_write:
             if not os.access(self.root, os.W_OK):
                 raise PermissionError(
                     'PathSlicingObjStorage root "%s" is not writable' % self.root
                 )

         if self.compression not in compressors:
             raise ValueError(
                 'Unknown compression algorithm "%s" for '
                 "PathSlicingObjStorage" % self.compression
             )

         return True

     def __contains__(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return os.path.isfile(self.slicer.get_path(hex_obj_id))

     def __iter__(self):
         """Iterate over the object identifiers currently available in the
         storage.

         Warning: with the current implementation of the object storage, this
         method will walk the filesystem to list objects, meaning that listing
         all objects will be very slow for large storages. You almost
         certainly don't want to use this method in production.

         Return:
             Iterator over object IDs

         """

         def obj_iterator():
             # XXX hackish: it does not verify that the depth of found files
             # matches the slicing depth of the storage
             for root, _dirs, files in os.walk(self.root):
                 _dirs.sort()
                 for f in sorted(files):
                     yield bytes.fromhex(f)

         return obj_iterator()

     def __len__(self):
         """Compute the number of objects available in the storage.

         Warning: this currently uses `__iter__`, its warning about bad
         performances applies

         Return:
             number of objects contained in the storage

         """
         return sum(1 for i in self)

     def add(self, content, obj_id=None, check_presence=True):
         if obj_id is None:
             obj_id = compute_hash(content)

         if check_presence and obj_id in self:
             # If the object is already present, return immediately.
             return obj_id

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         if not isinstance(content, Iterator):
             content = [content]
         compressor = compressors[self.compression]()
         with self._write_obj_file(hex_obj_id) as f:
             for chunk in content:
                 f.write(compressor.compress(chunk))
             f.write(compressor.flush())

         return obj_id

     def get(self, obj_id):
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         # Open the file and return its content as bytes
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         d = decompressors[self.compression]()
         with open(self.slicer.get_path(hex_obj_id), "rb") as f:
             out = d.decompress(f.read())
         if d.unused_data:
             raise Error("Corrupt object %s: trailing data found" % hex_obj_id,)

         return out

     def check(self, obj_id):
         try:
             data = self.get(obj_id)
         except OSError:
             hex_obj_id = hashutil.hash_to_hex(obj_id)
             raise Error(
                 "Corrupt object %s: not a proper compressed file" % hex_obj_id,
             )

         checksums = hashutil.MultiHash.from_data(
             data, hash_names=[ID_HASH_ALGO]
         ).digest()

         actual_obj_id = checksums[ID_HASH_ALGO]
         hex_obj_id = hashutil.hash_to_hex(obj_id)

         if hex_obj_id != hashutil.hash_to_hex(actual_obj_id):
             raise Error(
                 "Corrupt object %s should have id %s"
                 % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id))
             )

     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         try:
             os.remove(self.slicer.get_path(hex_obj_id))
         except FileNotFoundError:
             raise ObjNotFoundError(obj_id)
         return True

     # Management methods

     def get_random(self, batch_size):
         def get_random_content(self, batch_size):
             """ Get a batch of content inside a single directory.

             Returns:
                 a tuple (batch size, batch).
             """
             dirs = []
             for level in range(len(self.slicer)):
                 path = os.path.join(self.root, *dirs)
                 dir_list = next(os.walk(path))[1]
                 if "tmp" in dir_list:
                     dir_list.remove("tmp")
                 dirs.append(random.choice(dir_list))

             path = os.path.join(self.root, *dirs)
             content_list = next(os.walk(path))[2]
             length = min(batch_size, len(content_list))
             return (
                 length,
                 map(hashutil.hash_to_bytes, random.sample(content_list, length)),
             )

         while batch_size:
             length, it = get_random_content(self, batch_size)
             batch_size = batch_size - length
             yield from it

     # Streaming methods

     @contextmanager
     def chunk_writer(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         compressor = compressors[self.compression]()
         with self._write_obj_file(hex_obj_id) as f:
             yield lambda c: f.write(compressor.compress(c))
             f.write(compressor.flush())

     def add_stream(self, content_iter, obj_id, check_presence=True):
         """Add a new object to the object storage using streaming.

         This function is identical to add() except it takes a generator that
         yields the chunked content instead of the whole content at once.

         Args:
             content (bytes): chunked generator that yields the object's raw
                 content to add in storage.
             obj_id (bytes): object identifier
             check_presence (bool): indicate if the presence of the
                 content should be verified before adding the file.

         Returns:
             the id (bytes) of the object into the storage.

         """
         if check_presence and obj_id in self:
             return obj_id

         with self.chunk_writer(obj_id) as writer:
             for chunk in content_iter:
                 writer(chunk)

         return obj_id

     def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)

         hex_obj_id = hashutil.hash_to_hex(obj_id)
         decompressor = decompressors[self.compression]()
         with open(self.slicer.get_path(hex_obj_id), "rb") as f:
             while True:
                 raw = f.read(chunk_size)
                 if not raw:
                     break
                 r = decompressor.decompress(raw)
                 if not r:
                     continue
                 yield r

     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         if last_obj_id:
             it = self.iter_from(last_obj_id)
         else:
             it = iter(self)
         return islice(it, limit)

     def iter_from(self, obj_id, n_leaf=False):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         slices = self.slicer.get_slices(hex_obj_id)
         rlen = len(self.root.split("/"))

         i = 0
         for root, dirs, files in os.walk(self.root):
             if not dirs:
                 i += 1
             level = len(root.split("/")) - rlen
             dirs.sort()
             if dirs and root == os.path.join(self.root, *slices[:level]):
                 cslice = slices[level]
                 for d in dirs[:]:
                     if d < cslice:
                         dirs.remove(d)
             for f in sorted(files):
                 if f > hex_obj_id:
                     yield bytes.fromhex(f)
         if n_leaf:
             yield i

     @contextmanager
     def _write_obj_file(self, hex_obj_id):
         """ Context manager for writing object files to the object storage.

         During writing, data are written to a temporary file, which is
         atomically renamed to the right file name after closing.

         Usage sample:
             with objstorage._write_obj_file(hex_obj_id):
                 f.write(obj_data)

         Yields:
             a file-like object open for writing bytes.
         """
         # Get the final paths and create the directory if absent.
         dir = self.slicer.get_directory(hex_obj_id)
         if not os.path.isdir(dir):
             os.makedirs(dir, DIR_MODE, exist_ok=True)
         path = os.path.join(dir, hex_obj_id)

         # Create a temporary file.
         (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir)

         # Open the file and yield it for writing.
         tmp_f = os.fdopen(tmp, "wb")
         yield tmp_f

         # Make sure the contents of the temporary file are written to disk
         tmp_f.flush()
         if self.use_fdatasync:
             os.fdatasync(tmp)
         else:
             os.fsync(tmp)

         # Then close the temporary file and move it to the right path.
         tmp_f.close()
         os.chmod(tmp_path, FILE_MODE)
         os.rename(tmp_path, path)
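For readers unfamiliar with the slicing syntax used throughout this file, here is a standalone illustration of how `PathSlicer.get_slices` maps a hex object id to path components; it mirrors the `__init__`/`get_slices` logic above, using the "0:2/2:4/4:6" configuration from the docstring:

    slicing = "0:2/2:4/4:6"
    hex_obj_id = "34973274ccef6ab4dfaaf86599792fa9c3fe4689"

    # Each "a:b" segment becomes a Python slice applied to the hex digest.
    bounds = [
        slice(*(int(x) if x else None for x in sbounds.split(":")))
        for sbounds in slicing.split("/")
        if sbounds
    ]
    print([hex_obj_id[b] for b in bounds])  # ['34', '97', '32']
    # On disk: 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689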
""" if cls in _STORAGE_CLASSES_DEPRECATED: warnings.warn( f"{cls} objstorage class is deprecated, " f"use {_STORAGE_CLASSES_DEPRECATED[cls]} class instead.", DeprecationWarning, ) cls = _STORAGE_CLASSES_DEPRECATED[cls] if cls in _STORAGE_CLASSES: if args is not None: warnings.warn( 'Explicit "args" key is deprecated for objstorage initialization, ' "use class arguments keys directly instead.", DeprecationWarning, ) # TODO: when removing this, drop the "args" backwards compatibility # from swh.objstorage.api.server configuration checker kwargs = args return _STORAGE_CLASSES[cls](**kwargs) else: raise ValueError( "Storage class {} is not available: {}".format( cls, _STORAGE_CLASSES_MISSING.get(cls, "unknown name") ) ) def _construct_filtered_objstorage(storage_conf, filters_conf): return add_filters(get_objstorage(**storage_conf), filters_conf) _STORAGE_CLASSES["filtered"] = _construct_filtered_objstorage def _construct_multiplexer_objstorage(objstorages): storages = [get_objstorage(**conf) for conf in objstorages] return MultiplexerObjStorage(storages) _STORAGE_CLASSES["multiplexer"] = _construct_multiplexer_objstorage def _construct_striping_objstorage(objstorages): storages = [get_objstorage(**conf) for conf in objstorages] return StripingObjStorage(storages) _STORAGE_CLASSES["striping"] = _construct_striping_objstorage diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index 2a39306..c109086 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,144 +1,153 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import bz2 from itertools import dropwhile, islice import lzma from typing import Dict import zlib from swh.model import hashutil from .exc import ObjNotFoundError ID_HASH_ALGO = "sha1" -ID_HASH_LENGTH = 40 # Size in bytes of the hash hexadecimal representation. -DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024 # Size in bytes of the streaming chunks + +ID_HEXDIGEST_LENGTH = 40 +"""Size in bytes of the hash hexadecimal representation.""" + +ID_DIGEST_LENGTH = 20 +"""Size in bytes of the hash""" + +DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024 +"""Size in bytes of the streaming chunks""" + DEFAULT_LIMIT = 10000 +"""Default number of results of ``list_content``.""" def compute_hash(content): """Compute the content's hash. 
diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py
index 2a39306..c109086 100644
--- a/swh/objstorage/objstorage.py
+++ b/swh/objstorage/objstorage.py
@@ -1,144 +1,153 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import abc
 import bz2
 from itertools import dropwhile, islice
 import lzma
 from typing import Dict
 import zlib

 from swh.model import hashutil

 from .exc import ObjNotFoundError

 ID_HASH_ALGO = "sha1"
-ID_HASH_LENGTH = 40  # Size in bytes of the hash hexadecimal representation.
-DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024  # Size in bytes of the streaming chunks
+
+ID_HEXDIGEST_LENGTH = 40
+"""Size in bytes of the hash hexadecimal representation."""
+
+ID_DIGEST_LENGTH = 20
+"""Size in bytes of the hash"""
+
+DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024
+"""Size in bytes of the streaming chunks"""
+
 DEFAULT_LIMIT = 10000
+"""Default number of results of ``list_content``."""


 def compute_hash(content):
     """Compute the content's hash.

     Args:
         content (bytes): The raw content to hash
         hash_name (str): Hash's name (default to ID_HASH_ALGO)

     Returns:
         The ID_HASH_ALGO for the content

     """
     return (
         hashutil.MultiHash.from_data(content, hash_names=[ID_HASH_ALGO],)
         .digest()
         .get(ID_HASH_ALGO)
     )


 class NullCompressor:
     def compress(self, data):
         return data

     def flush(self):
         return b""


 class NullDecompressor:
     def decompress(self, data):
         return data

     @property
     def unused_data(self):
         return b""


 decompressors = {
     "bz2": bz2.BZ2Decompressor,
     "lzma": lzma.LZMADecompressor,
     "gzip": lambda: zlib.decompressobj(wbits=31),
     "zlib": zlib.decompressobj,
     "none": NullDecompressor,
 }

 compressors = {
     "bz2": bz2.BZ2Compressor,
     "lzma": lzma.LZMACompressor,
     "gzip": lambda: zlib.compressobj(wbits=31),
     "zlib": zlib.compressobj,
     "none": NullCompressor,
 }


 class ObjStorage(metaclass=abc.ABCMeta):
     def __init__(self, *, allow_delete=False, **kwargs):
         # A more complete permission system could be used in place of that if
         # it becomes needed
         self.allow_delete = allow_delete

     @abc.abstractmethod
     def check_config(self, *, check_write):
         pass

     @abc.abstractmethod
     def __contains__(self, obj_id):
         pass

     @abc.abstractmethod
     def add(self, content, obj_id=None, check_presence=True):
         pass

     def add_batch(self, contents, check_presence=True) -> Dict:
         summary = {"object:add": 0, "object:add:bytes": 0}
         for obj_id, content in contents.items():
             if check_presence and obj_id in self:
                 continue
             self.add(content, obj_id, check_presence=False)
             summary["object:add"] += 1
             summary["object:add:bytes"] += len(content)
         return summary

     def restore(self, content, obj_id=None):
         # check_presence to false will erase the potential previous content.
         return self.add(content, obj_id, check_presence=False)

     @abc.abstractmethod
     def get(self, obj_id):
         pass

     def get_batch(self, obj_ids):
         for obj_id in obj_ids:
             try:
                 yield self.get(obj_id)
             except ObjNotFoundError:
                 yield None

     @abc.abstractmethod
     def check(self, obj_id):
         pass

     @abc.abstractmethod
     def delete(self, obj_id):
         if not self.allow_delete:
             raise PermissionError("Delete is not allowed.")

     # Management methods

     def get_random(self, batch_size):
         pass

     # Streaming methods

     def add_stream(self, content_iter, obj_id, check_presence=True):
         raise NotImplementedError

     def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE):
         raise NotImplementedError

     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         it = iter(self)
         if last_obj_id:
             it = dropwhile(lambda x: x <= last_obj_id, it)
         return islice(it, limit)
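The two renamed constants simply name the two representations of the same sha1 identifier, which is the whole point of this patch; the values can be checked with the standard library alone:

    import hashlib

    h = hashlib.sha1(b"some content")
    assert len(h.digest()) == 20     # ID_DIGEST_LENGTH: raw bytes
    assert len(h.hexdigest()) == 40  # ID_HEXDIGEST_LENGTH: hex characters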
diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py
index 72ac0f1..95fb1ef 100644
--- a/swh/objstorage/tests/test_objstorage_pathslicing.py
+++ b/swh/objstorage/tests/test_objstorage_pathslicing.py
@@ -1,163 +1,163 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import shutil
 import tempfile
 import unittest
 from unittest.mock import DEFAULT, patch

 from swh.model import hashutil
 from swh.objstorage import exc
 from swh.objstorage.factory import get_objstorage
-from swh.objstorage.objstorage import ID_HASH_LENGTH
+from swh.objstorage.objstorage import ID_DIGEST_LENGTH

 from .objstorage_testing import ObjStorageTestFixture


 class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase):
     compression = "none"

     def setUp(self):
         super().setUp()
         self.slicing = "0:2/2:4/4:6"
         self.tmpdir = tempfile.mkdtemp()
         self.storage = get_objstorage(
             "pathslicing",
             {
                 "root": self.tmpdir,
                 "slicing": self.slicing,
                 "compression": self.compression,
             },
         )

     def tearDown(self):
         super().tearDown()
         shutil.rmtree(self.tmpdir)

     def content_path(self, obj_id):
         hex_obj_id = hashutil.hash_to_hex(obj_id)
         return self.storage.slicer.get_path(hex_obj_id)

     def test_iter(self):
         content, obj_id = self.hash_content(b"iter")
         self.assertEqual(list(iter(self.storage)), [])
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(list(iter(self.storage)), [obj_id])

     def test_len(self):
         content, obj_id = self.hash_content(b"len")
         self.assertEqual(len(self.storage), 0)
         self.storage.add(content, obj_id=obj_id)
         self.assertEqual(len(self.storage), 1)

     def test_check_ok(self):
         content, obj_id = self.hash_content(b"check_ok")
         self.storage.add(content, obj_id=obj_id)
         assert self.storage.check(obj_id) is None
         assert self.storage.check(obj_id.hex()) is None

     def test_check_id_mismatch(self):
         content, obj_id = self.hash_content(b"check_id_mismatch")
         self.storage.add(b"unexpected content", obj_id=obj_id)
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
         self.assertEqual(
             (
                 "Corrupt object %s should have id "
                 "12ebb2d6c81395bcc5cab965bdff640110cb67ff" % obj_id.hex(),
             ),
             error.exception.args,
         )

     def test_get_random_contents(self):
         content, obj_id = self.hash_content(b"get_random_content")
         self.storage.add(content, obj_id=obj_id)
         random_contents = list(self.storage.get_random(1))
         self.assertEqual(1, len(random_contents))
         self.assertIn(obj_id, random_contents)

     def test_iterate_from(self):
         all_ids = []
         for i in range(100):
             content, obj_id = self.hash_content(b"content %d" % i)
             self.storage.add(content, obj_id=obj_id)
             all_ids.append(obj_id)
         all_ids.sort()

-        ids = list(self.storage.iter_from(b"\x00" * (ID_HASH_LENGTH // 2)))
+        ids = list(self.storage.iter_from(b"\x00" * ID_DIGEST_LENGTH))
         self.assertEqual(len(ids), len(all_ids))
         self.assertEqual(ids, all_ids)

         ids = list(self.storage.iter_from(all_ids[0]))
         self.assertEqual(len(ids), len(all_ids) - 1)
         self.assertEqual(ids, all_ids[1:])

         ids = list(self.storage.iter_from(all_ids[-1], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 1)
         self.assertEqual(len(ids), 0)

         ids = list(self.storage.iter_from(all_ids[-2], n_leaf=True))
         n_leaf = ids[-1]
         ids = ids[:-1]
         self.assertEqual(n_leaf, 2)  # beware, this depends on the hash algo
         self.assertEqual(len(ids), 1)
         self.assertEqual(ids, all_ids[-1:])

     def test_fdatasync_default(self):
         content, obj_id = self.hash_content(b"check_fdatasync")
         with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         if self.storage.use_fdatasync:
             assert patched["fdatasync"].call_count == 1
             assert patched["fsync"].call_count == 0
         else:
             assert patched["fdatasync"].call_count == 0
             assert patched["fsync"].call_count == 1

     def test_fdatasync_forced_on(self):
         self.storage.use_fdatasync = True
         content, obj_id = self.hash_content(b"check_fdatasync")
         with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched["fdatasync"].call_count == 1
         assert patched["fsync"].call_count == 0

     def test_fdatasync_forced_off(self):
         self.storage.use_fdatasync = False
         content, obj_id = self.hash_content(b"check_fdatasync")
         with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched:
             self.storage.add(content, obj_id=obj_id)
         assert patched["fdatasync"].call_count == 0
         assert patched["fsync"].call_count == 1

     def test_check_not_compressed(self):
         content, obj_id = self.hash_content(b"check_not_compressed")
         self.storage.add(content, obj_id=obj_id)
         with open(self.content_path(obj_id), "ab") as f:  # Add garbage.
             f.write(b"garbage")
         with self.assertRaises(exc.Error) as error:
             self.storage.check(obj_id)
         if self.compression == "none":
             self.assertIn("Corrupt object", error.exception.args[0])
         else:
             self.assertIn("trailing data found", error.exception.args[0])


 class TestPathSlicingObjStorageGzip(TestPathSlicingObjStorage):
     compression = "gzip"


 class TestPathSlicingObjStorageZlib(TestPathSlicingObjStorage):
     compression = "zlib"


 class TestPathSlicingObjStorageBz2(TestPathSlicingObjStorage):
     compression = "bz2"


 class TestPathSlicingObjStorageLzma(TestPathSlicingObjStorage):
     compression = "lzma"
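The corruption detection exercised by `test_check_not_compressed` rests on the decompressor's `unused_data` attribute: once the compressed stream ends, any trailing bytes land there instead of being decoded. A minimal standalone reproduction with the gzip settings used by objstorage.py (`wbits=31`):

    import zlib

    c = zlib.compressobj(wbits=31)
    data = c.compress(b"check_not_compressed") + c.flush()

    d = zlib.decompressobj(wbits=31)
    d.decompress(data + b"garbage")      # garbage appended, as in the test
    assert d.unused_data == b"garbage"   # what get() inspects to raise Error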