diff --git a/swh/objstorage/backends/pathslicing.py b/swh/objstorage/backends/pathslicing.py index 730d98f..c839e39 100644 --- a/swh/objstorage/backends/pathslicing.py +++ b/swh/objstorage/backends/pathslicing.py @@ -1,399 +1,368 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from contextlib import contextmanager from itertools import islice import os -import random import tempfile -from typing import Iterable, Iterator, List, Optional +from typing import Iterator, List, Optional from swh.model import hashutil from swh.objstorage.constants import DEFAULT_LIMIT, ID_HASH_ALGO, ID_HEXDIGEST_LENGTH from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.interface import ObjId from swh.objstorage.objstorage import ObjStorage, compressors, decompressors BUFSIZ = 1048576 DIR_MODE = 0o755 FILE_MODE = 0o644 class PathSlicer: """Helper class to compute a path based on a hash. Used to compute a directory path based on the object hash according to a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. For instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will have the following computed path: - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 Args: root (str): path to the root directory of the storage on the disk. slicing (str): the slicing configuration. """ def __init__(self, root: str, slicing: str): self.root = root # Make a list of tuples where each tuple contains the beginning # and the end of each slicing. try: self.bounds = [ slice(*(int(x) if x else None for x in sbounds.split(":"))) for sbounds in slicing.split("/") if sbounds ] except TypeError: raise ValueError( "Invalid slicing declaration; " "it should be a of the form ':[/:]..." ) def check_config(self): """Check the slicing configuration is valid. Raises: ValueError: if the slicing configuration is invalid. """ if len(self): max_char = max( max(bound.start or 0, bound.stop or 0) for bound in self.bounds ) if ID_HEXDIGEST_LENGTH < max_char: raise ValueError( "Algorithm %s has too short hash for slicing to char %d" % (ID_HASH_ALGO, max_char) ) def get_directory(self, hex_obj_id: str) -> str: """Compute the storage directory of an object. See also: PathSlicer::get_path Args: hex_obj_id: object id as hexlified string. Returns: Absolute path (including root) to the directory that contains the given object id. """ return os.path.join(self.root, *self.get_slices(hex_obj_id)) def get_path(self, hex_obj_id: str) -> str: """Compute the full path to an object into the current storage. See also: PathSlicer::get_directory Args: hex_obj_id(str): object id as hexlified string. Returns: Absolute path (including root) to the object corresponding to the given object id. """ return os.path.join(self.get_directory(hex_obj_id), hex_obj_id) def get_slices(self, hex_obj_id: str) -> List[str]: """Compute the path elements for the given hash. Args: hex_obj_id(str): object id as hexlified string. Returns: Relative path to the actual object corresponding to the given id as a list. 
""" assert len(hex_obj_id) == ID_HEXDIGEST_LENGTH return [hex_obj_id[bound] for bound in self.bounds] def __len__(self) -> int: """Number of slices of the slicer""" return len(self.bounds) class PathSlicingObjStorage(ObjStorage): """Implementation of the ObjStorage API based on the hash of the content. On disk, an object storage is a directory tree containing files named after their object IDs. An object ID is a checksum of its content, depending on the value of the ID_HASH_ALGO constant (see swh.model.hashutil for its meaning). To avoid directories that contain too many files, the object storage has a given slicing. Each slicing correspond to a directory that is named according to the hash of its content. So for instance a file with SHA1 34973274ccef6ab4dfaaf86599792fa9c3fe4689 will be stored in the given object storages : - 0:2/2:4/4:6 : 34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689 - 0:1/0:5/ : 3/34973/34973274ccef6ab4dfaaf86599792fa9c3fe4689 The files in the storage are stored in gzipped compressed format. Args: root (str): path to the root directory of the storage on the disk. slicing (str): string that indicates the slicing to perform on the hash of the content to know the path where it should be stored (see the documentation of the PathSlicer class). """ def __init__(self, root, slicing, compression="gzip", **kwargs): super().__init__(**kwargs) self.root = root self.slicer = PathSlicer(root, slicing) self.use_fdatasync = hasattr(os, "fdatasync") self.compression = compression self.check_config(check_write=False) def check_config(self, *, check_write): """Check whether this object storage is properly configured""" self.slicer.check_config() if not os.path.isdir(self.root): raise ValueError( 'PathSlicingObjStorage root "%s" is not a directory' % self.root ) if check_write: if not os.access(self.root, os.W_OK): raise PermissionError( 'PathSlicingObjStorage root "%s" is not writable' % self.root ) if self.compression not in compressors: raise ValueError( 'Unknown compression algorithm "%s" for ' "PathSlicingObjStorage" % self.compression ) return True def __contains__(self, obj_id: ObjId) -> bool: hex_obj_id = hashutil.hash_to_hex(obj_id) return os.path.isfile(self.slicer.get_path(hex_obj_id)) def __iter__(self) -> Iterator[bytes]: """Iterate over the object identifiers currently available in the storage. Warning: with the current implementation of the object storage, this method will walk the filesystem to list objects, meaning that listing all objects will be very slow for large storages. You almost certainly don't want to use this method in production. Return: Iterator over object IDs """ def obj_iterator(): # XXX hackish: it does not verify that the depth of found files # matches the slicing depth of the storage for root, _dirs, files in os.walk(self.root): _dirs.sort() for f in sorted(files): yield bytes.fromhex(f) return obj_iterator() def __len__(self) -> int: """Compute the number of objects available in the storage. Warning: this currently uses `__iter__`, its warning about bad performances applies Return: number of objects contained in the storage """ return sum(1 for i in self) def add( self, content: bytes, obj_id: ObjId, check_presence: bool = True, ) -> None: if check_presence and obj_id in self: # If the object is already present, return immediately. 
return hex_obj_id = hashutil.hash_to_hex(obj_id) compressor = compressors[self.compression]() with self._write_obj_file(hex_obj_id) as f: f.write(compressor.compress(content)) f.write(compressor.flush()) def get(self, obj_id: ObjId) -> bytes: if obj_id not in self: raise ObjNotFoundError(obj_id) # Open the file and return its content as bytes hex_obj_id = hashutil.hash_to_hex(obj_id) d = decompressors[self.compression]() with open(self.slicer.get_path(hex_obj_id), "rb") as f: out = d.decompress(f.read()) if d.unused_data: raise Error( "Corrupt object %s: trailing data found" % hex_obj_id, ) return out def check(self, obj_id: ObjId) -> None: try: data = self.get(obj_id) except OSError: hex_obj_id = hashutil.hash_to_hex(obj_id) raise Error( "Corrupt object %s: not a proper compressed file" % hex_obj_id, ) checksums = hashutil.MultiHash.from_data( data, hash_names=[ID_HASH_ALGO] ).digest() actual_obj_id = checksums[ID_HASH_ALGO] hex_obj_id = hashutil.hash_to_hex(obj_id) if hex_obj_id != hashutil.hash_to_hex(actual_obj_id): raise Error( "Corrupt object %s should have id %s" % (hashutil.hash_to_hex(obj_id), hashutil.hash_to_hex(actual_obj_id)) ) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission if obj_id not in self: raise ObjNotFoundError(obj_id) hex_obj_id = hashutil.hash_to_hex(obj_id) try: os.remove(self.slicer.get_path(hex_obj_id)) except FileNotFoundError: raise ObjNotFoundError(obj_id) return True - # Management methods - - def get_random(self, batch_size: int) -> Iterable[ObjId]: - def get_random_content(self, batch_size): - """Get a batch of content inside a single directory. - - Returns: - a tuple (batch size, batch). - """ - dirs = [] - for level in range(len(self.slicer)): - path = os.path.join(self.root, *dirs) - dir_list = next(os.walk(path))[1] - if "tmp" in dir_list: - dir_list.remove("tmp") - dirs.append(random.choice(dir_list)) - - path = os.path.join(self.root, *dirs) - content_list = next(os.walk(path))[2] - length = min(batch_size, len(content_list)) - return ( - length, - map(hashutil.hash_to_bytes, random.sample(content_list, length)), - ) - - while batch_size: - length, it = get_random_content(self, batch_size) - batch_size = batch_size - length - yield from it - # Streaming methods @contextmanager def chunk_writer(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) compressor = compressors[self.compression]() with self._write_obj_file(hex_obj_id) as f: yield lambda c: f.write(compressor.compress(c)) f.write(compressor.flush()) def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT ) -> Iterator[ObjId]: if last_obj_id: it = self.iter_from(last_obj_id) else: it = iter(self) return islice(it, limit) def iter_from(self, obj_id, n_leaf=False): hex_obj_id = hashutil.hash_to_hex(obj_id) slices = self.slicer.get_slices(hex_obj_id) rlen = len(self.root.split("/")) i = 0 for root, dirs, files in os.walk(self.root): if not dirs: i += 1 level = len(root.split("/")) - rlen dirs.sort() if dirs and root == os.path.join(self.root, *slices[:level]): cslice = slices[level] for d in dirs[:]: if d < cslice: dirs.remove(d) for f in sorted(files): if f > hex_obj_id: yield bytes.fromhex(f) if n_leaf: yield i @contextmanager def _write_obj_file(self, hex_obj_id): """Context manager for writing object files to the object storage. During writing, data are written to a temporary file, which is atomically renamed to the right file name after closing. 
Usage sample: with objstorage._write_obj_file(hex_obj_id): f.write(obj_data) Yields: a file-like object open for writing bytes. """ # Get the final paths and create the directory if absent. dir = self.slicer.get_directory(hex_obj_id) if not os.path.isdir(dir): os.makedirs(dir, DIR_MODE, exist_ok=True) path = os.path.join(dir, hex_obj_id) # Create a temporary file. (tmp, tmp_path) = tempfile.mkstemp(suffix=".tmp", prefix="hex_obj_id.", dir=dir) # Open the file and yield it for writing. tmp_f = os.fdopen(tmp, "wb") yield tmp_f # Make sure the contents of the temporary file are written to disk tmp_f.flush() if self.use_fdatasync: os.fdatasync(tmp) else: os.fsync(tmp) # Then close the temporary file and move it to the right path. tmp_f.close() os.chmod(tmp_path, FILE_MODE) os.rename(tmp_path, path) diff --git a/swh/objstorage/interface.py b/swh/objstorage/interface.py index e2755da..49099a1 100644 --- a/swh/objstorage/interface.py +++ b/swh/objstorage/interface.py @@ -1,214 +1,191 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from typing import Dict, Iterable, Iterator, List, Optional +from typing import Dict, Iterator, List, Optional from typing_extensions import Protocol, runtime_checkable from swh.core.api import remote_api_endpoint from swh.objstorage.constants import DEFAULT_LIMIT ObjId = bytes """Type of object ids, which should be a sha1 hash.""" @runtime_checkable class ObjStorageInterface(Protocol): """High-level API to manipulate the Software Heritage object storage. Conceptually, the object storage offers the following methods: - check_config() check if the object storage is properly configured - __contains__() check if an object is present, by object id - add() add a new object, returning an object id - restore() same as add() but erase an already existed content - get() retrieve the content of an object, by object id - check() check the integrity of an object, by object id - delete() remove an object - And some management methods: - - - get_random() get random object id of existing contents (used for the - content integrity checker). - Each implementation of this interface can have a different behavior and its own way to store the contents. """ @remote_api_endpoint("check_config") def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ ... @remote_api_endpoint("content/contains") def __contains__(self, obj_id: ObjId) -> bool: """Indicate if the given object is present in the storage. Args: obj_id: object identifier. Returns: True if and only if the object is present in the current object storage. """ ... @remote_api_endpoint("content/add") def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: """Add a new object to the object storage. Args: content: object's raw content to add in storage. obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. It is trusted to match the bytes. check_presence (bool): indicate if the presence of the content should be verified before adding the file. Returns: the id (bytes) of the object into the storage. """ ... 
@remote_api_endpoint("content/add/batch") def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage. Args: contents: mapping from obj_id to object contents Returns: the summary of objects added to the storage (count of object, count of bytes object) """ ... def restore(self, content: bytes, obj_id: ObjId) -> None: """Restore a content that have been corrupted. This function is identical to add but does not check if the object id is already in the file system. The default implementation provided by the current class is suitable for most cases. Args: content: object's raw content to add in storage obj_id: dict of hashes of the content (or only the sha1, for legacy clients) """ ... @remote_api_endpoint("content/get") def get(self, obj_id: ObjId) -> bytes: """Retrieve the content of a given object. Args: obj_id: object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ ... @remote_api_endpoint("content/get/batch") def get_batch(self, obj_ids: List[ObjId]) -> Iterator[Optional[bytes]]: """Retrieve objects' raw content in bulk from storage. Note: This function does have a default implementation in ObjStorage that is suitable for most cases. For object storages that needs to do the minimal number of requests possible (ex: remote object storages), that method can be overridden to perform a more efficient operation. Args: obj_ids: list of object ids. Returns: list of resulting contents, or None if the content could not be retrieved. Do not raise any exception as a fail for one content will not cancel the whole request. """ ... @remote_api_endpoint("content/check") def check(self, obj_id: ObjId) -> None: """Perform an integrity check for a given object. Verify that the file object is in place and that the content matches the object id. Args: obj_id: object identifier. Raises: ObjNotFoundError: if the requested object is missing. Error: if the request object is corrupted. """ ... @remote_api_endpoint("content/delete") def delete(self, obj_id: ObjId): """Delete an object. Args: obj_id: object identifier. Raises: ObjNotFoundError: if the requested object is missing. """ ... - # Management methods - - @remote_api_endpoint("content/get/random") - def get_random(self, batch_size: int) -> Iterable[ObjId]: - """Get random ids of existing contents. - - This method is used in order to get random ids to perform - content integrity verifications on random contents. - - Args: - batch_size: Number of ids that will be given - - Yields: - ids of contents that are in the current object storage. - - """ - ... - def __iter__(self) -> Iterator[ObjId]: ... def list_content( self, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT ) -> Iterator[ObjId]: """Generates known object ids. Args: last_obj_id: object id from which to iterate from (excluded). limit (int): max number of object ids to generate. Generates: obj_id: object ids. """ ... 
diff --git a/swh/objstorage/multiplexer/filter/filter.py b/swh/objstorage/multiplexer/filter/filter.py index 511da4d..e2e8ade 100644 --- a/swh/objstorage/multiplexer/filter/filter.py +++ b/swh/objstorage/multiplexer/filter/filter.py @@ -1,77 +1,74 @@ # Copyright (C) 2015-2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.objstorage.objstorage import ObjStorage class ObjStorageFilter(ObjStorage): """Base implementation of a filter that allow inputs on ObjStorage or not. This class copy the API of ...objstorage in order to filter the inputs of this class. If the operation is allowed, return the result of this operation applied to the destination implementation. Otherwise, just return without any operation. This class is an abstract base class for a classic read/write storage. Filters can inherit from it and only redefine some methods in order to change behavior. """ def __init__(self, storage): self.storage = storage def check_config(self, *, check_write): """Check the object storage for proper configuration. Args: check_write: check whether writes to the objstorage will succeed Returns: True if the storage is properly configured """ return self.storage.check_config(check_write=check_write) def __contains__(self, *args, **kwargs): return self.storage.__contains__(*args, **kwargs) def __iter__(self): """Iterates over the content of each storages Warning: The `__iter__` methods frequently have bad performance. You almost certainly don't want to use this method in production as the wrapped storage may cause performance issues. """ return self.storage.__iter__() def __len__(self): """Compute the number of objects in the current object storage. Warning: performance issue in `__iter__` also applies here. Returns: number of objects contained in the storage. """ return self.storage.__len__() def add(self, content, obj_id, check_presence=True, *args, **kwargs): return self.storage.add(content, obj_id, check_presence, *args, **kwargs) def restore(self, content, obj_id, *args, **kwargs): return self.storage.restore(content, obj_id, *args, **kwargs) def get(self, obj_id, *args, **kwargs): return self.storage.get(obj_id, *args, **kwargs) def check(self, obj_id, *args, **kwargs): return self.storage.check(obj_id, *args, **kwargs) def delete(self, obj_id, *args, **kwargs): return self.storage.delete(obj_id, *args, **kwargs) - - def get_random(self, batch_size, *args, **kwargs): - return self.storage.get_random(batch_size, *args, **kwargs) diff --git a/swh/objstorage/multiplexer/filter/id_filter.py b/swh/objstorage/multiplexer/filter/id_filter.py index 77c9f21..bd30580 100644 --- a/swh/objstorage/multiplexer/filter/id_filter.py +++ b/swh/objstorage/multiplexer/filter/id_filter.py @@ -1,84 +1,79 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import re from swh.model import hashutil from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.multiplexer.filter.filter import ObjStorageFilter class IdObjStorageFilter(ObjStorageFilter, metaclass=abc.ABCMeta): """Filter that only allow operations if the object id match a requirement. Even for read operations, check before if the id match the requirements. 
This may prevent for unnecessary disk access. """ @abc.abstractmethod def is_valid(self, obj_id): """Indicates if the given id is valid.""" raise NotImplementedError( "Implementations of an IdObjStorageFilter " 'must have a "is_valid" method' ) def __contains__(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.__contains__(*args, obj_id=obj_id, **kwargs) return False def __len__(self): return sum(1 for i in [id for id in self.storage if self.is_valid(id)]) def __iter__(self): yield from filter(lambda id: self.is_valid(id), iter(self.storage)) def add(self, content, obj_id, check_presence=True, *args, **kwargs): if self.is_valid(obj_id): return self.storage.add(content, *args, obj_id=obj_id, **kwargs) def restore(self, content, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.restore(content, *args, obj_id=obj_id, **kwargs) def get(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.get(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) def check(self, obj_id, *args, **kwargs): if self.is_valid(obj_id): return self.storage.check(*args, obj_id=obj_id, **kwargs) raise ObjNotFoundError(obj_id) - def get_random(self, *args, **kwargs): - yield from filter( - lambda id: self.is_valid(id), self.storage.get_random(*args, **kwargs) - ) - class RegexIdObjStorageFilter(IdObjStorageFilter): """Filter that allow operations if the content's id as hex match a regex.""" def __init__(self, storage, regex): super().__init__(storage) self.regex = re.compile(regex) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.regex.match(hex_obj_id) is not None class PrefixIdObjStorageFilter(IdObjStorageFilter): """Filter that allow operations if the hexlified id have a given prefix.""" def __init__(self, storage, prefix): super().__init__(storage) self.prefix = str(prefix) def is_valid(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return str(hex_obj_id).startswith(self.prefix) diff --git a/swh/objstorage/multiplexer/multiplexer_objstorage.py b/swh/objstorage/multiplexer/multiplexer_objstorage.py index 8d0304f..74fc670 100644 --- a/swh/objstorage/multiplexer/multiplexer_objstorage.py +++ b/swh/objstorage/multiplexer/multiplexer_objstorage.py @@ -1,326 +1,308 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import queue -import random import threading -from typing import Dict, Iterable +from typing import Dict from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.interface import ObjId from swh.objstorage.objstorage import ObjStorage class ObjStorageThread(threading.Thread): def __init__(self, storage): super().__init__(daemon=True) self.storage = storage self.commands = queue.Queue() def run(self): while True: try: mailbox, command, args, kwargs = self.commands.get(True, 0.05) except queue.Empty: continue try: ret = getattr(self.storage, command)(*args, **kwargs) except Exception as exc: self.queue_result(mailbox, "exception", exc) else: self.queue_result(mailbox, "result", ret) def queue_command(self, command, *args, mailbox=None, **kwargs): """Enqueue a new command to be processed by the thread. Args: command (str): one of the method names for the underlying storage. mailbox (queue.Queue): explicit mailbox if the calling thread wants to override it. 
args, kwargs: arguments for the command. Returns: queue.Queue: The mailbox you can read the response from """ if not mailbox: mailbox = queue.Queue() self.commands.put((mailbox, command, args, kwargs)) return mailbox def queue_result(self, mailbox, result_type, result): """Enqueue a new result in the mailbox This also provides a reference to the storage, which can be useful when an exceptional condition arises. Args: mailbox (queue.Queue): the mailbox to which we need to enqueue the result result_type (str): one of 'result', 'exception' result: the result to pass back to the calling thread """ mailbox.put( { "type": result_type, "result": result, } ) @staticmethod def get_result_from_mailbox(mailbox, *args, **kwargs): """Unpack the result from the mailbox. Args: mailbox (queue.Queue): A mailbox to unpack a result from args: positional arguments to :func:`mailbox.get` kwargs: keyword arguments to :func:`mailbox.get` Returns: the next result unpacked from the queue Raises: either the exception we got back from the underlying storage, or :exc:`queue.Empty` if :func:`mailbox.get` raises that. """ result = mailbox.get(*args, **kwargs) if result["type"] == "exception": raise result["result"] from None else: return result["result"] @staticmethod def collect_results(mailbox, num_results): """Collect num_results from the mailbox""" collected = 0 ret = [] while collected < num_results: try: ret.append( ObjStorageThread.get_result_from_mailbox(mailbox, True, 0.05) ) except queue.Empty: continue collected += 1 return ret def __getattr__(self, attr): def call(*args, **kwargs): mailbox = self.queue_command(attr, *args, **kwargs) return self.get_result_from_mailbox(mailbox) return call def __contains__(self, *args, **kwargs): mailbox = self.queue_command("__contains__", *args, **kwargs) return self.get_result_from_mailbox(mailbox) class MultiplexerObjStorage(ObjStorage): """Implementation of ObjStorage that distributes between multiple storages. The multiplexer object storage allows an input to be demultiplexed among multiple storages that will or will not accept it by themselves (see .filter package). As the ids can be different, no pre-computed ids should be submitted. Also, there are no guarantees that the returned ids can be used directly into the storages that the multiplexer manage. Use case examples follow. Example 1:: storage_v1 = filter.read_only(PathSlicingObjStorage('/dir1', '0:2/2:4/4:6')) storage_v2 = PathSlicingObjStorage('/dir2', '0:1/0:5') storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using 'storage', all the new contents will only be added to the v2 storage, while it will be retrievable from both. Example 2:: storage_v1 = filter.id_regex( PathSlicingObjStorage('/dir1', '0:2/2:4/4:6'), r'[^012].*' ) storage_v2 = filter.if_regex( PathSlicingObjStorage('/dir2', '0:1/0:5'), r'[012]/*' ) storage = MultiplexerObjStorage([storage_v1, storage_v2]) When using this storage, the contents with a sha1 starting with 0, 1 or 2 will be redirected (read AND write) to the storage_v2, while the others will be redirected to the storage_v1. If a content starting with 0, 1 or 2 is present in the storage_v1, it would be ignored anyway. 
""" def __init__(self, storages, **kwargs): super().__init__(**kwargs) self.storages = storages self.storage_threads = [ObjStorageThread(storage) for storage in storages] for thread in self.storage_threads: thread.start() def wrap_call(self, threads, call, *args, **kwargs): threads = list(threads) mailbox = queue.Queue() for thread in threads: thread.queue_command(call, *args, mailbox=mailbox, **kwargs) return ObjStorageThread.collect_results(mailbox, len(threads)) def get_read_threads(self, obj_id=None): yield from self.storage_threads def get_write_threads(self, obj_id=None): yield from self.storage_threads def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. Returns: True if the configuration check worked, an exception if it didn't. """ return all( self.wrap_call( self.storage_threads, "check_config", check_write=check_write ) ) def __contains__(self, obj_id): """Indicate if the given object is present in the storage. Args: obj_id (bytes): object identifier. Returns: True if and only if the object is present in the current object storage. """ for storage in self.get_read_threads(obj_id): if obj_id in storage: return True return False def __iter__(self): def obj_iterator(): for storage in self.storages: yield from storage return obj_iterator() def add(self, content: bytes, obj_id: ObjId, check_presence: bool = True) -> None: """Add a new object to the object storage. If the adding step works in all the storages that accept this content, this is a success. Otherwise, the full adding step is an error even if it succeed in some of the storages. Args: content: content of the object to be added to the storage. obj_id: checksum of [bytes] using [ID_HASH_ALGO] algorithm. When given, obj_id will be trusted to match the bytes. If missing, obj_id will be computed on the fly. check_presence: indicate if the presence of the content should be verified before adding the file. Returns: an id of the object into the storage. As the write-storages are always readable as well, any id will be valid to retrieve a content. """ self.wrap_call( self.get_write_threads(obj_id), "add", content, obj_id=obj_id, check_presence=check_presence, ) def add_batch(self, contents, check_presence=True) -> Dict: """Add a batch of new objects to the object storage.""" write_threads = list(self.get_write_threads()) results = self.wrap_call( write_threads, "add_batch", contents, check_presence=check_presence, ) summed = {"object:add": 0, "object:add:bytes": 0} for result in results: summed["object:add"] += result["object:add"] summed["object:add:bytes"] += result["object:add:bytes"] return { "object:add": summed["object:add"] // len(results), "object:add:bytes": summed["object:add:bytes"] // len(results), } def restore(self, content: bytes, obj_id: ObjId) -> None: return self.wrap_call( self.get_write_threads(obj_id), "restore", content, obj_id=obj_id, ).pop() def get(self, obj_id: ObjId) -> bytes: for storage in self.get_read_threads(obj_id): try: return storage.get(obj_id) except ObjNotFoundError: continue # If no storage contains this content, raise the error raise ObjNotFoundError(obj_id) def check(self, obj_id: ObjId) -> None: nb_present = 0 for storage in self.get_read_threads(obj_id): try: storage.check(obj_id) except ObjNotFoundError: continue else: nb_present += 1 # If there is an Error because of a corrupted file, then let it pass. 
# Raise the ObjNotFoundError only if the content couldn't be found in # all the storages. if nb_present == 0: raise ObjNotFoundError(obj_id) def delete(self, obj_id: ObjId): super().delete(obj_id) # Check delete permission return all(self.wrap_call(self.get_write_threads(obj_id), "delete", obj_id)) - - def get_random(self, batch_size: int) -> Iterable[ObjId]: - storages_set = [storage for storage in self.storages if len(storage) > 0] - if len(storages_set) <= 0: - return [] - - while storages_set: - storage = random.choice(storages_set) - try: - return storage.get_random(batch_size) - except NotImplementedError: - storages_set.remove(storage) - # There is no storage that allow the get_random operation - raise NotImplementedError( - "There is no storage implementation into the multiplexer that " - "support the 'get_random' operation" - ) diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index 0650f84..c3e446d 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,135 +1,132 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import bz2 from itertools import dropwhile, islice import lzma -from typing import Callable, Dict, Iterable, Iterator, List, Optional +from typing import Callable, Dict, Iterator, List, Optional import zlib from swh.model import hashutil from .constants import DEFAULT_LIMIT, ID_HASH_ALGO from .exc import ObjNotFoundError from .interface import ObjId, ObjStorageInterface def compute_hash(content, algo=ID_HASH_ALGO): """Compute the content's hash. Args: content (bytes): The raw content to hash hash_name (str): Hash's name (default to ID_HASH_ALGO) Returns: The ID_HASH_ALGO for the content """ return ( hashutil.MultiHash.from_data( content, hash_names=[algo], ) .digest() .get(algo) ) class NullCompressor: def compress(self, data): return data def flush(self): return b"" class NullDecompressor: def decompress(self, data: bytes) -> bytes: return data @property def unused_data(self) -> bytes: return b"" class _CompressorProtocol: def compress(self, data: bytes) -> bytes: ... def flush(self) -> bytes: ... class _DecompressorProtocol: def decompress(self, data: bytes) -> bytes: ... 
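# Bytes found past the end of the compressed stream; pathslicing's get() treats a non-empty value as corruption ("trailing data found").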
unused_data: bytes decompressors: Dict[str, Callable[[], _DecompressorProtocol]] = { "bz2": bz2.BZ2Decompressor, # type: ignore "lzma": lzma.LZMADecompressor, # type: ignore "gzip": lambda: zlib.decompressobj(wbits=31), # type: ignore "zlib": zlib.decompressobj, # type: ignore "none": NullDecompressor, # type: ignore } compressors: Dict[str, Callable[[], _CompressorProtocol]] = { "bz2": bz2.BZ2Compressor, # type: ignore "lzma": lzma.LZMACompressor, # type: ignore "gzip": lambda: zlib.compressobj(wbits=31), # type: ignore "zlib": zlib.compressobj, # type: ignore "none": NullCompressor, # type: ignore } class ObjStorage(metaclass=abc.ABCMeta): def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed self.allow_delete = allow_delete def add_batch(self: ObjStorageInterface, contents, check_presence=True) -> Dict: summary = {"object:add": 0, "object:add:bytes": 0} for obj_id, content in contents.items(): if check_presence and obj_id in self: continue self.add(content, obj_id, check_presence=False) summary["object:add"] += 1 summary["object:add:bytes"] += len(content) return summary def restore(self: ObjStorageInterface, content: bytes, obj_id: ObjId) -> None: # check_presence to false will erase the potential previous content. self.add(content, obj_id, check_presence=False) def get_batch( self: ObjStorageInterface, obj_ids: List[ObjId] ) -> Iterator[Optional[bytes]]: for obj_id in obj_ids: try: yield self.get(obj_id) except ObjNotFoundError: yield None @abc.abstractmethod def delete(self, obj_id: ObjId): if not self.allow_delete: raise PermissionError("Delete is not allowed.") - def get_random(self, batch_size: int) -> Iterable[ObjId]: - pass - def list_content( self: ObjStorageInterface, last_obj_id: Optional[ObjId] = None, limit: int = DEFAULT_LIMIT, ) -> Iterator[ObjId]: it = iter(self) if last_obj_id is not None: it = dropwhile(last_obj_id.__ge__, it) return islice(it, limit) diff --git a/swh/objstorage/tests/test_multiplexer_filter.py b/swh/objstorage/tests/test_multiplexer_filter.py index c7d7eaa..215a140 100644 --- a/swh/objstorage/tests/test_multiplexer_filter.py +++ b/swh/objstorage/tests/test_multiplexer_filter.py @@ -1,332 +1,313 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import random import shutil from string import ascii_lowercase import tempfile import unittest from swh.model import hashutil from swh.objstorage.exc import Error, ObjNotFoundError from swh.objstorage.factory import get_objstorage from swh.objstorage.multiplexer.filter import id_prefix, id_regex, read_only from swh.objstorage.objstorage import compute_hash def get_random_content(): return bytes("".join(random.sample(ascii_lowercase, 10)), "utf8") class MixinTestReadFilter(unittest.TestCase): # Read only filter should not allow writing def setUp(self): super().setUp() self.tmpdir = tempfile.mkdtemp() pstorage = { "cls": "pathslicing", "root": self.tmpdir, "slicing": "0:5", } base_storage = get_objstorage(**pstorage) self.storage = get_objstorage( "filtered", storage_conf=pstorage, filters_conf=[read_only()] ) self.valid_content = b"pre-existing content" self.invalid_content = b"invalid_content" self.true_invalid_content = b"Anything that is not correct" self.absent_content = b"non-existent content" # Create a valid content. 
self.valid_id = compute_hash(self.valid_content) base_storage.add(self.valid_content, obj_id=self.valid_id) # Create an invalid id and add a content with it. self.invalid_id = compute_hash(self.true_invalid_content) base_storage.add(self.invalid_content, obj_id=self.invalid_id) # Compute an id for a non-existing content. self.absent_id = compute_hash(self.absent_content) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_can_contains(self): self.assertTrue(self.valid_id in self.storage) self.assertTrue(self.invalid_id in self.storage) self.assertFalse(self.absent_id in self.storage) def test_can_iter(self): self.assertIn(self.valid_id, iter(self.storage)) self.assertIn(self.invalid_id, iter(self.storage)) def test_can_len(self): self.assertEqual(2, len(self.storage)) def test_can_get(self): self.assertEqual(self.valid_content, self.storage.get(self.valid_id)) self.assertEqual(self.invalid_content, self.storage.get(self.invalid_id)) def test_can_check(self): with self.assertRaises(ObjNotFoundError): self.storage.check(self.absent_id) with self.assertRaises(Error): self.storage.check(self.invalid_id) self.storage.check(self.valid_id) - def test_can_get_random(self): - self.assertEqual(1, len(list(self.storage.get_random(1)))) - self.assertEqual( - len(list(self.storage)), len(set(self.storage.get_random(1000))) - ) - def test_cannot_add(self): new_id = self.storage.add(b"New content") result = self.storage.add(self.valid_content, self.valid_id) self.assertIsNone(new_id, self.storage) self.assertIsNone(result) def test_cannot_restore(self): result = self.storage.restore(self.valid_content, self.valid_id) self.assertIsNone(result) class MixinTestIdFilter: """Mixin class that tests the filters based on filter.IdFilter Methods "make_valid", "make_invalid" and "filter_storage" must be implemented by subclasses. """ def setUp(self): super().setUp() # Use a hack here : as the mock uses the content as id, it is easy to # create contents that are filtered or not. 
self.prefix = "71" self.tmpdir = tempfile.mkdtemp() # Make the storage filtered self.sconf = { "cls": "pathslicing", "root": self.tmpdir, "slicing": "0:5", } storage = get_objstorage(**self.sconf) self.base_storage = storage self.storage = self.filter_storage(self.sconf) # Present content with valid id self.present_valid_content = self.ensure_valid(b"yroqdtotji") self.present_valid_id = compute_hash(self.present_valid_content) # Present content with invalid id self.present_invalid_content = self.ensure_invalid(b"glxddlmmzb") self.present_invalid_id = compute_hash(self.present_invalid_content) # Missing content with valid id self.missing_valid_content = self.ensure_valid(b"rmzkdclkez") self.missing_valid_id = compute_hash(self.missing_valid_content) # Missing content with invalid id self.missing_invalid_content = self.ensure_invalid(b"hlejfuginh") self.missing_invalid_id = compute_hash(self.missing_invalid_content) # Present corrupted content with valid id self.present_corrupted_valid_content = self.ensure_valid(b"cdsjwnpaij") self.true_present_corrupted_valid_content = self.ensure_valid(b"mgsdpawcrr") self.present_corrupted_valid_id = compute_hash( self.true_present_corrupted_valid_content ) # Present corrupted content with invalid id self.present_corrupted_invalid_content = self.ensure_invalid(b"pspjljnrco") self.true_present_corrupted_invalid_content = self.ensure_invalid(b"rjocbnnbso") self.present_corrupted_invalid_id = compute_hash( self.true_present_corrupted_invalid_content ) # Missing (potentially) corrupted content with valid id self.missing_corrupted_valid_content = self.ensure_valid(b"zxkokfgtou") self.true_missing_corrupted_valid_content = self.ensure_valid(b"royoncooqa") self.missing_corrupted_valid_id = compute_hash( self.true_missing_corrupted_valid_content ) # Missing (potentially) corrupted content with invalid id self.missing_corrupted_invalid_content = self.ensure_invalid(b"hxaxnrmnyk") self.true_missing_corrupted_invalid_content = self.ensure_invalid(b"qhbolyuifr") self.missing_corrupted_invalid_id = compute_hash( self.true_missing_corrupted_invalid_content ) # Add the content that are supposed to be present self.storage.add(self.present_valid_content, obj_id=self.present_valid_id) self.storage.add(self.present_invalid_content, obj_id=self.present_invalid_id) self.storage.add( self.present_corrupted_valid_content, obj_id=self.present_corrupted_valid_id ) self.storage.add( self.present_corrupted_invalid_content, obj_id=self.present_corrupted_invalid_id, ) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def filter_storage(self, sconf): raise NotImplementedError( "Id_filter test class must have a filter_storage method" ) def ensure_valid(self, content=None): if content is None: content = get_random_content() while not self.storage.is_valid(compute_hash(content)): content = get_random_content() return content def ensure_invalid(self, content=None): if content is None: content = get_random_content() while self.storage.is_valid(compute_hash(content)): content = get_random_content() return content def test_contains(self): # Both contents are present, but the invalid one should be ignored. 
self.assertTrue(self.present_valid_id in self.storage) self.assertFalse(self.present_invalid_id in self.storage) self.assertFalse(self.missing_valid_id in self.storage) self.assertFalse(self.missing_invalid_id in self.storage) self.assertTrue(self.present_corrupted_valid_id in self.storage) self.assertFalse(self.present_corrupted_invalid_id in self.storage) self.assertFalse(self.missing_corrupted_valid_id in self.storage) self.assertFalse(self.missing_corrupted_invalid_id in self.storage) def test_iter(self): self.assertIn(self.present_valid_id, iter(self.storage)) self.assertNotIn(self.present_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_valid_id, iter(self.storage)) self.assertNotIn(self.missing_invalid_id, iter(self.storage)) self.assertIn(self.present_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.present_corrupted_invalid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_valid_id, iter(self.storage)) self.assertNotIn(self.missing_corrupted_invalid_id, iter(self.storage)) def test_len(self): # Four contents are present, but only two should be valid. self.assertEqual(2, len(self.storage)) def test_get(self): self.assertEqual( self.present_valid_content, self.storage.get(self.present_valid_id) ) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_invalid_id) self.assertEqual( self.present_corrupted_valid_content, self.storage.get(self.present_corrupted_valid_id), ) with self.assertRaises(ObjNotFoundError): self.storage.get(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.get(self.missing_corrupted_invalid_id) def test_check(self): self.storage.check(self.present_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_invalid_id) with self.assertRaises(Error): self.storage.check(self.present_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.present_corrupted_invalid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(self.missing_corrupted_invalid_id) - def test_get_random(self): - self.assertEqual(0, len(list(self.storage.get_random(0)))) - - random_content = list(self.storage.get_random(1000)) - self.assertIn(self.present_valid_id, random_content) - self.assertNotIn(self.present_invalid_id, random_content) - self.assertNotIn(self.missing_valid_id, random_content) - self.assertNotIn(self.missing_invalid_id, random_content) - self.assertIn(self.present_corrupted_valid_id, random_content) - self.assertNotIn(self.present_corrupted_invalid_id, random_content) - self.assertNotIn(self.missing_corrupted_valid_id, random_content) - self.assertNotIn(self.missing_corrupted_invalid_id, random_content) - def test_add(self): # Add valid and invalid contents to the storage and check their # presence with the unfiltered storage. 
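# The id filter drops the invalid add silently, so only the valid content should reach the wrapped storage.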
valid_content = self.ensure_valid(b"ulepsrjbgt") valid_id = compute_hash(valid_content) invalid_content = self.ensure_invalid(b"znvghkjked") invalid_id = compute_hash(invalid_content) self.storage.add(valid_content, obj_id=valid_id) self.storage.add(invalid_content, obj_id=invalid_id) self.assertTrue(valid_id in self.base_storage) self.assertFalse(invalid_id in self.base_storage) def test_restore(self): # Add corrupted content to the storage and the try to restore it valid_content = self.ensure_valid(b"ulepsrjbgt") valid_id = compute_hash(valid_content) corrupted_content = self.ensure_valid(b"ltjkjsloyb") corrupted_id = compute_hash(corrupted_content) self.storage.add(corrupted_content, obj_id=valid_id) with self.assertRaises(ObjNotFoundError): self.storage.check(corrupted_id) with self.assertRaises(Error): self.storage.check(valid_id) self.storage.restore(valid_content, obj_id=valid_id) self.storage.check(valid_id) class TestPrefixFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): self.prefix = b"71" super().setUp() def ensure_valid(self, content): obj_id = compute_hash(content) hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertTrue(hex_obj_id.startswith(self.prefix)) return content def ensure_invalid(self, content): obj_id = compute_hash(content) hex_obj_id = hashutil.hash_to_hex(obj_id) self.assertFalse(hex_obj_id.startswith(self.prefix)) return content def filter_storage(self, sconf): return get_objstorage( "filtered", storage_conf=sconf, filters_conf=[id_prefix(self.prefix)], ) class TestRegexFilter(MixinTestIdFilter, unittest.TestCase): def setUp(self): self.regex = r"[a-f][0-9].*" super().setUp() def filter_storage(self, sconf): return get_objstorage( "filtered", storage_conf=sconf, filters_conf=[id_regex(self.regex)] ) diff --git a/swh/objstorage/tests/test_objstorage_multiplexer.py b/swh/objstorage/tests/test_objstorage_multiplexer.py index bfda77b..bd3164b 100644 --- a/swh/objstorage/tests/test_objstorage_multiplexer.py +++ b/swh/objstorage/tests/test_objstorage_multiplexer.py @@ -1,68 +1,61 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import shutil import tempfile import unittest from swh.objstorage.backends.pathslicing import PathSlicingObjStorage from swh.objstorage.multiplexer import MultiplexerObjStorage from swh.objstorage.multiplexer.filter import add_filter, read_only from .objstorage_testing import ObjStorageTestFixture class TestMultiplexerObjStorage(ObjStorageTestFixture, unittest.TestCase): def setUp(self): super().setUp() self.tmpdir = tempfile.mkdtemp() os.mkdir(os.path.join(self.tmpdir, "root1")) os.mkdir(os.path.join(self.tmpdir, "root2")) self.storage_v1 = PathSlicingObjStorage( os.path.join(self.tmpdir, "root1"), "0:2/2:4" ) self.storage_v2 = PathSlicingObjStorage( os.path.join(self.tmpdir, "root2"), "0:1/0:5" ) self.r_storage = add_filter(self.storage_v1, read_only()) self.w_storage = self.storage_v2 self.storage = MultiplexerObjStorage([self.r_storage, self.w_storage]) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_contains(self): content_p, obj_id_p = self.hash_content(b"contains_present") content_m, obj_id_m = self.hash_content(b"contains_missing") self.storage.add(content_p, obj_id=obj_id_p) self.assertIn(obj_id_p, self.storage) self.assertNotIn(obj_id_m, self.storage) def test_delete_missing(self): 
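# delete() is gated by the allow_delete flag (see ObjStorage.delete), so both wrapped storages must opt in before the inherited test can run.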
self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().test_delete_missing() def test_delete_present(self): self.storage_v1.allow_delete = True self.storage_v2.allow_delete = True super().test_delete_present() - def test_get_random_contents(self): - content, obj_id = self.hash_content(b"get_random_content") - self.storage.add(content, obj_id=obj_id) - random_contents = list(self.storage.get_random(1)) - self.assertEqual(1, len(random_contents)) - self.assertIn(obj_id, random_contents) - def test_access_readonly(self): # Add a content to the readonly storage content, obj_id = self.hash_content(b"content in read-only") self.storage_v1.add(content, obj_id=obj_id) # Try to retrieve it on the main storage self.assertIn(obj_id, self.storage) diff --git a/swh/objstorage/tests/test_objstorage_pathslicing.py b/swh/objstorage/tests/test_objstorage_pathslicing.py index 41a77cc..23a9735 100644 --- a/swh/objstorage/tests/test_objstorage_pathslicing.py +++ b/swh/objstorage/tests/test_objstorage_pathslicing.py @@ -1,161 +1,154 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import shutil import tempfile import unittest from unittest.mock import DEFAULT, patch from swh.model import hashutil from swh.objstorage import exc from swh.objstorage.constants import ID_DIGEST_LENGTH from swh.objstorage.factory import get_objstorage from .objstorage_testing import ObjStorageTestFixture class TestPathSlicingObjStorage(ObjStorageTestFixture, unittest.TestCase): compression = "none" def setUp(self): super().setUp() self.slicing = "0:2/2:4/4:6" self.tmpdir = tempfile.mkdtemp() self.storage = get_objstorage( "pathslicing", root=self.tmpdir, slicing=self.slicing, compression=self.compression, ) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def content_path(self, obj_id): hex_obj_id = hashutil.hash_to_hex(obj_id) return self.storage.slicer.get_path(hex_obj_id) def test_iter(self): content, obj_id = self.hash_content(b"iter") self.assertEqual(list(iter(self.storage)), []) self.storage.add(content, obj_id=obj_id) self.assertEqual(list(iter(self.storage)), [obj_id]) def test_len(self): content, obj_id = self.hash_content(b"len") self.assertEqual(len(self.storage), 0) self.storage.add(content, obj_id=obj_id) self.assertEqual(len(self.storage), 1) def test_check_ok(self): content, obj_id = self.hash_content(b"check_ok") self.storage.add(content, obj_id=obj_id) assert self.storage.check(obj_id) is None assert self.storage.check(obj_id.hex()) is None def test_check_id_mismatch(self): content, obj_id = self.hash_content(b"check_id_mismatch") self.storage.add(b"unexpected content", obj_id=obj_id) with self.assertRaises(exc.Error) as error: self.storage.check(obj_id) self.assertEqual( ( "Corrupt object %s should have id " "12ebb2d6c81395bcc5cab965bdff640110cb67ff" % obj_id.hex(), ), error.exception.args, ) - def test_get_random_contents(self): - content, obj_id = self.hash_content(b"get_random_content") - self.storage.add(content, obj_id=obj_id) - random_contents = list(self.storage.get_random(1)) - self.assertEqual(1, len(random_contents)) - self.assertIn(obj_id, random_contents) - def test_iterate_from(self): all_ids = [] for i in range(100): content, obj_id = self.hash_content(b"content %d" % i) self.storage.add(content, obj_id=obj_id) all_ids.append(obj_id) all_ids.sort() ids = 
list(self.storage.iter_from(b"\x00" * ID_DIGEST_LENGTH)) self.assertEqual(len(ids), len(all_ids)) self.assertEqual(ids, all_ids) ids = list(self.storage.iter_from(all_ids[0])) self.assertEqual(len(ids), len(all_ids) - 1) self.assertEqual(ids, all_ids[1:]) ids = list(self.storage.iter_from(all_ids[-1], n_leaf=True)) n_leaf = ids[-1] ids = ids[:-1] self.assertEqual(n_leaf, 1) self.assertEqual(len(ids), 0) ids = list(self.storage.iter_from(all_ids[-2], n_leaf=True)) n_leaf = ids[-1] ids = ids[:-1] self.assertEqual(n_leaf, 2) # beware, this depends on the hash algo self.assertEqual(len(ids), 1) self.assertEqual(ids, all_ids[-1:]) def test_fdatasync_default(self): content, obj_id = self.hash_content(b"check_fdatasync") with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched: self.storage.add(content, obj_id=obj_id) if self.storage.use_fdatasync: assert patched["fdatasync"].call_count == 1 assert patched["fsync"].call_count == 0 else: assert patched["fdatasync"].call_count == 0 assert patched["fsync"].call_count == 1 def test_fdatasync_forced_on(self): self.storage.use_fdatasync = True content, obj_id = self.hash_content(b"check_fdatasync") with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched: self.storage.add(content, obj_id=obj_id) assert patched["fdatasync"].call_count == 1 assert patched["fsync"].call_count == 0 def test_fdatasync_forced_off(self): self.storage.use_fdatasync = False content, obj_id = self.hash_content(b"check_fdatasync") with patch.multiple("os", fsync=DEFAULT, fdatasync=DEFAULT) as patched: self.storage.add(content, obj_id=obj_id) assert patched["fdatasync"].call_count == 0 assert patched["fsync"].call_count == 1 def test_check_not_compressed(self): content, obj_id = self.hash_content(b"check_not_compressed") self.storage.add(content, obj_id=obj_id) with open(self.content_path(obj_id), "ab") as f: # Add garbage. f.write(b"garbage") with self.assertRaises(exc.Error) as error: self.storage.check(obj_id) if self.compression == "none": self.assertIn("Corrupt object", error.exception.args[0]) else: self.assertIn("trailing data found", error.exception.args[0]) class TestPathSlicingObjStorageGzip(TestPathSlicingObjStorage): compression = "gzip" class TestPathSlicingObjStorageZlib(TestPathSlicingObjStorage): compression = "zlib" class TestPathSlicingObjStorageBz2(TestPathSlicingObjStorage): compression = "bz2" class TestPathSlicingObjStorageLzma(TestPathSlicingObjStorage): compression = "lzma"
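For reference, the slicing arithmetic that the surviving pathslicing code relies on can be exercised directly. A small sketch using the example hash from the PathSlicer docstring; the root path here is an arbitrary assumption:

    from swh.objstorage.backends.pathslicing import PathSlicer

    slicer = PathSlicer("/srv/objects", "0:2/2:4/4:6")
    hex_id = "34973274ccef6ab4dfaaf86599792fa9c3fe4689"

    print(slicer.get_slices(hex_id))  # ['34', '97', '32']
    print(slicer.get_path(hex_id))
    # /srv/objects/34/97/32/34973274ccef6ab4dfaaf86599792fa9c3fe4689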