diff --git a/swh/objstorage/backends/libcloud.py b/swh/objstorage/backends/libcloud.py
index 34bf7f1..1a487d9 100644
--- a/swh/objstorage/backends/libcloud.py
+++ b/swh/objstorage/backends/libcloud.py
@@ -1,214 +1,219 @@
 # Copyright (C) 2016-2017 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import abc
 import collections
 
 from urllib.parse import urlencode
 
 from swh.model import hashutil
 from swh.objstorage.objstorage import ObjStorage, compute_hash
 from swh.objstorage.objstorage import compressors, decompressors
 from swh.objstorage.exc import ObjNotFoundError, Error
 
 from libcloud.storage import providers
 import libcloud.storage.drivers.s3
 from libcloud.storage.types import Provider, ObjectDoesNotExistError
 
 
 def patch_libcloud_s3_urlencode():
     """Patches libcloud's S3 backend to properly sign queries.
 
     Recent versions of libcloud are not affected (they use signature V4),
     but 1.5.0 (the one in Debian 9) is."""
 
     def s3_urlencode(params):
         """Like urllib.parse.urlencode, but sorts the parameters first.
         This is required to properly compute the request signature, see
         https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#ConstructingTheCanonicalizedResourceElement
         """  # noqa
         return urlencode(collections.OrderedDict(sorted(params.items())))
 
     libcloud.storage.drivers.s3.urlencode = s3_urlencode
 
 
 patch_libcloud_s3_urlencode()
 
 
 class CloudObjStorage(ObjStorage, metaclass=abc.ABCMeta):
     """Abstract ObjStorage that connects to a cloud using Libcloud.
 
     Implementations of this class must redefine the _get_provider method to
     make it return a driver provider (i.e. an object that supports the
     `get_driver` method) which returns a LibCloud driver (see
     https://libcloud.readthedocs.io/en/latest/storage/api.html).
 
     """
     def __init__(self, container_name, compression=None, **kwargs):
         super().__init__(**kwargs)
         self.driver = self._get_driver(**kwargs)
         self.container_name = container_name
         self.container = self.driver.get_container(
             container_name=container_name)
         self.compression = compression
 
     def _get_driver(self, **kwargs):
         """Initialize a driver to communicate with the cloud.
 
         Kwargs: arguments passed to the StorageDriver class, typically
           key: key to connect to the API.
           secret: secret key for authentication.
           secure: (bool) support HTTPS
           host: (str)
           port: (int)
           api_version: (str)
           region: (str)
 
         Returns:
             a Libcloud driver to a cloud storage.
 
         """
         # Get the driver class from its description.
         cls = providers.get_driver(self._get_provider())
         # Initialize the driver.
         return cls(**kwargs)
 
     @abc.abstractmethod
     def _get_provider(self):
         """Get a libcloud driver provider.
 
         This method must be overridden by subclasses to specify which of
         the native libcloud drivers the current storage should connect to.
         Alternatively, a provider for a custom driver may be returned, in
         which case the provider will have to support the `get_driver`
         method.
 
         """
         raise NotImplementedError('%s must implement `_get_provider` method'
                                   % type(self))
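As a concrete illustration of this contract, a subclass only has to return
something accepted by libcloud's providers.get_driver(). The sketch below is
hypothetical and not part of this patch; Provider.AZURE_BLOBS is simply one of
the native provider constants libcloud exposes:

    from libcloud.storage.types import Provider

    from swh.objstorage.backends.libcloud import CloudObjStorage


    class AzureCloudObjStorage(CloudObjStorage):
        """Hypothetical Azure Blobs object storage (illustration only)."""

        def _get_provider(self):
            # Anything accepted by providers.get_driver() works here,
            # including a custom provider object with a `get_driver` method.
            return Provider.AZURE_BLOBS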
""" raise NotImplementedError('%s must implement `get_provider` method' % type(self)) def check_config(self, *, check_write): """Check the configuration for this object storage""" # FIXME: hopefully this blew up during instantiation return True def __contains__(self, obj_id): try: self._get_object(obj_id) except ObjNotFoundError: return False else: return True def __iter__(self): """ Iterate over the objects present in the storage Warning: Iteration over the contents of a cloud-based object storage may have bad efficiency: due to the very high amount of objects in it and the fact that it is remote, get all the contents of the current object storage may result in a lot of network requests. You almost certainly don't want to use this method in production. """ yield from (hashutil.bytehex_to_hash(obj.name.encode()) for obj in self.driver.iterate_container_objects(self.container)) def __len__(self): """Compute the number of objects in the current object storage. Warning: this currently uses `__iter__`, its warning about bad performance applies. Returns: number of objects contained in the storage. """ return sum(1 for i in self) def add(self, content, obj_id=None, check_presence=True): if obj_id is None: # Checksum is missing, compute it on the fly. obj_id = compute_hash(content) if check_presence and obj_id in self: return obj_id self._put_object(content, obj_id) return obj_id def restore(self, content, obj_id=None): return self.add(content, obj_id, check_presence=False) def get(self, obj_id): obj = b''.join(self._get_object(obj_id).as_stream()) - return decompressors[self.compression](obj) + d = decompressors[self.compression]() + ret = d.decompress(obj) + if d.unused_data: + hex_obj_id = hashutil.hash_to_hex(obj_id) + raise Error('Corrupt object %s: trailing data found' % hex_obj_id) + return ret def check(self, obj_id): # Check that the file exists, as _get_object raises ObjNotFoundError self._get_object(obj_id) # Check the content integrity obj_content = self.get(obj_id) content_obj_id = compute_hash(obj_content) if content_obj_id != obj_id: raise Error(obj_id) def delete(self, obj_id): super().delete(obj_id) # Check delete permission obj = self._get_object(obj_id) return self.driver.delete_object(obj) def _get_object(self, obj_id): """Get a Libcloud wrapper for an object pointer. This wrapper does not retrieve the content of the object directly. """ hex_obj_id = hashutil.hash_to_hex(obj_id) try: return self.driver.get_object(self.container_name, hex_obj_id) except ObjectDoesNotExistError: raise ObjNotFoundError(obj_id) def _compressor(self, data): comp = compressors[self.compression]() for chunk in data: cchunk = comp.compress(chunk) if cchunk: yield cchunk trail = comp.flush() if trail: yield trail def _put_object(self, content, obj_id): """Create an object in the cloud storage. Created object will contain the content and be referenced by the given id. 
""" hex_obj_id = hashutil.hash_to_hex(obj_id) if not isinstance(content, collections.Iterator): content = (content,) self.driver.upload_object_via_stream( self._compressor(content), self.container, hex_obj_id) class AwsCloudObjStorage(CloudObjStorage): """ Amazon's S3 Cloud-based object storage """ def _get_provider(self): return Provider.S3 class OpenStackCloudObjStorage(CloudObjStorage): """ OpenStack Swift Cloud based object storage """ def _get_provider(self): return Provider.OPENSTACK_SWIFT diff --git a/swh/objstorage/backends/seaweed.py b/swh/objstorage/backends/seaweed.py index 52652e6..f5243fc 100644 --- a/swh/objstorage/backends/seaweed.py +++ b/swh/objstorage/backends/seaweed.py @@ -1,202 +1,208 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import io import logging from urllib.parse import urljoin, urlparse import requests from swh.model import hashutil from swh.objstorage.objstorage import ObjStorage, compute_hash from swh.objstorage.objstorage import compressors, decompressors from swh.objstorage.objstorage import DEFAULT_LIMIT from swh.objstorage.exc import ObjNotFoundError, Error LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.ERROR) class WeedFiler(object): """Simple class that encapsulates access to a seaweedfs filer service. TODO: handle errors """ def __init__(self, url): self.url = url def get(self, remote_path): url = urljoin(self.url, remote_path) LOGGER.debug('Get file %s', url) return requests.get(url).content def exists(self, remote_path): url = urljoin(self.url, remote_path) LOGGER.debug('Check file %s', url) return requests.head(url).status_code == 200 def put(self, fp, remote_path): url = urljoin(self.url, remote_path) LOGGER.debug('Put file %s', url) return requests.post(url, files={'file': fp}) def delete(self, remote_path): url = urljoin(self.url, remote_path) LOGGER.debug('Delete file %s', url) return requests.delete(url) def list(self, dir, last_file_name=None, limit=DEFAULT_LIMIT): '''list sub folders and files of @dir. show a better look if you turn on returns a dict of "sub-folders and files" ''' d = dir if dir.endswith('/') else (dir + '/') url = urljoin(self.url, d) headers = {'Accept': 'application/json'} params = {'limit': limit} if last_file_name: params['lastFileName'] = last_file_name LOGGER.debug('List directory %s', url) rsp = requests.get(url, params=params, headers=headers) if rsp.ok: return rsp.json() else: LOGGER.error('Error listing "%s". [HTTP %d]' % ( url, rsp.status_code)) class WeedObjStorage(ObjStorage): """ObjStorage with seaweedfs abilities, using the Filer API. 
 
 
 class WeedObjStorage(ObjStorage):
     """ObjStorage with seaweedfs abilities, using the Filer API.
 
     https://github.com/chrislusf/seaweedfs/wiki/Filer-Server-API
     """
     def __init__(self, url='http://127.0.0.1:8888/swh',
                  compression=None, **kwargs):
         super().__init__(**kwargs)
         self.wf = WeedFiler(url)
         self.root_path = urlparse(url).path
         self.compression = compression
 
     def check_config(self, *, check_write):
         """Check the configuration for this object storage"""
         # FIXME: hopefully this blew up during instantiation
         return True
 
     def __contains__(self, obj_id):
         return self.wf.exists(self._path(obj_id))
 
     def __iter__(self):
         """Iterate over the objects present in the storage.
 
         Warning: iteration over the contents of a cloud-based object storage
         can be very inefficient: because of the very large number of objects
         it holds and the fact that it is remote, getting all the contents of
         the current object storage may result in a lot of network requests.
 
         You almost certainly don't want to use this method in production.
         """
         obj_id = last_obj_id = None
         while True:
             for obj_id in self.list_content(last_obj_id=last_obj_id):
                 yield obj_id
             if last_obj_id == obj_id:
                 break
             last_obj_id = obj_id
 
     def __len__(self):
         """Compute the number of objects in the current object storage.
 
         Warning: this currently uses `__iter__`, so its warning about bad
         performance applies.
 
         Returns:
             number of objects contained in the storage.
 
         """
         return sum(1 for i in self)
 
     def add(self, content, obj_id=None, check_presence=True):
         if obj_id is None:
             # Checksum is missing, compute it on the fly.
             obj_id = compute_hash(content)
 
         if check_presence and obj_id in self:
             return obj_id
 
         def compressor(data):
             comp = compressors[self.compression]()
             for chunk in data:
                 yield comp.compress(chunk)
             yield comp.flush()
 
         if isinstance(content, bytes):
             content = [content]
 
         # XXX should handle streaming correctly...
         self.wf.put(io.BytesIO(b''.join(compressor(content))),
                     self._path(obj_id))
         return obj_id
 
     def restore(self, content, obj_id=None):
         return self.add(content, obj_id, check_presence=False)
 
     def get(self, obj_id):
         try:
             obj = self.wf.get(self._path(obj_id))
-            return decompressors[self.compression](obj)
         except Exception:
             raise ObjNotFoundError(obj_id)
 
+        d = decompressors[self.compression]()
+        ret = d.decompress(obj)
+        if d.unused_data:
+            hex_obj_id = hashutil.hash_to_hex(obj_id)
+            raise Error('Corrupt object %s: trailing data found' % hex_obj_id)
+        return ret
+
     def check(self, obj_id):
         # Check the content integrity
         obj_content = self.get(obj_id)
         content_obj_id = compute_hash(obj_content)
         if content_obj_id != obj_id:
             raise Error(obj_id)
 
     def delete(self, obj_id):
         super().delete(obj_id)  # Check delete permission
         if obj_id not in self:
             raise ObjNotFoundError(obj_id)
         self.wf.delete(self._path(obj_id))
         return True
 
     def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT):
         if last_obj_id:
             last_obj_id = hashutil.hash_to_hex(last_obj_id)
         resp = self.wf.list(self.root_path, last_obj_id, limit)
         if resp is not None:
             entries = resp['Entries']
             if entries:
                 for obj in entries:
                     if obj is not None:
                         bytehex = obj['FullPath'].rsplit('/', 1)[-1]
                         yield hashutil.bytehex_to_hash(bytehex.encode())
 
     # internal methods
     def _put_object(self, content, obj_id):
         """Create an object in the cloud storage.
 
         Created object will contain the content and be referenced by the
         given id.
 
         """
         def compressor(data):
             comp = compressors[self.compression]()
             for chunk in data:
                 yield comp.compress(chunk)
             yield comp.flush()
 
         if isinstance(content, bytes):
             content = [content]
 
         self.wf.put(io.BytesIO(b''.join(compressor(content))),
                     self._path(obj_id))
 
     def _path(self, obj_id):
         return hashutil.hash_to_hex(obj_id)
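The termination condition in WeedObjStorage.__iter__ above is easy to misread:
the loop stops once a listing pass makes no progress, i.e. the last id seen is
still the previous cursor. Here is the same pattern in isolation, with a
stand-in `list_page(cursor)` in place of `list_content` (a sketch, not code
from the patch):

    def iterate_all(list_page):
        """Yield every item from a paginated listing.

        `list_page(cursor)` must yield items strictly after `cursor`
        (the cursor itself excluded), in a stable order.
        """
        item = cursor = None
        while True:
            for item in list_page(cursor):
                yield item
            if cursor == item:  # no progress: the listing is exhausted
                break
            cursor = item


    pages = {None: ['a', 'b'], 'b': ['c'], 'c': []}
    assert list(iterate_all(lambda cur: pages.get(cur, []))) == ['a', 'b', 'c']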
""" def compressor(data): comp = compressors[self.compression]() for chunk in data: yield comp.compress(chunk) yield comp.flush() if isinstance(content, bytes): content = [content] self.wf.put(io.BytesIO(b''.join(compressor(content))), self._path(obj_id)) def _path(self, obj_id): return hashutil.hash_to_hex(obj_id) diff --git a/swh/objstorage/objstorage.py b/swh/objstorage/objstorage.py index 88e7ef2..c11e33a 100644 --- a/swh/objstorage/objstorage.py +++ b/swh/objstorage/objstorage.py @@ -1,334 +1,342 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from itertools import dropwhile, islice import bz2 -import gzip import lzma import zlib from swh.model import hashutil from .exc import ObjNotFoundError ID_HASH_ALGO = 'sha1' ID_HASH_LENGTH = 40 # Size in bytes of the hash hexadecimal representation. DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024 # Size in bytes of the streaming chunks DEFAULT_LIMIT = 10000 def compute_hash(content): """Compute the content's hash. Args: content (bytes): The raw content to hash hash_name (str): Hash's name (default to ID_HASH_ALGO) Returns: The ID_HASH_ALGO for the content """ return hashutil.MultiHash.from_data( content, hash_names=[ID_HASH_ALGO], ).digest().get(ID_HASH_ALGO) class NullCompressor: def compress(self, data): return data def flush(self): return b'' +class NullDecompressor: + def decompress(self, data): + return data + + @property + def unused_data(self): + return b'' + + decompressors = { - 'bz2': bz2.decompress, - 'lzma': lzma.decompress, - 'gzip': gzip.decompress, - 'zlib': zlib.decompress, - None: lambda x: x, - } + 'bz2': bz2.BZ2Decompressor, + 'lzma': lzma.LZMADecompressor, + 'gzip': lambda: zlib.decompressobj(wbits=31), + 'zlib': zlib.decompressobj, + None: NullDecompressor, +} compressors = { 'bz2': bz2.BZ2Compressor, 'lzma': lzma.LZMACompressor, 'gzip': lambda: zlib.compressobj(wbits=31), 'zlib': zlib.compressobj, None: NullCompressor, } class ObjStorage(metaclass=abc.ABCMeta): """ High-level API to manipulate the Software Heritage object storage. Conceptually, the object storage offers the following methods: - check_config() check if the object storage is properly configured - __contains__() check if an object is present, by object id - add() add a new object, returning an object id - restore() same as add() but erase an already existed content - get() retrieve the content of an object, by object id - check() check the integrity of an object, by object id - delete() remove an object And some management methods: - get_random() get random object id of existing contents (used for the content integrity checker). Some of the methods have available streaming equivalents: - add_stream() same as add() but with a chunked iterator - restore_stream() same as add_stream() but erase already existing content - get_stream() same as get() but returns a chunked iterator Each implementation of this interface can have a different behavior and its own way to store the contents. """ def __init__(self, *, allow_delete=False, **kwargs): # A more complete permission system could be used in place of that if # it becomes needed self.allow_delete = allow_delete @abc.abstractmethod def check_config(self, *, check_write): """Check whether the object storage is properly configured. Args: check_write (bool): if True, check if writes to the object storage can succeed. 
 
 
 class ObjStorage(metaclass=abc.ABCMeta):
     """ High-level API to manipulate the Software Heritage object storage.
 
     Conceptually, the object storage offers the following methods:
 
     - check_config()  check if the object storage is properly configured
     - __contains__()  check if an object is present, by object id
     - add()           add a new object, returning an object id
     - restore()       same as add() but erases any already existing content
     - get()           retrieve the content of an object, by object id
     - check()         check the integrity of an object, by object id
     - delete()        remove an object
 
     And some management methods:
 
     - get_random()    get random object ids of existing contents (used for
                       the content integrity checker).
 
     Some of the methods have available streaming equivalents:
 
     - add_stream()     same as add() but with a chunked iterator
     - restore_stream() same as add_stream() but erases already existing
                        content
     - get_stream()     same as get() but returns a chunked iterator
 
     Each implementation of this interface can have a different behavior and
     its own way to store the contents.
     """
 
     def __init__(self, *, allow_delete=False, **kwargs):
         # A more complete permission system could be used in place of that
         # if it becomes needed
         self.allow_delete = allow_delete
 
     @abc.abstractmethod
     def check_config(self, *, check_write):
         """Check whether the object storage is properly configured.
 
         Args:
             check_write (bool): if True, check if writes to the object
                 storage can succeed.
 
         Returns:
             True if the configuration check passed; an exception is raised
             otherwise.
 
         """
         pass
 
     @abc.abstractmethod
     def __contains__(self, obj_id, *args, **kwargs):
         """Indicate if the given object is present in the storage.
 
         Args:
             obj_id (bytes): object identifier.
 
         Returns:
             True if and only if the object is present in the current object
             storage.
 
         """
         pass
 
     @abc.abstractmethod
     def add(self, content, obj_id=None, check_presence=True, *args,
             **kwargs):
         """Add a new object to the object storage.
 
         Args:
             content (bytes): object's raw content to add in storage.
             obj_id (bytes): checksum of [bytes] using [ID_HASH_ALGO]
                 algorithm. When given, obj_id will be trusted to match the
                 bytes. If missing, obj_id will be computed on the fly.
             check_presence (bool): indicate if the presence of the content
                 should be verified before adding the file.
 
         Returns:
             the id (bytes) of the object in the storage.
 
         """
         pass
 
     def add_batch(self, contents, check_presence=True):
         """Add a batch of new objects to the object storage.
 
         Args:
             contents (dict): mapping from obj_id to object contents
 
         Returns:
             the number of objects added to the storage
 
         """
         ctr = 0
         for obj_id, content in contents.items():
             self.add(content, obj_id, check_presence=check_presence)
             ctr += 1
         return ctr
 
     def restore(self, content, obj_id=None, *args, **kwargs):
         """Restore a content that has been corrupted.
 
         This function is identical to add but does not check if the object
         id is already present in the storage.
 
         The default implementation provided by the current class is suitable
         for most cases.
 
         Args:
             content (bytes): object's raw content to add in storage
             obj_id (bytes): checksum of `bytes` as computed by ID_HASH_ALGO.
                 When given, obj_id will be trusted to match bytes. If
                 missing, obj_id will be computed on the fly.
 
         """
         # check_presence to false will erase the potential previous content.
         return self.add(content, obj_id, check_presence=False)
 
     @abc.abstractmethod
     def get(self, obj_id, *args, **kwargs):
         """Retrieve the content of a given object.
 
         Args:
             obj_id (bytes): object id.
 
         Returns:
             the content of the requested object as bytes.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
 
         """
         pass
 
     def get_batch(self, obj_ids, *args, **kwargs):
         """Retrieve objects' raw content in bulk from storage.
 
         Note: This function does have a default implementation in ObjStorage
         that is suitable for most cases.
 
         For object storages that need to do the minimal number of requests
         possible (ex: remote object storages), this method can be overridden
         to perform a more efficient operation.
 
         Args:
             obj_ids ([bytes]): list of object ids.
 
         Returns:
             the resulting contents, one per id; None stands in for any
             content that could not be retrieved. No exception is raised, as
             a failure for one content must not cancel the whole request.
 
         """
         for obj_id in obj_ids:
             try:
                 yield self.get(obj_id)
             except ObjNotFoundError:
                 yield None
 
     @abc.abstractmethod
     def check(self, obj_id, *args, **kwargs):
         """Perform an integrity check for a given object.
 
         Verify that the object is in place and that its content, once
         decompressed, matches the object id.
 
         Args:
             obj_id (bytes): object identifier.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
             Error: if the requested object is corrupted.
 
         """
         pass
 
     @abc.abstractmethod
     def delete(self, obj_id, *args, **kwargs):
         """Delete an object.
 
         Args:
             obj_id (bytes): object identifier.
 
         Raises:
             ObjNotFoundError: if the requested object is missing.
 
         """
         if not self.allow_delete:
             raise PermissionError("Delete is not allowed.")
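To make the contract above concrete, here is a minimal dict-backed
implementation together with the calls a client would make. This is an
illustration only, not a class provided by swh.objstorage; note how delete()
defers to the base class so that allow_delete is enforced:

    from swh.objstorage.exc import Error, ObjNotFoundError
    from swh.objstorage.objstorage import ObjStorage, compute_hash


    class InMemoryObjStorage(ObjStorage):
        """Minimal dict-backed object storage (illustration only)."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.state = {}

        def check_config(self, *, check_write):
            return True

        def __contains__(self, obj_id):
            return obj_id in self.state

        def add(self, content, obj_id=None, check_presence=True):
            if obj_id is None:
                obj_id = compute_hash(content)
            if not (check_presence and obj_id in self):
                self.state[obj_id] = content
            return obj_id

        def get(self, obj_id):
            if obj_id not in self.state:
                raise ObjNotFoundError(obj_id)
            return self.state[obj_id]

        def check(self, obj_id):
            if compute_hash(self.get(obj_id)) != obj_id:
                raise Error(obj_id)

        def delete(self, obj_id):
            super().delete(obj_id)  # raises PermissionError unless allowed
            del self.state[obj_id]
            return True


    storage = InMemoryObjStorage(allow_delete=True)
    obj_id = storage.add(b'some content')        # id is the sha1 of the bytes
    assert storage.get(obj_id) == b'some content'
    storage.check(obj_id)                        # raises Error on mismatch
    storage.delete(obj_id)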
""" if not self.allow_delete: raise PermissionError("Delete is not allowed.") # Management methods def get_random(self, batch_size, *args, **kwargs): """Get random ids of existing contents. This method is used in order to get random ids to perform content integrity verifications on random contents. Args: batch_size (int): Number of ids that will be given Yields: An iterable of ids (bytes) of contents that are in the current object storage. """ pass # Streaming methods def add_stream(self, content_iter, obj_id, check_presence=True): """Add a new object to the object storage using streaming. This function is identical to add() except it takes a generator that yields the chunked content instead of the whole content at once. Args: content (bytes): chunked generator that yields the object's raw content to add in storage. obj_id (bytes): object identifier check_presence (bool): indicate if the presence of the content should be verified before adding the file. Returns: the id (bytes) of the object into the storage. """ raise NotImplementedError def restore_stream(self, content_iter, obj_id=None): """Restore a content that have been corrupted using streaming. This function is identical to restore() except it takes a generator that yields the chunked content instead of the whole content at once. The default implementation provided by the current class is suitable for most cases. Args: content (bytes): chunked generator that yields the object's raw content to add in storage. obj_id (bytes): object identifier """ # check_presence to false will erase the potential previous content. return self.add_stream(content_iter, obj_id, check_presence=False) def get_stream(self, obj_id, chunk_size=DEFAULT_CHUNK_SIZE): """Retrieve the content of a given object as a chunked iterator. Args: obj_id (bytes): object id. Returns: the content of the requested object as bytes. Raises: ObjNotFoundError: if the requested object is missing. """ raise NotImplementedError def list_content(self, last_obj_id=None, limit=DEFAULT_LIMIT): """Generates known object ids. Args: last_obj_id (bytes): object id from which to iterate from (excluded). limit (int): max number of object ids to generate. Generates: obj_id (bytes): object ids. 
""" it = iter(self) if last_obj_id: it = dropwhile(lambda x: x <= last_obj_id, it) return islice(it, limit) diff --git a/swh/objstorage/tests/test_objstorage_cloud.py b/swh/objstorage/tests/test_objstorage_cloud.py index 0fd8114..1461528 100644 --- a/swh/objstorage/tests/test_objstorage_cloud.py +++ b/swh/objstorage/tests/test_objstorage_cloud.py @@ -1,130 +1,132 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from libcloud.common.types import InvalidCredsError from libcloud.storage.types import (ContainerDoesNotExistError, ObjectDoesNotExistError) from typing import Optional from swh.model import hashutil from swh.objstorage.objstorage import decompressors from swh.objstorage.backends.libcloud import CloudObjStorage from .objstorage_testing import ObjStorageTestFixture API_KEY = 'API_KEY' API_SECRET_KEY = 'API SECRET KEY' CONTAINER_NAME = 'test_container' class MockLibcloudObject(): """ Libcloud object mock that replicates its API """ def __init__(self, name, content): self.name = name self.content = list(content) def as_stream(self): yield from iter(self.content) class MockLibcloudDriver(): """ Mock driver that replicates the used LibCloud API """ def __init__(self, api_key, api_secret_key): self.containers = {CONTAINER_NAME: {}} # Storage is initialized self.api_key = api_key self.api_secret_key = api_secret_key def _check_credentials(self): # Private method may be known as another name in Libcloud but is used # to replicate libcloud behavior (i.e. check credential at each # request) if self.api_key != API_KEY or self.api_secret_key != API_SECRET_KEY: raise InvalidCredsError() def get_container(self, container_name): try: return self.containers[container_name] except KeyError: raise ContainerDoesNotExistError(container_name=container_name, driver=self, value=None) def iterate_container_objects(self, container): self._check_credentials() yield from (v for k, v in sorted(container.items())) def get_object(self, container_name, obj_id): self._check_credentials() try: container = self.get_container(container_name) return container[obj_id] except KeyError: raise ObjectDoesNotExistError(object_name=obj_id, driver=self, value=None) def delete_object(self, obj): self._check_credentials() try: container = self.get_container(CONTAINER_NAME) container.pop(obj.name) return True except KeyError: raise ObjectDoesNotExistError(object_name=obj.name, driver=self, value=None) def upload_object_via_stream(self, content, container, obj_id): self._check_credentials() obj = MockLibcloudObject(obj_id, content) container[obj_id] = obj class MockCloudObjStorage(CloudObjStorage): """ Cloud object storage that uses a mocked driver """ def _get_driver(self, **kwargs): return MockLibcloudDriver(**kwargs) def _get_provider(self): # Implement this for the abc requirement, but behavior is defined in # _get_driver. 
 
 
 class TestCloudObjStorage(ObjStorageTestFixture, unittest.TestCase):
     compression = None  # type: Optional[str]
 
     def setUp(self):
         super().setUp()
         self.storage = MockCloudObjStorage(
             CONTAINER_NAME,
             api_key=API_KEY, api_secret_key=API_SECRET_KEY,
             compression=self.compression,
         )
 
     def test_compression(self):
         content, obj_id = self.hash_content(b'add_get_w_id')
         self.storage.add(content, obj_id=obj_id)
 
         data = self.storage.driver.containers[CONTAINER_NAME]
         obj_id = hashutil.hash_to_hex(obj_id)
         raw_content = b''.join(data[obj_id].content)
 
-        assert decompressors[self.compression](raw_content) == content
+        d = decompressors[self.compression]()
+        assert d.decompress(raw_content) == content
+        assert d.unused_data == b''
 
 
 class TestCloudObjStorageBz2(TestCloudObjStorage):
     compression = 'bz2'
 
 
 class TestCloudObjStorageGzip(TestCloudObjStorage):
     compression = 'gzip'
 
 
 class TestCloudObjStorageLzma(TestCloudObjStorage):
     compression = 'lzma'
 
 
 class TestCloudObjStorageZlib(TestCloudObjStorage):
     compression = 'zlib'
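test_compression verifies that a well-formed stored object round-trips with no
unused_data, but nothing here exercises the new trailing-data rejection in
get(). A test along the following lines would cover it; this is a sketch, not
part of the patch, and it assumes `from swh.objstorage.exc import Error` is
added to the test module's imports:

    def test_trailing_data_on_stored_object(self):
        if self.compression is None:
            # The NullDecompressor never reports unused_data; only check()
            # would catch the corruption in that case.
            return
        content, obj_id = self.hash_content(b'trailing data test')
        self.storage.add(content, obj_id=obj_id)

        # Corrupt the stored object by appending bytes after the end of
        # the compressed stream.
        data = self.storage.driver.containers[CONTAINER_NAME]
        data[hashutil.hash_to_hex(obj_id)].content.append(b'trailing garbage')

        with self.assertRaises(Error):
            self.storage.get(obj_id)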