diff --git a/PKG-INFO b/PKG-INFO index 4d973b0..13e9516 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,65 +1,65 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.49 +Version: 0.0.50 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN +Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler -Project-URL: Funding, https://www.softwareheritage.org/donate Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new content from a Git repository) and one-off activities (e.g., loading a specific version of a source package). # Tests ## Running tests manually ### Test data To be able to run (unit) tests, you need to have the [[https://forge.softwareheritage.org/source/swh-storage-testdata.git|swh-storage-testdata]] in the parent directory. If you have set your environment following the [[ https://docs.softwareheritage.org/devel/getting-started.html#getting-started|Getting started]] document, everything should be set up just fine. Otherwise: ``` ~/.../swh-scheduler$ git clone https://forge.softwareheritage.org/source/swh-storage-testdata.git ../swh-storage-testdata ``` ### Required services Unit tests that require a running celery broker use an in-memory broker/result backend by default, but you can choose to use a real broker by setting the `CELERY_BROKER_URL` and `CELERY_RESULT_BACKEND` environment variables. For example: ``` $ CELERY_BROKER_URL=amqp://localhost pifpaf run postgresql nosetests ..................................... ---------------------------------------------------------------------- Ran 37 tests in 15.578s OK ``` Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/requirements-swh.txt b/requirements-swh.txt index f403adf..22e2eb3 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1 +1,2 @@ swh.core >= 0.0.51 +swh.storage >= 0.0.129 diff --git a/requirements.txt b/requirements.txt index 6964b75..5d3189d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,16 @@ # Add here external Python modules dependencies, one per line. Module names # should match https://pypi.python.org/pypi names.
For the full spec or # dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html arrow celery >= 4 Click elasticsearch > 5.4 flask kombu psycopg2 +pyyaml vcversioner # test dependencies # hypothesis diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 4d973b0..13e9516 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,65 +1,65 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.49 +Version: 0.0.50 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN +Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler -Project-URL: Funding, https://www.softwareheritage.org/donate Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new content from a Git repository) and one-off activities (e.g., loading a specific version of a source package). # Tests ## Running tests manually ### Test data To be able to run (unit) tests, you need to have the [[https://forge.softwareheritage.org/source/swh-storage-testdata.git|swh-storage-testdata]] in the parent directory. If you have set your environment following the [[ https://docs.softwareheritage.org/devel/getting-started.html#getting-started|Getting started]] document, everything should be set up just fine. Otherwise: ``` ~/.../swh-scheduler$ git clone https://forge.softwareheritage.org/source/swh-storage-testdata.git ../swh-storage-testdata ``` ### Required services Unit tests that require a running celery broker use an in-memory broker/result backend by default, but you can choose to use a real broker by setting the `CELERY_BROKER_URL` and `CELERY_RESULT_BACKEND` environment variables. For example: ``` $ CELERY_BROKER_URL=amqp://localhost pifpaf run postgresql nosetests .....................................
---------------------------------------------------------------------- Ran 37 tests in 15.578s OK ``` Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index 64c7d08..0e8ed6b 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,61 +1,60 @@ MANIFEST.in Makefile README.md requirements-swh.txt requirements.txt setup.py version.txt bin/swh-worker-control swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt swh/scheduler/__init__.py swh/scheduler/backend.py swh/scheduler/backend_es.py swh/scheduler/cli.py swh/scheduler/cli_utils.py swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/api/__init__.py swh/scheduler/api/client.py swh/scheduler/api/server.py swh/scheduler/api/wsgi.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/listener.py swh/scheduler/celery_backend/runner.py swh/scheduler/sql/30-swh-schema.sql swh/scheduler/sql/40-swh-func.sql swh/scheduler/sql/50-swh-data.sql swh/scheduler/sql/60-swh-indexes.sql swh/scheduler/sql/updater/10-swh-init.sql swh/scheduler/sql/updater/30-swh-schema.sql swh/scheduler/sql/updater/40-swh-func.sql swh/scheduler/tests/__init__.py swh/scheduler/tests/conftest.py swh/scheduler/tests/tasks.py swh/scheduler/tests/test_api_client.py swh/scheduler/tests/test_celery_tasks.py swh/scheduler/tests/test_cli.py swh/scheduler/tests/test_scheduler.py swh/scheduler/tests/test_server.py swh/scheduler/tests/test_utils.py swh/scheduler/tests/updater/__init__.py swh/scheduler/tests/updater/conftest.py swh/scheduler/tests/updater/test_backend.py swh/scheduler/tests/updater/test_consumer.py swh/scheduler/tests/updater/test_events.py swh/scheduler/tests/updater/test_ghtorrent.py swh/scheduler/tests/updater/test_writer.py swh/scheduler/updater/__init__.py swh/scheduler/updater/backend.py swh/scheduler/updater/consumer.py swh/scheduler/updater/events.py swh/scheduler/updater/writer.py swh/scheduler/updater/ghtorrent/__init__.py -swh/scheduler/updater/ghtorrent/cli.py -swh/scheduler/updater/ghtorrent/fake.py \ No newline at end of file +swh/scheduler/updater/ghtorrent/cli.py \ No newline at end of file diff --git a/swh.scheduler.egg-info/requires.txt b/swh.scheduler.egg-info/requires.txt index a0ddd24..bc2f647 100644 --- a/swh.scheduler.egg-info/requires.txt +++ b/swh.scheduler.egg-info/requires.txt @@ -1,15 +1,17 @@ arrow celery>=4 Click elasticsearch>5.4 flask kombu psycopg2 +pyyaml vcversioner swh.core>=0.0.51 +swh.storage>=0.0.129 [testing] pytest<4 pytest-postgresql celery>=4 hypothesis>=3.11.0 diff --git a/swh/scheduler/celery_backend/config.py b/swh/scheduler/celery_backend/config.py index ef4ab37..7d3a3dd 100644 --- a/swh/scheduler/celery_backend/config.py +++ b/swh/scheduler/celery_backend/config.py @@ -1,262 +1,271 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, 
or any later version # See top-level LICENSE file for more information import logging import os import urllib.parse from celery import Celery from celery.signals import setup_logging, celeryd_after_setup from celery.utils.log import ColorFormatter from celery.worker.control import Panel from kombu import Exchange, Queue from kombu.five import monotonic as _monotonic import requests from swh.scheduler import CONFIG as SWH_CONFIG from swh.core.config import load_named_config, merge_configs from swh.core.logger import JournalHandler DEFAULT_CONFIG_NAME = 'worker' CONFIG_NAME_ENVVAR = 'SWH_WORKER_INSTANCE' CONFIG_NAME_TEMPLATE = 'worker/%s' DEFAULT_CONFIG = { 'task_broker': ('str', 'amqp://guest@localhost//'), 'task_modules': ('list[str]', []), 'task_queues': ('list[str]', []), 'task_soft_time_limit': ('int', 0), } logger = logging.getLogger(__name__) @setup_logging.connect -def setup_log_handler(loglevel=None, logfile=None, format=None, - colorize=None, log_console=True, **kwargs): +def setup_log_handler(loglevel=None, logfile=None, format=None, colorize=None, + log_console=None, log_journal=None, **kwargs): """Setup logging according to Software Heritage preferences. We use the command-line loglevel for tasks only, as we never really care about the debug messages from celery. """ if loglevel is None: loglevel = logging.DEBUG if isinstance(loglevel, str): loglevel = logging._nameToLevel[loglevel] formatter = logging.Formatter(format) root_logger = logging.getLogger('') root_logger.setLevel(logging.INFO) - if loglevel <= logging.DEBUG: + log_target = os.environ.get('SWH_LOG_TARGET', 'console') + if log_target == 'console': log_console = True + elif log_target == 'journal': + log_journal = True + + # this looks for log levels *higher* than DEBUG + if loglevel <= logging.DEBUG and log_console is None: + log_console = True + if log_console: color_formatter = ColorFormatter(format) if colorize else formatter console = logging.StreamHandler() console.setLevel(logging.DEBUG) console.setFormatter(color_formatter) root_logger.addHandler(console) - systemd_journal = JournalHandler() - systemd_journal.setLevel(logging.DEBUG) - systemd_journal.setFormatter(formatter) - root_logger.addHandler(systemd_journal) + if log_journal: + systemd_journal = JournalHandler() + systemd_journal.setLevel(logging.DEBUG) + systemd_journal.setFormatter(formatter) + root_logger.addHandler(systemd_journal) logging.getLogger('celery').setLevel(logging.INFO) # Silence amqp heartbeat_tick messages logger = logging.getLogger('amqp') logger.addFilter(lambda record: not record.msg.startswith( 'heartbeat_tick')) logger.setLevel(loglevel) # Silence useless "Starting new HTTP connection" messages logging.getLogger('urllib3').setLevel(logging.WARNING) logging.getLogger('swh').setLevel(loglevel) # get_task_logger makes the swh tasks loggers children of celery.task logging.getLogger('celery.task').setLevel(loglevel) return loglevel @celeryd_after_setup.connect def setup_queues_and_tasks(sender, instance, **kwargs): """Signal called on worker start. This automatically registers swh.scheduler.task.Task subclasses as available celery tasks. This also subscribes the worker to the "implicit" per-task queues defined for these task classes. 
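The `setup_log_handler` change above selects handlers from the `SWH_LOG_TARGET` environment variable ('console' by default, 'journal' for systemd), while still defaulting to console output for DEBUG-level runs. A minimal standalone sketch of that selection logic, illustrative only and not part of the patch:

```
import logging
import os

def resolve_log_targets(loglevel, log_console=None, log_journal=None):
    # SWH_LOG_TARGET picks the default handler: 'console' (default) or 'journal'
    target = os.environ.get('SWH_LOG_TARGET', 'console')
    if target == 'console':
        log_console = True
    elif target == 'journal':
        log_journal = True
    # verbose runs (DEBUG or lower) still get console output unless overridden
    if loglevel <= logging.DEBUG and log_console is None:
        log_console = True
    return bool(log_console), bool(log_journal)

# with SWH_LOG_TARGET unset in the environment:
assert resolve_log_targets(logging.INFO) == (True, False)
```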
""" logger.info('Setup Queues & Tasks for %s', sender) instance.app.conf['worker_name'] = sender @Panel.register def monotonic(state): """Get the current value for the monotonic clock""" return {'monotonic': _monotonic()} def route_for_task(name, args, kwargs, options, task=None, **kw): """Route tasks according to the task_queue attribute in the task class""" if name is not None and name.startswith('swh.'): return {'queue': name} def get_queue_stats(app, queue_name): """Get the statistics regarding a queue on the broker. Arguments: queue_name: name of the queue to check Returns a dictionary raw from the RabbitMQ management API; or `None` if the current configuration does not use RabbitMQ. Interesting keys: - Consumers (number of consumers for the queue) - messages (number of messages in queue) - messages_unacknowledged (number of messages currently being processed) Documentation: https://www.rabbitmq.com/management.html#http-api """ conn_info = app.connection().info() if conn_info['transport'] == 'memory': # We're running in a test environment, without RabbitMQ. return None url = 'http://{hostname}:{port}/api/queues/{vhost}/{queue}'.format( hostname=conn_info['hostname'], port=conn_info['port'] + 10000, vhost=urllib.parse.quote(conn_info['virtual_host'], safe=''), queue=urllib.parse.quote(queue_name, safe=''), ) credentials = (conn_info['userid'], conn_info['password']) r = requests.get(url, auth=credentials) if r.status_code == 404: return {} if r.status_code != 200: raise ValueError('Got error %s when reading queue stats: %s' % ( r.status_code, r.json())) return r.json() def get_queue_length(app, queue_name): """Shortcut to get a queue's length""" stats = get_queue_stats(app, queue_name) if stats: return stats.get('messages') def register_task_class(app, name, cls): """Register a class-based task under the given name""" if name in app.tasks: return task_instance = cls() task_instance.name = name app.register_task(task_instance) INSTANCE_NAME = os.environ.get(CONFIG_NAME_ENVVAR) CONFIG_NAME = os.environ.get('SWH_CONFIG_FILENAME') CONFIG = {} if CONFIG_NAME: # load the celery config from the main config file given as # SWH_CONFIG_FILENAME environment variable. # This is expected to have a [celery] section in which we have the # celery specific configuration. SWH_CONFIG.clear() SWH_CONFIG.update(load_named_config(CONFIG_NAME)) CONFIG = SWH_CONFIG.get('celery') if not CONFIG: # otherwise, back to compat config loading mechanism if INSTANCE_NAME: CONFIG_NAME = CONFIG_NAME_TEMPLATE % INSTANCE_NAME else: CONFIG_NAME = DEFAULT_CONFIG_NAME # Load the Celery config CONFIG = load_named_config(CONFIG_NAME, DEFAULT_CONFIG) # Celery Queues CELERY_QUEUES = [Queue('celery', Exchange('celery'), routing_key='celery')] CELERY_DEFAULT_CONFIG = dict( # Timezone configuration: all in UTC enable_utc=True, timezone='UTC', # Imported modules imports=CONFIG.get('task_modules', []), # Time (in seconds, or a timedelta object) for when after stored task # tombstones will be deleted. None means to never expire results. result_expires=None, # A string identifying the default serialization method to use. Can # be json (default), pickle, yaml, msgpack, or any custom # serialization methods that have been registered with task_serializer='msgpack', # Result serialization format result_serializer='msgpack', # Late ack means the task messages will be acknowledged after the task has # been executed, not just before, which is the default behavior. 
task_acks_late=True, # A string identifying the default serialization method to use. # Can be pickle (default), json, yaml, msgpack or any custom serialization # methods that have been registered with kombu.serialization.registry accept_content=['msgpack', 'json'], # If True the task will report its status as “started” # when the task is executed by a worker. task_track_started=True, # Default compression used for task messages. Can be gzip, bzip2 # (if available), or any custom compression schemes registered # in the Kombu compression registry. # result_compression='bzip2', # task_compression='bzip2', # Disable all rate limits, even if tasks has explicit rate limits set. # (Disabling rate limits altogether is recommended if you don’t have any # tasks using them.) worker_disable_rate_limits=True, # Task routing task_routes=route_for_task, # Allow pool restarts from remote worker_pool_restarts=True, # Do not prefetch tasks worker_prefetch_multiplier=1, # Send events worker_send_task_events=True, # Do not send useless task_sent events task_send_sent_event=False, ) def build_app(config=None): config = merge_configs( {k: v for (k, (_, v)) in DEFAULT_CONFIG.items()}, config or {}) config['task_queues'] = CELERY_QUEUES + [ Queue(queue, Exchange(queue), routing_key=queue) for queue in config.get('task_queues', ())] logger.debug('Creating a Celery app with %s', config) # Instantiate the Celery app app = Celery(broker=config['task_broker'], task_cls='swh.scheduler.task:SWHTask') app.add_defaults(CELERY_DEFAULT_CONFIG) app.add_defaults(config) return app app = build_app(CONFIG) # XXX for BW compat Celery.get_queue_length = get_queue_length diff --git a/swh/scheduler/celery_backend/listener.py b/swh/scheduler/celery_backend/listener.py index 038bee9..b137a82 100644 --- a/swh/scheduler/celery_backend/listener.py +++ b/swh/scheduler/celery_backend/listener.py @@ -1,202 +1,198 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import logging import time import sys import click from arrow import utcnow from kombu import Queue import celery from celery.events import EventReceiver class ReliableEventReceiver(EventReceiver): def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix='celeryev', accept=None): super(ReliableEventReceiver, self).__init__( channel, handlers, routing_key, node_id, app, queue_prefix, accept) self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), exchange=self.exchange, routing_key=self.routing_key, auto_delete=False, durable=True) def get_consumers(self, consumer, channel): return [consumer(queues=[self.queue], callbacks=[self._receive], no_ack=False, accept=self.accept)] def _receive(self, bodies, message): if not isinstance(bodies, list): # celery<4 returned body as element bodies = [bodies] for body in bodies: type, body = self.event_from_message(body) self.process(type, body, message) def process(self, type, event, message): """Process the received event by dispatching it to the appropriate handler.""" handler = self.handlers.get(type) or self.handlers.get('*') handler and handler(event, message) ACTION_SEND_DELAY = datetime.timedelta(seconds=1.0) ACTION_QUEUE_MAX_LENGTH = 1000 def event_monitor(app, backend): logger = logging.getLogger('swh.scheduler.listener') actions = { 'last_send': utcnow() - 
2*ACTION_SEND_DELAY, 'queue': [], } def try_perform_actions(actions=actions): logger.debug('Try perform pending actions') if actions['queue'] and ( len(actions['queue']) > ACTION_QUEUE_MAX_LENGTH or utcnow() - actions['last_send'] > ACTION_SEND_DELAY): perform_actions(actions) def perform_actions(actions, backend=backend): logger.info('Perform %s pending actions' % len(actions['queue'])) action_map = { 'start_task_run': backend.start_task_run, 'end_task_run': backend.end_task_run, } messages = [] db = backend.get_db() cursor = db.cursor(None) for action in actions['queue']: messages.append(action['message']) function = action_map[action['action']] args = action.get('args', ()) kwargs = action.get('kwargs', {}) kwargs['cur'] = cursor function(*args, **kwargs) db.conn.commit() for message in messages: if not message.acknowledged: message.ack() - else: - logger.info('message already acknowledged: %s', message) actions['queue'] = [] actions['last_send'] = utcnow() def queue_action(action, actions=actions): actions['queue'].append(action) try_perform_actions() def catchall_event(event, message): logger.debug('event: %s %s', event['type'], event.get('name', 'N/A')) if not message.acknowledged: message.ack() - else: - logger.info('message already acknowledged: %s', message) try_perform_actions() def task_started(event, message): logger.debug('task_started: %s %s', event['type'], event.get('name', 'N/A')) queue_action({ 'action': 'start_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'metadata': { 'worker': event['hostname'], }, }, 'message': message, }) def task_succeeded(event, message): logger.debug('task_succeeded: event: %s' % event) logger.debug(' message: %s' % message) result = event['result'] logger.debug('task_succeeded: result: %s' % result) try: status = result.get('status') if status == 'success': status = 'eventful' if result.get('eventful') else 'uneventful' except Exception: status = 'eventful' if result else 'uneventful' queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': status, 'result': result, }, 'message': message, }) def task_failed(event, message): logger.debug('task_failed: event: %s' % event) logger.debug(' message: %s' % message) queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': 'failed', }, 'message': message, }) recv = ReliableEventReceiver( celery.current_app.connection(), app=celery.current_app, handlers={ 'task-started': task_started, 'task-result': task_succeeded, 'task-failed': task_failed, '*': catchall_event, }, node_id='listener', ) errors = 0 while True: try: recv.capture(limit=None, timeout=None, wakeup=True) errors = 0 except KeyboardInterrupt: logger.exception('Keyboard interrupt, exiting') break except Exception: logger.exception('Unexpected exception') if errors < 5: time.sleep(errors) errors += 1 else: logger.error('Too many consecutive errors, exiting') sys.exit(1) @click.command() @click.pass_context def main(ctx): click.echo("Deprecated! 
Use 'swh-scheduler listener' instead.", err=True) ctx.exit(1) if __name__ == '__main__': main() diff --git a/swh/scheduler/cli.py b/swh/scheduler/cli.py index 5428f98..64ff8e3 100644 --- a/swh/scheduler/cli.py +++ b/swh/scheduler/cli.py @@ -1,749 +1,808 @@ # Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import arrow import click import csv import itertools import json import locale import logging import time import datetime from swh.core import utils, config +from swh.storage import get_storage +from swh.storage.algos.origin import iter_origins + from . import compute_nb_tasks_from from .backend_es import SWHElasticSearchClient from . import get_scheduler, DEFAULT_CONFIG -from .cli_utils import parse_options +from .cli_utils import parse_options, schedule_origin_batches locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def format_dict(d): ret = {} for k, v in d.items(): if isinstance(v, (arrow.Arrow, datetime.date, datetime.datetime)): v = arrow.get(v).format() elif isinstance(v, dict): v = format_dict(v) ret[k] = v return ret def pretty_print_list(list, indent=0): """Pretty-print a list""" - return ''.join('%s%s\n' % (' ' * indent, item) for item in list) + return ''.join('%s%r\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent=0): """Pretty-print a list""" - return ''.join('%s%s: %s\n' % + return ''.join('%s%s: %r\n' % (' ' * indent, click.style(key, bold=True), value) - for key, value in dict.items()) + for key, value in sorted(dict.items())) def pretty_print_run(run, indent=4): fmt = ('{indent}{backend_id} [{status}]\n' '{indent} scheduled: {scheduled} [{started}:{ended}]') return fmt.format(indent=' '*indent, **format_dict(run)) def pretty_print_task(task, full=False): """Pretty-print a task If 'full' is True, also print the status and priority fields. >>> task = { ... 'id': 1234, ... 'arguments': { - ... 'args': ['foo', 'bar'], - ... 'kwargs': {'key': 'value'}, + ... 'args': ['foo', 'bar', True], + ... 'kwargs': {'key': 'value', 'key2': 42}, ... }, ... 'current_interval': datetime.timedelta(hours=1), ... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818), ... 'policy': 'oneshot', ... 'priority': None, ... 'status': 'next_run_not_scheduled', ... 'type': 'test_task', ... } >>> print(click.unstyle(pretty_print_task(task))) Task 1234 Next run: ... (2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Args: - foo - bar + 'foo' + 'bar' + True Keyword args: - key: value + key: 'value' + key2: 42 >>> print(click.unstyle(pretty_print_task(task, full=True))) Task 1234 Next run: ... 
(2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: - foo - bar + 'foo' + 'bar' + True Keyword args: - key: value + key: 'value' + key2: 42 """ next_run = arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'] or '', '\n', click.style(' Policy: ', bold=True), task['policy'] or '', '\n', ] if full: lines += [ click.style(' Status: ', bold=True), task['status'] or '', '\n', click.style(' Priority: ', bold=True), task['priority'] or '', '\n', ] lines += [ click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @click.group(context_settings=CONTEXT_SETTINGS) @click.option('--config-file', '-C', default=None, type=click.Path(exists=True, dir_okay=False,), help="Configuration file.") @click.option('--database', '-d', default=None, help="Scheduling database DSN (imply cls is 'local')") @click.option('--url', '-u', default=None, help="Scheduler's url access (imply cls is 'remote')") @click.option('--log-level', '-l', default='INFO', type=click.Choice(logging._nameToLevel.keys()), help="Log level (default to INFO)") @click.option('--no-stdout', is_flag=True, default=False, help="Do NOT output logs on the console") @click.pass_context def cli(ctx, config_file, database, url, log_level, no_stdout): """Software Heritage Scheduler CLI interface Default to use the the local scheduler instance (plugged to the main scheduler db). """ from swh.scheduler.celery_backend.config import setup_log_handler log_level = setup_log_handler( loglevel=log_level, colorize=False, format='[%(levelname)s] %(name)s -- %(message)s', log_console=not no_stdout) ctx.ensure_object(dict) logger = logging.getLogger(__name__) scheduler = None conf = config.read(config_file, DEFAULT_CONFIG) if 'scheduler' not in conf: raise ValueError("missing 'scheduler' configuration") if database: conf['scheduler']['cls'] = 'local' conf['scheduler']['args']['db'] = database elif url: conf['scheduler']['cls'] = 'remote' conf['scheduler']['args'] = {'url': url} sched_conf = conf['scheduler'] try: logger.debug('Instanciating scheduler with %s' % ( sched_conf)) scheduler = get_scheduler(**sched_conf) except ValueError: # it's the subcommand to decide whether not having a proper # scheduler instance is a problem. pass ctx.obj['scheduler'] = scheduler ctx.obj['config'] = conf ctx.obj['loglevel'] = log_level @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], type=click.Choice([ 'type', 'args', 'kwargs', 'policy', 'next_run']), help='columns present in the CSV file') @click.option('--delimiter', '-d', default=',') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. 
The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). Use sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ tasks = [] now = arrow.utcnow() scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = scheduler.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('add') @click.argument('type', nargs=1, required=True) @click.argument('options', nargs=-1) @click.option('--policy', '-p', default='recurring', type=click.Choice(['recurring', 'oneshot'])) @click.option('--priority', '-P', default=None, type=click.Choice(['low', 'normal', 'high'])) @click.option('--next-run', '-n', default=None) @click.pass_context def schedule_task(ctx, type, options, policy, priority, next_run): """Schedule one task from arguments. + The first argument is the name of the task type, further ones are + positional and keyword argument(s) of the task, in YAML format. + Keyword args are of the form key=value. + Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task add swh-lister-pypi swh-scheduler --database 'service=swh-scheduler' \ task add swh-lister-debian --policy=oneshot distribution=stretch Note: if the priority is not given, the task won't have the priority set, which is considered as the lowest priority level. 
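Since positional and keyword task arguments are now parsed as YAML (see the `cli_utils.py` changes later in this diff), here is a short sketch of what a `task add` invocation hands to the task; the option strings are illustrative:

```
from swh.scheduler.cli_utils import parse_options

# what `task add swh-lister-debian stretch distribution=stretch force=false`
# would pass on as positional/keyword task arguments
args, kwargs = parse_options(['stretch', 'distribution=stretch', 'force=false'])
assert args == ['stretch']
assert kwargs == {'distribution': 'stretch', 'force': False}
```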
""" scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') now = arrow.utcnow() (args, kw) = parse_options(options) task = {'type': type, 'policy': policy, 'priority': priority, 'arguments': { 'args': args, 'kwargs': kw, }, 'next_run': DATETIME.convert(next_run or now, None, None), } created = scheduler.create_tasks([task]) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) +@task.command('schedule_origins') +@click.argument('type', nargs=1, required=True) +@click.argument('options', nargs=-1) +@click.option('--batch-size', '-b', 'origin_batch_size', + default=10, show_default=True, type=int, + help="Number of origins per task") +@click.option('--min-id', + default=0, show_default=True, type=int, + help="Only schedule tasks for origins whose ID is greater") +@click.option('--max-id', + default=None, type=int, + help="Only schedule tasks for origins whose ID is lower") +@click.option('--storage-url', '-g', + help="URL of the (graph) storage API") +@click.option('--dry-run/--no-dry-run', is_flag=True, + default=False, + help='List only what would be scheduled.') +@click.pass_context +def schedule_origin_metadata_index( + ctx, type, options, storage_url, origin_batch_size, + min_id, max_id, dry_run): + """Schedules tasks for origins that are already known. + + The first argument is the name of the task type, further ones are + keyword argument(s) of the task in the form key=value, where value is + in YAML format. + + Usage sample: + + swh-scheduler --database 'service=swh-scheduler' \ + task schedule_origins indexer_origin_metadata + """ + scheduler = ctx.obj['scheduler'] + storage = get_storage('remote', {'url': storage_url}) + if dry_run: + scheduler = None + + (args, kw) = parse_options(options) + if args: + raise click.ClickException('Only keywords arguments are allowed.') + + origins = iter_origins(storage, origin_from=min_id, origin_to=max_id) + origin_ids = (origin['id'] for origin in origins) + + schedule_origin_batches( + scheduler, type, origin_ids, origin_batch_size, kw) + + @task.command('list-pending') @click.argument('task-types', required=True, nargs=-1) @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context def list_pending_tasks(ctx, task_types, limit, before): """List the tasks that are going to be run. 
You can override the number of tasks to fetch """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') num_tasks, num_tasks_priority = compute_nb_tasks_from(limit) output = [] for task_type in task_types: pending = scheduler.peek_ready_tasks( task_type, timestamp=before, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) output.append('Found %d %s tasks\n' % ( len(pending), task_type)) for task in pending: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('list') @click.option('--task-id', '-i', default=None, multiple=True, metavar='ID', help='List only tasks whose id is ID.') @click.option('--task-type', '-t', default=None, multiple=True, metavar='TYPE', help='List only tasks of type TYPE') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch.') @click.option('--status', '-s', multiple=True, metavar='STATUS', default=None, help='List tasks whose status is STATUS.') @click.option('--policy', '-p', default=None, type=click.Choice(['recurring', 'oneshot']), help='List tasks whose policy is POLICY.') @click.option('--priority', '-P', default=None, multiple=True, type=click.Choice(['all', 'low', 'normal', 'high']), help='List tasks whose priority is PRIORITY.') @click.option('--before', '-b', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run before the given date.') @click.option('--after', '-a', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run after the given date.') @click.option('--list-runs', '-r', is_flag=True, default=False, help='Also list past executions of each task.') @click.pass_context def list_tasks(ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs): """List tasks. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if not task_type: task_type = [x['type'] for x in scheduler.get_task_types()] # if task_id is not given, default value for status is # 'next_run_not_scheduled' # if task_id is given, default status is 'all' if task_id is None and status is None: status = ['next_run_not_scheduled'] if status and 'all' in status: status = None if priority and 'all' in priority: priority = None output = [] tasks = scheduler.search_tasks( task_id=task_id, task_type=task_type, status=status, priority=priority, policy=policy, before=before, after=after, limit=limit) if list_runs: runs = {t['id']: [] for t in tasks} for r in scheduler.get_task_runs([task['id'] for task in tasks]): runs[r['task']].append(r) else: runs = {} output.append('Found %d tasks\n' % ( len(tasks))) for task in tasks: output.append(pretty_print_task(task, full=True)) if runs.get(task['id']): output.append(click.style(' Executions:', bold=True)) for run in runs[task['id']]: output.append(pretty_print_run(run, indent=4)) click.echo('\n'.join(output)) @task.command('respawn') @click.argument('task-ids', required=True, nargs=-1) @click.option('--next-run', '-n', required=False, type=DATETIME, metavar='DATETIME', default=None, help='Re spawn the selected tasks at this date') @click.pass_context def respawn_tasks(ctx, task_ids, next_run): """Respawn tasks. Respawn tasks given by their ids (see the 'task list' command to find task ids) at the given date (immediately by default). Eg. 
swh-scheduler task respawn 1 3 12 """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if next_run is None: next_run = arrow.utcnow() output = [] scheduler.set_status_tasks( task_ids, status='next_run_not_scheduled', next_run=next_run) output.append('Respawn tasks %s\n' % (task_ids,)) click.echo('\n'.join(output)) @task.command('archive') @click.option('--before', '-b', default=None, help='''Task whose ended date is anterior will be archived. Default to current month's first day.''') @click.option('--after', '-a', default=None, help='''Task whose ended date is after the specified date will be archived. Default to prior month's first day.''') @click.option('--batch-index', default=1000, type=click.INT, help='Batch size of tasks to read from db to archive') @click.option('--bulk-index', default=200, type=click.INT, help='Batch size of tasks to bulk index') @click.option('--batch-clean', default=1000, type=click.INT, help='Batch size of task to clean after archival') @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='Default to list only what would be archived.') @click.option('--verbose', is_flag=True, default=False, help='Verbose mode') @click.option('--cleanup/--no-cleanup', is_flag=True, default=True, help='Clean up archived tasks (default)') @click.option('--start-from', type=click.INT, default=-1, help='(Optional) default task id to start from. Default is -1.') @click.pass_context def archive_tasks(ctx, before, after, batch_index, bulk_index, batch_clean, dry_run, verbose, cleanup, start_from): """Archive task/task_run whose (task_type is 'oneshot' and task_status is 'completed') or (task_type is 'recurring' and task_status is 'disabled'). With --dry-run flag set (default), only list those. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') es_client = SWHElasticSearchClient() logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) log = logging.getLogger('swh.scheduler.cli.archive') logging.getLogger('urllib3').setLevel(logging.WARN) logging.getLogger('elasticsearch').setLevel(logging.WARN) if dry_run: log.info('**DRY-RUN** (only reading db)') if not cleanup: log.info('**NO CLEANUP**') now = arrow.utcnow() # Default to archive tasks from a rolling month starting the week # prior to the current one if not before: before = now.shift(weeks=-1).format('YYYY-MM-DD') if not after: after = now.shift(weeks=-1).shift(months=-1).format('YYYY-MM-DD') log.debug('index: %s; cleanup: %s; period: [%s ; %s]' % ( not dry_run, not dry_run and cleanup, after, before)) def group_by_index_name(data, es_client=es_client): """Given a data record, determine the index's name through its ending date. This varies greatly depending on the task_run's status. 
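Note that the `itertools.groupby` call in `index_data` below only groups *consecutive* tasks sharing an index name, so months arriving out of order simply produce several smaller bulk batches rather than one. A minimal, self-contained illustration with hypothetical records (the index-name format is a stand-in, not the real `compute_index_name` output):

```
import itertools

records = [
    {'task_id': 1, 'date': '2019-01-03'},
    {'task_id': 2, 'date': '2019-01-17'},
    {'task_id': 3, 'date': '2019-02-05'},
]

def index_name(record):
    # stand-in for group_by_index_name: one group per year-month
    return 'swh-tasks-' + record['date'][:7]

for name, group in itertools.groupby(records, key=index_name):
    print(name, [r['task_id'] for r in group])
# swh-tasks-2019-01 [1, 2]
# swh-tasks-2019-02 [3]
```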
""" date = data.get('started') if not date: date = data['scheduled'] return es_client.compute_index_name(date.year, date.month) def index_data(before, last_id, batch_index): tasks_in = scheduler.filter_task_to_archive( after, before, last_id=last_id, limit=batch_index) for index_name, tasks_group in itertools.groupby( tasks_in, key=group_by_index_name): log.debug('Index tasks to %s' % index_name) if dry_run: for task in tasks_group: yield task continue yield from es_client.streaming_bulk( index_name, tasks_group, source=['task_id', 'task_run_id'], chunk_size=bulk_index, log=log) gen = index_data(before, last_id=start_from, batch_index=batch_index) if cleanup: for task_ids in utils.grouper(gen, n=batch_clean): task_ids = list(task_ids) log.info('Clean up %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) if dry_run: # no clean up continue ctx.obj['scheduler'].delete_archived_tasks(task_ids) else: for task_ids in utils.grouper(gen, n=batch_index): task_ids = list(task_ids) log.info('Indexed %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) @cli.command('runner') @click.option('--period', '-p', default=0, help=('Period (in s) at witch pending tasks are checked and ' 'executed. Set to 0 (default) for a one shot.')) @click.pass_context def runner(ctx, period): """Starts a swh-scheduler runner service. This process is responsible for checking for ready-to-run tasks and schedule them.""" from swh.scheduler.celery_backend.runner import run_ready_tasks from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() logger = logging.getLogger(__name__ + '.runner') scheduler = ctx.obj['scheduler'] logger.debug('Scheduler %s' % scheduler) try: while True: logger.debug('Run ready tasks') try: ntasks = len(run_ready_tasks(scheduler, app)) if ntasks: logger.info('Scheduled %s tasks', ntasks) except Exception: logger.exception('Unexpected error in run_ready_tasks()') if not period: break time.sleep(period) except KeyboardInterrupt: ctx.exit(0) @cli.command('listener') @click.pass_context def listener(ctx): """Starts a swh-scheduler listener service. This service is responsible for listening at task lifecycle events and handle their workflow status in the database.""" scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() from swh.scheduler.celery_backend.listener import event_monitor event_monitor(app, backend=scheduler) @cli.command('api-server') @click.option('--host', default='0.0.0.0', help="Host to run the scheduler server api") @click.option('--port', default=5008, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=None, help=("Indicates if the server should run in debug mode. " "Defaults to True if log-level is DEBUG, False otherwise.") ) @click.pass_context def api_server(ctx, host, port, debug): """Starts a swh-scheduler API HTTP server. 
""" if ctx.obj['config']['scheduler']['cls'] == 'remote': click.echo("The API server can only be started with a 'local' " "configuration", err=True) ctx.exit(1) from swh.scheduler.api import server server.app.config.update(ctx.obj['config']) if debug is None: debug = ctx.obj['loglevel'] <= logging.DEBUG server.app.run(host, port=port, debug=bool(debug)) @cli.group('task-type') @click.pass_context def task_type(ctx): """Manipulate task types.""" pass @task_type.command('list') @click.option('--verbose', '-v', is_flag=True, default=False, help='Verbose mode') @click.option('--task_type', '-t', multiple=True, default=None, help='List task types of given type') @click.option('--task_name', '-n', multiple=True, default=None, help='List task types of given backend task name') @click.pass_context def list_task_types(ctx, verbose, task_type, task_name): click.echo("Known task types:") if verbose: tmpl = click.style('{type}: ', bold=True) + '''{backend_name} {description} interval: {default_interval} [{min_interval}, {max_interval}] backoff_factor: {backoff_factor} max_queue_length: {max_queue_length} num_retries: {num_retries} retry_delay: {retry_delay} ''' else: tmpl = '{type}:\n {description}' for tasktype in sorted(ctx.obj['scheduler'].get_task_types(), key=lambda x: x['type']): if task_type and tasktype['type'] not in task_type: continue if task_name and tasktype['backend_name'] not in task_name: continue click.echo(tmpl.format(**tasktype)) @task_type.command('add') @click.argument('type', required=1) @click.argument('task-name', required=1) @click.argument('description', required=1) @click.option('--default-interval', '-i', default='90 days', help='Default interval ("90 days" by default)') @click.option('--min-interval', default=None, help='Minimum interval (default interval if not set)') @click.option('--max-interval', '-i', default=None, help='Maximal interval (default interval if not set)') @click.option('--backoff-factor', '-f', type=float, default=1, help='Backoff factor') @click.pass_context def add_task_type(ctx, type, task_name, description, default_interval, min_interval, max_interval, backoff_factor): """Create a new task type """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') task_type = dict( type=type, backend_name=task_name, description=description, default_interval=default_interval, min_interval=min_interval, max_interval=max_interval, backoff_factor=backoff_factor, max_queue_length=None, num_retries=None, retry_delay=None, ) scheduler.create_task_type(task_type) click.echo('OK') @cli.command('updater') @click.option('--verbose/--no-verbose', '-v', default=False, help='Verbose mode') @click.pass_context def updater(ctx, verbose): """Insert tasks in the scheduler from the scheduler-updater's events """ from swh.scheduler.updater.writer import UpdaterWriter UpdaterWriter(**ctx.obj['config']).run() @cli.command('ghtorrent') @click.option('--verbose/--no-verbose', '-v', default=False, help='Verbose mode') @click.pass_context def ghtorrent(ctx, verbose): """Consume events from ghtorrent and write them to cache. 
""" from swh.scheduler.updater.ghtorrent import GHTorrentConsumer from swh.scheduler.updater.backend import SchedulerUpdaterBackend ght_config = ctx.obj['config'].get('ghtorrent', {}) back_config = ctx.obj['config'].get('scheduler_updater', {}) backend = SchedulerUpdaterBackend(**back_config) GHTorrentConsumer(backend, **ght_config).run() def main(): return cli(auto_envvar_prefix='SWH_SCHEDULER') if __name__ == '__main__': main() diff --git a/swh/scheduler/cli_utils.py b/swh/scheduler/cli_utils.py index c135a7c..3d5bd7c 100644 --- a/swh/scheduler/cli_utils.py +++ b/swh/scheduler/cli_utils.py @@ -1,21 +1,87 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import itertools + +import click +import yaml + +from .utils import create_task_dict + +TASK_BATCH_SIZE = 1000 # Number of tasks per query to the scheduler + + +def schedule_origin_batches( + scheduler, task_type, origins, origin_batch_size, kwargs): + nb_origins = 0 + nb_tasks = 0 + + while True: + task_batch = [] + for _ in range(TASK_BATCH_SIZE): + # Group origins + origin_batch = [] + for origin in itertools.islice(origins, origin_batch_size): + origin_batch.append(origin) + nb_origins += len(origin_batch) + if not origin_batch: + break + + # Create a task for these origins + args = [origin_batch] + task_dict = create_task_dict(task_type, 'oneshot', *args, **kwargs) + task_batch.append(task_dict) + + # Schedule a batch of tasks + if not task_batch: + break + nb_tasks += len(task_batch) + if scheduler: + scheduler.create_tasks(task_batch) + click.echo('Scheduled %d tasks (%d origins).' % (nb_tasks, nb_origins)) + + # Print final status. + if nb_tasks: + click.echo('Done.') + else: + click.echo('Nothing to do (no origin metadata matched the criteria).') + + +def parse_argument(option): + try: + return yaml.load(option) + except Exception: + raise click.ClickException('Invalid argument: {}'.format(option)) + def parse_options(options): - """Parses options from a CLI and turns it into Python args and kwargs. + """Parses options from a CLI as YAML and turns it into Python + args and kwargs. >>> parse_options([]) ([], {}) >>> parse_options(['foo', 'bar']) (['foo', 'bar'], {}) - >>> parse_options(['foo=bar']) + >>> parse_options(['[foo, bar]']) + ([['foo', 'bar']], {}) + >>> parse_options(['"foo"', '"bar"']) + (['foo', 'bar'], {}) + >>> parse_options(['foo="bar"']) ([], {'foo': 'bar'}) - >>> parse_options(['foo', 'bar=baz']) + >>> parse_options(['"foo"', 'bar="baz"']) (['foo'], {'bar': 'baz'}) + >>> parse_options(['42', 'bar=False']) + ([42], {'bar': False}) + >>> parse_options(['42', 'bar=false']) + ([42], {'bar': False}) + >>> parse_options(['42', '"foo']) + Traceback (most recent call last): + ... 
+ click.exceptions.ClickException: Invalid argument: "foo """ - args = [x for x in options if '=' not in x] - kw = dict(x.split('=', 1) for x in options if '=' in x) + kw_pairs = [x.split('=', 1) for x in options if '=' in x] + args = [parse_argument(x) for x in options if '=' not in x] + kw = {k: parse_argument(v) for (k, v) in kw_pairs} return (args, kw) diff --git a/swh/scheduler/tests/conftest.py b/swh/scheduler/tests/conftest.py index 1cfde95..cd9ff8f 100644 --- a/swh/scheduler/tests/conftest.py +++ b/swh/scheduler/tests/conftest.py @@ -1,93 +1,96 @@ import os import pytest import glob from datetime import timedelta from swh.core.utils import numfile_sortkey as sortkey from swh.scheduler import get_scheduler from swh.scheduler.tests import SQL_DIR # make sure we are not fooled by CELERY_ config environment vars for var in [x for x in os.environ.keys() if x.startswith('CELERY')]: os.environ.pop(var) import swh.scheduler.celery_backend.config # noqa # this import is needed here to enforce creation of the celery current app # BEFORE the swh_app fixture is called, otherwise the Celery app instance from # celery_backend.config becomes the celery.current_app +# test_cli tests depends on a en/C locale, so ensure it +os.environ['LC_ALL'] = 'C.UTF-8' + DUMP_FILES = os.path.join(SQL_DIR, '*.sql') # celery tasks for testing purpose; tasks themselves should be # in swh/scheduler/tests/tasks.py TASK_NAMES = ['ping', 'multiping', 'add', 'error'] @pytest.fixture(scope='session') def celery_enable_logging(): return True @pytest.fixture(scope='session') def celery_includes(): return [ 'swh.scheduler.tests.tasks', ] @pytest.fixture(scope='session') def celery_parameters(): return { 'task_cls': 'swh.scheduler.task:SWHTask', } @pytest.fixture(scope='session') def celery_config(): return { 'accept_content': ['application/x-msgpack', 'application/json'], 'task_serializer': 'msgpack', 'result_serializer': 'msgpack', } # override the celery_session_app fixture to monkeypatch the 'main' # swh.scheduler.celery_backend.config.app Celery application # with the test application. 
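# Concretely, the swh_app fixture below re-points
# swh.scheduler.celery_backend.config.app at the pytest-celery session app,
# so any code importing that module-level `app` talks to the test broker.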
@pytest.fixture(scope='session') def swh_app(celery_session_app): swh.scheduler.celery_backend.config.app = celery_session_app yield celery_session_app @pytest.fixture def swh_scheduler(request, postgresql_proc, postgresql): scheduler_config = { 'db': 'postgresql://{user}@{host}:{port}/{dbname}'.format( host=postgresql_proc.host, port=postgresql_proc.port, user='postgres', dbname='tests') } all_dump_files = sorted(glob.glob(DUMP_FILES), key=sortkey) cursor = postgresql.cursor() for fname in all_dump_files: with open(fname) as fobj: cursor.execute(fobj.read()) postgresql.commit() scheduler = get_scheduler('local', scheduler_config) for taskname in TASK_NAMES: scheduler.create_task_type({ 'type': 'swh-test-{}'.format(taskname), 'description': 'The {} testing task'.format(taskname), 'backend_name': 'swh.scheduler.tests.tasks.{}'.format(taskname), 'default_interval': timedelta(days=1), 'min_interval': timedelta(hours=6), 'max_interval': timedelta(days=12), }) return scheduler diff --git a/swh/scheduler/tests/test_cli.py b/swh/scheduler/tests/test_cli.py index 7e2a819..4d38d8d 100644 --- a/swh/scheduler/tests/test_cli.py +++ b/swh/scheduler/tests/test_cli.py @@ -1,191 +1,670 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import re import tempfile from unittest.mock import patch from click.testing import CliRunner +import pytest + +from swh.storage.in_memory import Storage from swh.scheduler.cli import cli from swh.scheduler.utils import create_task_dict CLI_CONFIG = ''' scheduler: cls: foo args: {} ''' def invoke(scheduler, catch_exceptions, args): runner = CliRunner() with patch('swh.scheduler.cli.get_scheduler') as get_scheduler_mock, \ tempfile.NamedTemporaryFile('a', suffix='.yml') as config_fd: config_fd.write(CLI_CONFIG) config_fd.seek(0) get_scheduler_mock.return_value = scheduler - result = runner.invoke(cli, ['-C' + config_fd.name] + args) + args = ['-C' + config_fd.name, '-l', 'WARNING'] + args + result = runner.invoke(cli, args) if not catch_exceptions and result.exception: print(result.output) raise result.exception return result def test_schedule_tasks(swh_scheduler): csv_data = ( b'swh-test-ping;[["arg1", "arg2"]];{"key": "value"};' + datetime.datetime.utcnow().isoformat().encode() + b'\n' + b'swh-test-ping;[["arg3", "arg4"]];{"key": "value"};' + datetime.datetime.utcnow().isoformat().encode() + b'\n') with tempfile.NamedTemporaryFile(suffix='.csv') as csv_fd: csv_fd.write(csv_data) csv_fd.seek(0) result = invoke(swh_scheduler, False, [ 'task', 'schedule', '-d', ';', csv_fd.name ]) expected = r''' -\[INFO\] swh.core.config -- Loading config file .* Created 2 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: recurring Args: \['arg1', 'arg2'\] Keyword args: - key: value + key: 'value' Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: recurring Args: \['arg3', 'arg4'\] Keyword args: - key: value + key: 'value' '''.lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_schedule_tasks_columns(swh_scheduler): with tempfile.NamedTemporaryFile(suffix='.csv') as csv_fd: csv_fd.write( b'swh-test-ping;oneshot;["arg1", "arg2"];{"key": "value"}\n') csv_fd.seek(0) result = invoke(swh_scheduler, False, [ 'task', 'schedule', 
'-c', 'type', '-c', 'policy', '-c', 'args', '-c', 'kwargs', '-d', ';', csv_fd.name ]) expected = r''' -\[INFO\] swh.core.config -- Loading config file .* Created 1 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: - arg1 - arg2 + 'arg1' + 'arg2' Keyword args: - key: value + key: 'value' '''.lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_schedule_task(swh_scheduler): result = invoke(swh_scheduler, False, [ 'task', 'add', 'swh-test-ping', 'arg1', 'arg2', 'key=value', ]) expected = r''' -\[INFO\] swh.core.config -- Loading config file .* Created 1 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: recurring Args: - arg1 - arg2 + 'arg1' + 'arg2' Keyword args: - key: value + key: 'value' '''.lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_pending_tasks_none(swh_scheduler): result = invoke(swh_scheduler, False, [ 'task', 'list-pending', 'swh-test-ping', ]) expected = r''' -\[INFO\] swh.core.config -- Loading config file .* Found 0 swh-test-ping tasks '''.lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output -def test_list_pending_tasks_one(swh_scheduler): - task = create_task_dict('swh-test-ping', 'oneshot', key='value') - swh_scheduler.create_tasks([task]) +def test_list_pending_tasks(swh_scheduler): + task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') + task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') + task2['next_run'] += datetime.timedelta(days=1) + swh_scheduler.create_tasks([task1, task2]) result = invoke(swh_scheduler, False, [ 'task', 'list-pending', 'swh-test-ping', ]) expected = r''' -\[INFO\] swh.core.config -- Loading config file .* Found 1 swh-test-ping tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: - key: value + key: 'value1' + +'''.lstrip() + assert result.exit_code == 0, result.output + assert re.fullmatch(expected, result.output, re.MULTILINE), result.output + + swh_scheduler.grab_ready_tasks('swh-test-ping') + + result = invoke(swh_scheduler, False, [ + 'task', 'list-pending', 'swh-test-ping', + ]) + + expected = r''' +Found 0 swh-test-ping tasks '''.lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output -def test_list_pending_tasks_one_filter(swh_scheduler): +def test_list_pending_tasks_filter(swh_scheduler): task = create_task_dict('swh-test-multiping', 'oneshot', key='value') swh_scheduler.create_tasks([task]) result = invoke(swh_scheduler, False, [ 'task', 'list-pending', 'swh-test-ping', ]) expected = r''' -\[INFO\] swh.core.config -- Loading config file .* Found 0 swh-test-ping tasks '''.lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output + + +def test_list_pending_tasks_filter_2(swh_scheduler): + swh_scheduler.create_tasks([ + create_task_dict('swh-test-multiping', 'oneshot', key='value'), + create_task_dict('swh-test-ping', 'oneshot', key='value2'), + ]) + + result = invoke(swh_scheduler, False, [ + 'task', 'list-pending', 'swh-test-ping', + ]) + + expected = r''' +Found 1 swh-test-ping tasks + +Task 2 + Next run: just now \(.*\) + Interval: 1 day, 0:00:00 + Type: 
+  Policy: oneshot
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+# Fails because "task list-pending --limit 3" only returns 2 tasks, due to
+# how compute_nb_tasks_from works.
+@pytest.mark.xfail
+def test_list_pending_tasks_limit(swh_scheduler):
+    swh_scheduler.create_tasks([
+        create_task_dict('swh-test-ping', 'oneshot', key='value%d' % i)
+        for i in range(10)
+    ])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list-pending', 'swh-test-ping', '--limit', '3',
+    ])
+
+    expected = r'''
+Found 2 swh-test-ping tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Args:
+  Keyword args:
+    key: 'value0'
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Args:
+  Keyword args:
+    key: 'value1'
+
+Task 3
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_pending_tasks_before(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task1['next_run'] += datetime.timedelta(days=3)
+    task2['next_run'] += datetime.timedelta(days=1)
+    swh_scheduler.create_tasks([task1, task2])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list-pending', 'swh-test-ping', '--before',
+        (datetime.date.today() + datetime.timedelta(days=2)).isoformat()
+    ])
+
+    expected = r'''
+Found 1 swh-test-ping tasks
+
+Task 2
+  Next run: in a day \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task1['next_run'] += datetime.timedelta(days=3, hours=2)
+    swh_scheduler.create_tasks([task1, task2])
+
+    swh_scheduler.grab_ready_tasks('swh-test-ping')
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list',
+    ])
+
+    expected = r'''
+Found 2 tasks
+
+Task 1
+  Next run: in 3 days \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value1'
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks_id(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3')
+    swh_scheduler.create_tasks([task1, task2, task3])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list', '--task-id', '2',
+    ])
+
+    expected = r'''
+Found 1 tasks
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks_id_2(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3')
+    swh_scheduler.create_tasks([task1, task2, task3])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list', '--task-id', '2', '--task-id', '3'
+    ])
+
+    expected = r'''
+Found 2 tasks
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value2'
+
+Task 3
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value3'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks_type(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-multiping', 'oneshot', key='value2')
+    task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3')
+    swh_scheduler.create_tasks([task1, task2, task3])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list', '--task-type', 'swh-test-ping'
+    ])
+
+    expected = r'''
+Found 2 tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value1'
+
+Task 3
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value3'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks_limit(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3')
+    swh_scheduler.create_tasks([task1, task2, task3])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list', '--limit', '2',
+    ])
+
+    expected = r'''
+Found 2 tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value1'
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks_before(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task1['next_run'] += datetime.timedelta(days=3, hours=2)
+    swh_scheduler.create_tasks([task1, task2])
+
+    swh_scheduler.grab_ready_tasks('swh-test-ping')
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list', '--before',
+        (datetime.date.today() + datetime.timedelta(days=2)).isoformat()
+    ])
+
+    expected = r'''
+Found 1 tasks
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value2'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_tasks_after(swh_scheduler):
+    task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1')
+    task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2')
+    task1['next_run'] += datetime.timedelta(days=3, hours=2)
+    swh_scheduler.create_tasks([task1, task2])
+
+    swh_scheduler.grab_ready_tasks('swh-test-ping')
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list', '--after',
+        (datetime.date.today() + datetime.timedelta(days=2)).isoformat()
+    ])
+
+    expected = r'''
+Found 1 tasks
+
+Task 1
+  Next run: in 3 days \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Status: next_run_not_scheduled
+  Priority:\x20
+  Args:
+  Keyword args:
+    key: 'value1'
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def _fill_storage_with_origins(storage, nb_origins):
+    storage.origin_add([
+        {
+            'type': 'type{}'.format(i),
+            'url': 'http://example.com/{}'.format(i),
+        }
+        for i in range(nb_origins)
+    ])
+
+
+@pytest.fixture
+def storage():
+    """An instance of swh.storage.in_memory.Storage that gets injected
+    into the CLI functions."""
+    storage = Storage()
+    with patch('swh.scheduler.cli.get_storage') as get_storage_mock:
+        get_storage_mock.return_value = storage
+        yield storage
+
+
+@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
+def test_task_schedule_origins_dry_run(
+        swh_scheduler, storage):
+    """Tests the scheduling when origin_batch_size*task_batch_size is a
+    divisor of nb_origins."""
+    _fill_storage_with_origins(storage, 90)
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'schedule_origins', '--dry-run', 'swh-test-ping',
+    ])
+
+    # Check the output
+    expected = r'''
+Scheduled 3 tasks \(30 origins\).
+Scheduled 6 tasks \(60 origins\).
+Scheduled 9 tasks \(90 origins\).
+Done.
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), \
+        repr(result.output)
+
+    # Check scheduled tasks
+    tasks = swh_scheduler.search_tasks()
+    assert len(tasks) == 0
+
+
+@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
+def test_task_schedule_origins(swh_scheduler, storage):
+    """Tests the scheduling when neither origin_batch_size nor
+    task_batch_size is a divisor of nb_origins."""
+    _fill_storage_with_origins(storage, 70)
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'schedule_origins', 'swh-test-ping',
+        '--batch-size', '20',
+    ])
+
+    # Check the output
+    expected = r'''
+Scheduled 3 tasks \(60 origins\).
+Scheduled 4 tasks \(70 origins\).
+Done.
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), \
+        repr(result.output)
+
+    # Check scheduled tasks
+    tasks = swh_scheduler.search_tasks()
+    assert len(tasks) == 4
+    assert tasks[0]['arguments']['args'] == [list(range(1, 21))]
+    assert tasks[1]['arguments']['args'] == [list(range(21, 41))]
+    assert tasks[2]['arguments']['args'] == [list(range(41, 61))]
+    assert tasks[3]['arguments']['args'] == [list(range(61, 71))]
+    assert all(task['arguments']['kwargs'] == {} for task in tasks)
+
+
+def test_task_schedule_origins_kwargs(swh_scheduler, storage):
+    """Tests support of extra keyword-arguments."""
+    _fill_storage_with_origins(storage, 30)
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'schedule_origins', 'swh-test-ping',
+        '--batch-size', '20',
+        'key1="value1"', 'key2="value2"',
+    ])
+
+    # Check the output
+    expected = r'''
+Scheduled 2 tasks \(30 origins\).
+Done.
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), \
+        repr(result.output)
+
+    # Check scheduled tasks
+    tasks = swh_scheduler.search_tasks()
+    assert len(tasks) == 2
+    assert tasks[0]['arguments']['args'] == [list(range(1, 21))]
+    assert tasks[1]['arguments']['args'] == [list(range(21, 31))]
+    assert all(task['arguments']['kwargs'] ==
+               {'key1': 'value1', 'key2': 'value2'}
+               for task in tasks)
diff --git a/swh/scheduler/updater/ghtorrent/fake.py b/swh/scheduler/updater/ghtorrent/fake.py
deleted file mode 100644
index fc5f34f..0000000
--- a/swh/scheduler/updater/ghtorrent/fake.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (C) 2018 The Software Heritage developers
-# See the AUTHORS file at the top-level directory of this distribution
-# License: GNU General Public License version 3, or any later version
-# See top-level LICENSE file for more information
-
-import random
-import string
-
-from arrow import utcnow
-from kombu import Connection
-
-from swh.scheduler.updater.ghtorrent import RabbitMQConn, events
-
-
-class FakeRandomOriginGenerator:
-    def _random_string(self, length):
-        """Build a fake string of length length.
-
-        """
-        return ''.join([
-            random.choice(string.ascii_letters + string.digits)
-            for n in range(length)])
-
-    def generate(self, user_range=range(5, 10), repo_range=range(10, 15)):
-        """Build a fake url
-
-        """
-        length_username = random.choice(user_range)
-        user = self._random_string(length_username)
-        length_repo = random.choice(repo_range)
-        repo = self._random_string(length_repo)
-        return '%s/%s' % (user, repo)
-
-
-class FakeGHTorrentPublisher(RabbitMQConn):
-    """Fake GHTorrent that randomly publishes fake events. Those events
-    are published in similar manner as described by ghtorrent's
-    documentation [2].
-
-    context: stuck with raw ghtorrent so far [1]
-
-    [1] https://github.com/ghtorrent/ghtorrent.org/issues/397#issuecomment-387052462  # noqa
-    [2] http://ghtorrent.org/streaming.html
-
-    """
-
-    ADDITIONAL_CONFIG = {
-        'nb_messages': ('int', 100)
-    }
-
-    def __init__(self, **config):
-        super().__init__(**config)
-        self.fake_origin_generator = FakeRandomOriginGenerator()
-        self.nb_messages = self.config['nb_messages']
-
-    def _random_event(self):
-        """Create a fake and random event
-
-        """
-        event_type = random.choice(['evt', 'ent'])
-        sub_event = random.choice(events[event_type])
-        return {
-            'type': sub_event,
-            'repo': {
-                'name': self.fake_origin_generator.generate(),
-            },
-            'created_at': utcnow().isoformat()
-
-        }
-
-    def publish(self, nb_messages=None):
-        if not nb_messages:
-            nb_messages = self.nb_messages
-
-        with Connection(self.config['conn']['url']) as conn:
-            with conn.Producer(serializer='json') as producer:
-                for n in range(nb_messages):
-                    event = self._random_event()
-                    producer.publish(event,
-                                     exchange=self.exchange,
-                                     routing_key=self.routing_key,
-                                     declare=[self.queue])
diff --git a/version.txt b/version.txt
index 3ecd929..9e12c0a 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-v0.0.49-0-gdc6afe4
\ No newline at end of file
+v0.0.50-0-g693a147
\ No newline at end of file
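
Note on the batching asserted above: `test_task_schedule_origins` expects 70 origins scheduled with `--batch-size 20` to produce four tasks whose argument lists are `[1..20]`, `[21..40]`, `[41..60]` and `[61..70]`. The sketch below only illustrates that grouping in isolation; it is not the `schedule_origins` implementation from `swh.scheduler.cli`, and the helper name `batch_origin_ids` is hypothetical, assuming origin ids are consecutive integers starting at 1.

```
from typing import Iterable, Iterator, List


def batch_origin_ids(origin_ids: Iterable[int],
                     batch_size: int) -> Iterator[List[int]]:
    """Group origin ids into lists of at most batch_size elements.

    Illustrative sketch only: mirrors the grouping asserted by
    test_task_schedule_origins, not the actual CLI code.
    """
    batch: List[int] = []
    for origin_id in origin_ids:
        batch.append(origin_id)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:  # final, possibly partial batch (e.g. origins 61..70)
        yield batch


if __name__ == '__main__':
    # 70 origins with --batch-size 20 -> 4 batches, matching the assertions
    batches = list(batch_origin_ids(range(1, 71), 20))
    assert len(batches) == 4
    assert batches[0] == list(range(1, 21))
    assert batches[3] == list(range(61, 71))
```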