diff --git a/PKG-INFO b/PKG-INFO index 23405a6..4d973b0 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,65 +1,65 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.48 +Version: 0.0.49 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN -Project-URL: Funding, https://www.softwareheritage.org/donate -Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest +Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler +Project-URL: Funding, https://www.softwareheritage.org/donate Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package). # Tests ## Running tests manually ### Test data To be able to run (unit) tests, you need to have the [[https://forge.softwareheritage.org/source/swh-storage-testdata.git|swh-storage-testdata]] repository in the parent directory. If you have set up your environment following the [[ https://docs.softwareheritage.org/devel/getting-started.html#getting-started|Getting started]] document, everything should be set up just fine. Otherwise: ``` ~/.../swh-scheduler$ git clone https://forge.softwareheritage.org/source/swh-storage-testdata.git ../swh-storage-testdata ``` ### Required services Unit tests that require a running Celery broker use an in-memory broker/result backend by default, but you can choose to use a real broker by setting the `CELERY_BROKER_URL` and `CELERY_RESULT_BACKEND` environment variables. For example: ``` $ CELERY_BROKER_URL=amqp://localhost pifpaf run postgresql nosetests ..................................... ---------------------------------------------------------------------- Ran 37 tests in 15.578s OK ``` Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 23405a6..4d973b0 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,65 +1,65 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.48 +Version: 0.0.49 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN -Project-URL: Funding, https://www.softwareheritage.org/donate -Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest +Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler +Project-URL: Funding, https://www.softwareheritage.org/donate Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package).
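The broker behaviour described in the "Required services" section can be mirrored in a few lines of test setup. The following sketch is illustrative only (it assumes `celery` is installed; the app and task names are made up):

```
import os
from celery import Celery

# Fall back to Celery's in-memory broker and result backend unless the
# environment overrides them, mirroring the test suite's behaviour.
broker = os.environ.get('CELERY_BROKER_URL', 'memory://')
backend = os.environ.get('CELERY_RESULT_BACKEND', 'cache+memory://')

app = Celery('swh_test_app', broker=broker, backend=backend)

@app.task
def ping():
    return 'pong'
```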
# Tests ## Running tests manually ### Test data To be able to run (unit) tests, you need to have the [[https://forge.softwareheritage.org/source/swh-storage-testdata.git|swh-storage-testdata]] repository in the parent directory. If you have set up your environment following the [[ https://docs.softwareheritage.org/devel/getting-started.html#getting-started|Getting started]] document, everything should be set up just fine. Otherwise: ``` ~/.../swh-scheduler$ git clone https://forge.softwareheritage.org/source/swh-storage-testdata.git ../swh-storage-testdata ``` ### Required services Unit tests that require a running Celery broker use an in-memory broker/result backend by default, but you can choose to use a real broker by setting the `CELERY_BROKER_URL` and `CELERY_RESULT_BACKEND` environment variables. For example: ``` $ CELERY_BROKER_URL=amqp://localhost pifpaf run postgresql nosetests ..................................... ---------------------------------------------------------------------- Ran 37 tests in 15.578s OK ``` Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index c54e63a..64c7d08 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,55 +1,61 @@ MANIFEST.in Makefile README.md requirements-swh.txt requirements.txt setup.py version.txt bin/swh-worker-control swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt swh/scheduler/__init__.py swh/scheduler/backend.py swh/scheduler/backend_es.py swh/scheduler/cli.py +swh/scheduler/cli_utils.py swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/api/__init__.py swh/scheduler/api/client.py swh/scheduler/api/server.py +swh/scheduler/api/wsgi.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/listener.py swh/scheduler/celery_backend/runner.py swh/scheduler/sql/30-swh-schema.sql -swh/scheduler/sql/40-swh-data.sql +swh/scheduler/sql/40-swh-func.sql +swh/scheduler/sql/50-swh-data.sql +swh/scheduler/sql/60-swh-indexes.sql swh/scheduler/sql/updater/10-swh-init.sql swh/scheduler/sql/updater/30-swh-schema.sql swh/scheduler/sql/updater/40-swh-func.sql swh/scheduler/tests/__init__.py swh/scheduler/tests/conftest.py swh/scheduler/tests/tasks.py swh/scheduler/tests/test_api_client.py swh/scheduler/tests/test_celery_tasks.py +swh/scheduler/tests/test_cli.py swh/scheduler/tests/test_scheduler.py +swh/scheduler/tests/test_server.py swh/scheduler/tests/test_utils.py swh/scheduler/tests/updater/__init__.py swh/scheduler/tests/updater/conftest.py swh/scheduler/tests/updater/test_backend.py swh/scheduler/tests/updater/test_consumer.py swh/scheduler/tests/updater/test_events.py swh/scheduler/tests/updater/test_ghtorrent.py swh/scheduler/tests/updater/test_writer.py swh/scheduler/updater/__init__.py swh/scheduler/updater/backend.py swh/scheduler/updater/consumer.py swh/scheduler/updater/events.py swh/scheduler/updater/writer.py swh/scheduler/updater/ghtorrent/__init__.py
swh/scheduler/updater/ghtorrent/cli.py swh/scheduler/updater/ghtorrent/fake.py \ No newline at end of file diff --git a/swh/scheduler/__init__.py b/swh/scheduler/__init__.py index 02a493b..d706ed7 100644 --- a/swh/scheduler/__init__.py +++ b/swh/scheduler/__init__.py @@ -1,70 +1,69 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # Percentage of tasks with priority to schedule PRIORITY_SLOT = 0.6 -DEFAULT_CONFIG_PATH = 'backend/scheduler' DEFAULT_CONFIG = { 'scheduler': ('dict', { 'cls': 'local', 'args': { 'db': 'dbname=softwareheritage-scheduler-dev', }, }) } # current configuration. To be set by the config loading mechanism CONFIG = {} def compute_nb_tasks_from(num_tasks): """Compute and return the tuple (number of tasks without priority, number of tasks with priority). Args: num_tasks (int): total number of tasks to split Returns: tuple: number of tasks without priority (int), number of tasks with priority (int) """ if not num_tasks: return None, None return (int((1 - PRIORITY_SLOT) * num_tasks), int(PRIORITY_SLOT * num_tasks)) def get_scheduler(cls, args={}): """ Get a scheduler object of class `cls` with arguments `args`. Args: cls (str): scheduler's class, either 'local' or 'remote' args (dict): arguments to pass to the scheduler class constructor, default to empty. Returns: an instance of swh.scheduler, either local or remote: local: swh.scheduler.backend.SchedulerBackend remote: swh.scheduler.api.client.RemoteScheduler Raises: ValueError if passed an unknown scheduler class. """ if cls == 'remote': from .api.client import RemoteScheduler as SchedulerBackend elif cls == 'local': from .backend import SchedulerBackend else: raise ValueError('Unknown swh.scheduler class `%s`' % cls) return SchedulerBackend(**args) diff --git a/swh/scheduler/api/server.py b/swh/scheduler/api/server.py index 5ab7729..3cdb28d 100644 --- a/swh/scheduler/api/server.py +++ b/swh/scheduler/api/server.py @@ -1,204 +1,259 @@ -# Copyright (C) 2018 The Software Heritage developers +# Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import os import logging from flask import request, Flask from swh.core import config from swh.core.api import (decode_request, error_handler, encode_data_server as encode_data) from swh.core.api import negotiate, JSONFormatter, MsgpackFormatter from swh.scheduler import get_scheduler as get_scheduler_from -from swh.scheduler import DEFAULT_CONFIG, DEFAULT_CONFIG_PATH app = Flask(__name__) scheduler = None @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) def get_sched(): global scheduler if not scheduler: scheduler = get_scheduler_from(**app.config['scheduler']) return scheduler def has_no_empty_params(rule): return len(rule.defaults or ()) >= len(rule.arguments or ()) @app.route('/') @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def index(): return 'SWH Scheduler API server' @app.route('/close_connection', methods=['GET', 'POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def close_connection(): return get_sched().close_connection() @app.route('/set_status_tasks', methods=['POST']) @negotiate(MsgpackFormatter)
@negotiate(JSONFormatter) def set_status_tasks(): return get_sched().set_status_tasks(**decode_request(request)) @app.route('/create_task_type', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def create_task_type(): return get_sched().create_task_type(**decode_request(request)) @app.route('/get_task_type', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_task_type(): return get_sched().get_task_type(**decode_request(request)) @app.route('/get_task_types', methods=['GET', 'POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_task_types(): return get_sched().get_task_types(**decode_request(request)) @app.route('/create_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def create_tasks(): return get_sched().create_tasks(**decode_request(request)) @app.route('/disable_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def disable_tasks(): return get_sched().disable_tasks(**decode_request(request)) @app.route('/get_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_tasks(): return get_sched().get_tasks(**decode_request(request)) @app.route('/get_task_runs', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_task_runs(): return get_sched().get_task_runs(**decode_request(request)) @app.route('/search_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def search_tasks(): return get_sched().search_tasks(**decode_request(request)) @app.route('/peek_ready_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def peek_ready_tasks(): return get_sched().peek_ready_tasks(**decode_request(request)) @app.route('/grab_ready_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def grab_ready_tasks(): return get_sched().grab_ready_tasks(**decode_request(request)) @app.route('/schedule_task_run', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def schedule_task_run(): return get_sched().schedule_task_run(**decode_request(request)) @app.route('/mass_schedule_task_runs', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def mass_schedule_task_runs(): return get_sched().mass_schedule_task_runs(**decode_request(request)) @app.route('/start_task_run', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def start_task_run(): return get_sched().start_task_run(**decode_request(request)) @app.route('/end_task_run', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def end_task_run(): return get_sched().end_task_run(**decode_request(request)) @app.route('/filter_task_to_archive', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def filter_task_to_archive(): return get_sched().filter_task_to_archive(**decode_request(request)) @app.route('/delete_archived_tasks', methods=['POST']) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def delete_archived_tasks(): return get_sched().delete_archived_tasks(**decode_request(request)) @app.route("/site-map") @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def site_map(): links = [] sched = get_sched() for rule in app.url_map.iter_rules(): if has_no_empty_params(rule) and hasattr(sched, rule.endpoint): links.append(dict( rule=rule.rule, description=getattr(sched, rule.endpoint).__doc__)) # links is now a list of url, endpoint tuples return links +def 
load_and_check_config(config_file, type='local'): + """Check that the minimal configuration is set to run the API, or raise an + explanatory error. + + Args: + config_file (str): Path to the configuration file to load + type (str): configuration type. For 'local' type, more + checks are done. + + Raises: + Error if the setup is not as expected + + Returns: + configuration as a dict + + """ + if not config_file: + raise EnvironmentError('Configuration file must be defined') + + if not os.path.exists(config_file): + raise FileNotFoundError('Configuration file %s does not exist' % ( + config_file, )) + + cfg = config.read(config_file) + + vcfg = cfg.get('scheduler') + if not vcfg: + raise KeyError("Missing 'scheduler' configuration") + + if type == 'local': + cls = vcfg.get('cls') + if cls != 'local': + raise ValueError( + "The scheduler backend can only be started with a 'local' " + "configuration") + + args = vcfg.get('args') + if not args: + raise KeyError( + "Invalid configuration; missing 'args' config entry") + + db = args.get('db') + if not db: + raise KeyError( + "Invalid configuration; missing 'db' config entry") + + return cfg + + api_cfg = None -def run_from_webserver(environ, start_response, - config_path=DEFAULT_CONFIG_PATH): - """Run the WSGI app from the webserver, loading the configuration.""" +def make_app_from_configfile(): + """Build the WSGI app to run from a webserver, loading the configuration + from a configuration file. + + The SWH_CONFIG_FILENAME environment variable defines the + path of the configuration to load. + + """ global api_cfg if not api_cfg: - api_cfg = config.load_named_config(config_path, DEFAULT_CONFIG) + config_file = os.environ.get('SWH_CONFIG_FILENAME') + api_cfg = load_and_check_config(config_file) app.config.update(api_cfg) handler = logging.StreamHandler() app.logger.addHandler(handler) - return app(environ, start_response) + return app if __name__ == '__main__': print('Please use the "swh-scheduler api-server" command') diff --git a/swh/scheduler/api/wsgi.py b/swh/scheduler/api/wsgi.py new file mode 100644 index 0000000..02c4901 --- /dev/null +++ b/swh/scheduler/api/wsgi.py @@ -0,0 +1,8 @@ +# Copyright (C) 2019 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from .server import make_app_from_configfile + +application = make_app_from_configfile() diff --git a/swh/scheduler/celery_backend/config.py b/swh/scheduler/celery_backend/config.py index 66cc591..ef4ab37 100644 --- a/swh/scheduler/celery_backend/config.py +++ b/swh/scheduler/celery_backend/config.py @@ -1,265 +1,262 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import os import urllib.parse from celery import Celery from celery.signals import setup_logging, celeryd_after_setup from celery.utils.log import ColorFormatter from celery.worker.control import Panel from kombu import Exchange, Queue from kombu.five import monotonic as _monotonic import requests from swh.scheduler import CONFIG as SWH_CONFIG from swh.core.config import load_named_config, merge_configs from swh.core.logger import JournalHandler DEFAULT_CONFIG_NAME = 'worker' CONFIG_NAME_ENVVAR = 'SWH_WORKER_INSTANCE' CONFIG_NAME_TEMPLATE = 'worker/%s'
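# Editor's sketch (not part of the release): how the API pieces above fit
# together. Assuming swh.scheduler is installed, a minimal configuration
# file accepted by load_and_check_config() could look like:
#
#     scheduler:
#       cls: local
#       args:
#         db: dbname=softwareheritage-scheduler-dev
#
# and the new WSGI entry point would be driven through the
# SWH_CONFIG_FILENAME environment variable, e.g. (paths illustrative):
#
#     SWH_CONFIG_FILENAME=/etc/softwareheritage/scheduler.yml \
#         gunicorn swh.scheduler.api.wsgi:application
#
# The same SWH_CONFIG_FILENAME variable is reused below by the Celery
# configuration, which reads the 'celery' section of that file.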
DEFAULT_CONFIG = { 'task_broker': ('str', 'amqp://guest@localhost//'), - 'result_backend': ('str', 'rpc://'), 'task_modules': ('list[str]', []), 'task_queues': ('list[str]', []), 'task_soft_time_limit': ('int', 0), } logger = logging.getLogger(__name__) @setup_logging.connect def setup_log_handler(loglevel=None, logfile=None, format=None, colorize=None, log_console=True, **kwargs): """Set up logging according to Software Heritage preferences. We use the command-line loglevel for tasks only, as we never really care about the debug messages from celery. """ if loglevel is None: loglevel = logging.DEBUG if isinstance(loglevel, str): loglevel = logging._nameToLevel[loglevel] formatter = logging.Formatter(format) root_logger = logging.getLogger('') root_logger.setLevel(logging.INFO) if loglevel <= logging.DEBUG: log_console = True if log_console: color_formatter = ColorFormatter(format) if colorize else formatter console = logging.StreamHandler() console.setLevel(logging.DEBUG) console.setFormatter(color_formatter) root_logger.addHandler(console) systemd_journal = JournalHandler() systemd_journal.setLevel(logging.DEBUG) systemd_journal.setFormatter(formatter) root_logger.addHandler(systemd_journal) logging.getLogger('celery').setLevel(logging.INFO) # Silence amqp heartbeat_tick messages logger = logging.getLogger('amqp') logger.addFilter(lambda record: not record.msg.startswith( 'heartbeat_tick')) logger.setLevel(loglevel) # Silence useless "Starting new HTTP connection" messages logging.getLogger('urllib3').setLevel(logging.WARNING) logging.getLogger('swh').setLevel(loglevel) # get_task_logger makes the swh tasks loggers children of celery.task logging.getLogger('celery.task').setLevel(loglevel) return loglevel @celeryd_after_setup.connect def setup_queues_and_tasks(sender, instance, **kwargs): """Signal handler called on worker start. This automatically registers swh.scheduler.task.Task subclasses as available celery tasks. This also subscribes the worker to the "implicit" per-task queues defined for these task classes. """ logger.info('Setup Queues & Tasks for %s', sender) instance.app.conf['worker_name'] = sender @Panel.register def monotonic(state): """Get the current value for the monotonic clock""" return {'monotonic': _monotonic()} def route_for_task(name, args, kwargs, options, task=None, **kw): """Route tasks according to the task_queue attribute in the task class""" if name is not None and name.startswith('swh.'): return {'queue': name} def get_queue_stats(app, queue_name): """Get the statistics regarding a queue on the broker. Arguments: app: the Celery app whose broker connection is used queue_name: name of the queue to check Returns a raw dictionary from the RabbitMQ management API; or `None` if the current configuration does not use RabbitMQ. Interesting keys: - consumers (number of consumers for the queue) - messages (number of messages in queue) - messages_unacknowledged (number of messages currently being processed) Documentation: https://www.rabbitmq.com/management.html#http-api """ conn_info = app.connection().info() if conn_info['transport'] == 'memory': # We're running in a test environment, without RabbitMQ.
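# (Editor's note) When the broker transport is 'memory' -- the in-memory
# broker used by the test suite -- there is no RabbitMQ management API to
# query, hence the early return below. With a real RabbitMQ broker, the
# management API conventionally listens on the AMQP port + 10000 (e.g.
# amqp on 5672 -> HTTP API on 15672), which is what the URL construction
# below assumes; vhost and queue names are percent-encoded ('/' -> '%2F').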
return None url = 'http://{hostname}:{port}/api/queues/{vhost}/{queue}'.format( hostname=conn_info['hostname'], port=conn_info['port'] + 10000, vhost=urllib.parse.quote(conn_info['virtual_host'], safe=''), queue=urllib.parse.quote(queue_name, safe=''), ) credentials = (conn_info['userid'], conn_info['password']) r = requests.get(url, auth=credentials) if r.status_code == 404: return {} if r.status_code != 200: raise ValueError('Got error %s when reading queue stats: %s' % ( r.status_code, r.json())) return r.json() def get_queue_length(app, queue_name): """Shortcut to get a queue's length""" stats = get_queue_stats(app, queue_name) if stats: return stats.get('messages') def register_task_class(app, name, cls): """Register a class-based task under the given name""" if name in app.tasks: return task_instance = cls() task_instance.name = name app.register_task(task_instance) INSTANCE_NAME = os.environ.get(CONFIG_NAME_ENVVAR) CONFIG_NAME = os.environ.get('SWH_CONFIG_FILENAME') CONFIG = {} if CONFIG_NAME: # load the celery config from the main config file given as # SWH_CONFIG_FILENAME environment variable. # This is expected to have a [celery] section in which we have the # celery specific configuration. SWH_CONFIG.clear() SWH_CONFIG.update(load_named_config(CONFIG_NAME)) CONFIG = SWH_CONFIG.get('celery') if not CONFIG: # otherwise, back to compat config loading mechanism if INSTANCE_NAME: CONFIG_NAME = CONFIG_NAME_TEMPLATE % INSTANCE_NAME else: CONFIG_NAME = DEFAULT_CONFIG_NAME # Load the Celery config CONFIG = load_named_config(CONFIG_NAME, DEFAULT_CONFIG) # Celery Queues CELERY_QUEUES = [Queue('celery', Exchange('celery'), routing_key='celery')] CELERY_DEFAULT_CONFIG = dict( # Timezone configuration: all in UTC enable_utc=True, timezone='UTC', # Imported modules imports=CONFIG.get('task_modules', []), # Time (in seconds, or a timedelta object) after which stored task # tombstones will be deleted. None means to never expire results. result_expires=None, # A string identifying the default serialization method to use. Can # be json (default), pickle, yaml, msgpack, or any custom # serialization methods that have been registered with # kombu.serialization.registry task_serializer='msgpack', # Result serialization format result_serializer='msgpack', # Late ack means the task messages will be acknowledged after the task has # been executed, not just before, which is the default behavior. task_acks_late=True, # A list of content-types/serializers to accept. # Can be pickle (default), json, yaml, msgpack or any custom serialization # methods that have been registered with kombu.serialization.registry accept_content=['msgpack', 'json'], # If True the task will report its status as “started” # when the task is executed by a worker. task_track_started=True, # Default compression used for task messages. Can be gzip, bzip2 # (if available), or any custom compression schemes registered # in the Kombu compression registry. # result_compression='bzip2', # task_compression='bzip2', # Disable all rate limits, even if tasks have explicit rate limits set. # (Disabling rate limits altogether is recommended if you don’t have any # tasks using them.)
worker_disable_rate_limits=True, # Task routing task_routes=route_for_task, - # Task queues this worker will consume from - task_queues=CELERY_QUEUES, # Allow pool restarts from remote worker_pool_restarts=True, # Do not prefetch tasks worker_prefetch_multiplier=1, # Send events worker_send_task_events=True, # Do not send useless task_sent events task_send_sent_event=False, ) def build_app(config=None): config = merge_configs( {k: v for (k, (_, v)) in DEFAULT_CONFIG.items()}, config or {}) - config['task_queues'] = [Queue(queue, Exchange(queue), routing_key=queue) - for queue in config.get('task_queues', ())] + config['task_queues'] = CELERY_QUEUES + [ + Queue(queue, Exchange(queue), routing_key=queue) + for queue in config.get('task_queues', ())] logger.debug('Creating a Celery app with %s', config) # Instantiate the Celery app app = Celery(broker=config['task_broker'], - backend=config['result_backend'], task_cls='swh.scheduler.task:SWHTask') app.add_defaults(CELERY_DEFAULT_CONFIG) app.add_defaults(config) return app app = build_app(CONFIG) # XXX for BW compat Celery.get_queue_length = get_queue_length diff --git a/swh/scheduler/celery_backend/listener.py b/swh/scheduler/celery_backend/listener.py index 86791a6..038bee9 100644 --- a/swh/scheduler/celery_backend/listener.py +++ b/swh/scheduler/celery_backend/listener.py @@ -1,202 +1,202 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import logging import time import sys import click from arrow import utcnow from kombu import Queue import celery from celery.events import EventReceiver class ReliableEventReceiver(EventReceiver): def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix='celeryev', accept=None): super(ReliableEventReceiver, self).__init__( channel, handlers, routing_key, node_id, app, queue_prefix, accept) self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), exchange=self.exchange, routing_key=self.routing_key, auto_delete=False, durable=True) def get_consumers(self, consumer, channel): return [consumer(queues=[self.queue], callbacks=[self._receive], no_ack=False, accept=self.accept)] def _receive(self, bodies, message): if not isinstance(bodies, list): # celery<4 returned body as element bodies = [bodies] for body in bodies: type, body = self.event_from_message(body) self.process(type, body, message) def process(self, type, event, message): """Process the received event by dispatching it to the appropriate handler.""" handler = self.handlers.get(type) or self.handlers.get('*') handler and handler(event, message) ACTION_SEND_DELAY = datetime.timedelta(seconds=1.0) ACTION_QUEUE_MAX_LENGTH = 1000 def event_monitor(app, backend): logger = logging.getLogger('swh.scheduler.listener') actions = { 'last_send': utcnow() - 2*ACTION_SEND_DELAY, 'queue': [], } def try_perform_actions(actions=actions): logger.debug('Try perform pending actions') if actions['queue'] and ( len(actions['queue']) > ACTION_QUEUE_MAX_LENGTH or utcnow() - actions['last_send'] > ACTION_SEND_DELAY): perform_actions(actions) def perform_actions(actions, backend=backend): logger.info('Perform %s pending actions' % len(actions['queue'])) action_map = { 'start_task_run': backend.start_task_run, 'end_task_run': backend.end_task_run, } messages = [] db = backend.get_db() cursor = db.cursor(None) for 
action in actions['queue']: messages.append(action['message']) function = action_map[action['action']] args = action.get('args', ()) kwargs = action.get('kwargs', {}) kwargs['cur'] = cursor function(*args, **kwargs) db.conn.commit() for message in messages: if not message.acknowledged: message.ack() else: logger.info('message already acknowledged: %s', message) actions['queue'] = [] actions['last_send'] = utcnow() def queue_action(action, actions=actions): actions['queue'].append(action) try_perform_actions() def catchall_event(event, message): logger.debug('event: %s %s', event['type'], event.get('name', 'N/A')) if not message.acknowledged: message.ack() else: logger.info('message already acknowledged: %s', message) try_perform_actions() def task_started(event, message): - logger.debug('task_started: %s %s %s', event['type'], - event.get('name', 'N/A')) + logger.debug('task_started: %s %s', + event['type'], event.get('name', 'N/A')) queue_action({ 'action': 'start_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'metadata': { 'worker': event['hostname'], }, }, 'message': message, }) def task_succeeded(event, message): logger.debug('task_succeeded: event: %s' % event) logger.debug(' message: %s' % message) result = event['result'] logger.debug('task_succeeded: result: %s' % result) try: status = result.get('status') if status == 'success': status = 'eventful' if result.get('eventful') else 'uneventful' except Exception: status = 'eventful' if result else 'uneventful' queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': status, 'result': result, }, 'message': message, }) def task_failed(event, message): logger.debug('task_failed: event: %s' % event) logger.debug(' message: %s' % message) queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': 'failed', }, 'message': message, }) recv = ReliableEventReceiver( celery.current_app.connection(), app=celery.current_app, handlers={ 'task-started': task_started, 'task-result': task_succeeded, 'task-failed': task_failed, '*': catchall_event, }, node_id='listener', ) errors = 0 while True: try: recv.capture(limit=None, timeout=None, wakeup=True) errors = 0 except KeyboardInterrupt: logger.exception('Keyboard interrupt, exiting') break except Exception: logger.exception('Unexpected exception') if errors < 5: time.sleep(errors) errors += 1 else: logger.error('Too many consecutive errors, exiting') sys.exit(1) @click.command() @click.pass_context def main(ctx): click.echo("Deprecated! Use 'swh-scheduler listener' instead.", err=True) ctx.exit(1) if __name__ == '__main__': main() diff --git a/swh/scheduler/cli.py b/swh/scheduler/cli.py index f7e1191..5428f98 100644 --- a/swh/scheduler/cli.py +++ b/swh/scheduler/cli.py @@ -1,710 +1,749 @@ # Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import arrow import click import csv import itertools import json import locale import logging import time import datetime from swh.core import utils, config from . import compute_nb_tasks_from from .backend_es import SWHElasticSearchClient from . 
import get_scheduler, DEFAULT_CONFIG +from .cli_utils import parse_options locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def format_dict(d): ret = {} for k, v in d.items(): if isinstance(v, (arrow.Arrow, datetime.date, datetime.datetime)): v = arrow.get(v).format() elif isinstance(v, dict): v = format_dict(v) ret[k] = v return ret def pretty_print_list(list, indent=0): """Pretty-print a list""" return ''.join('%s%s\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent=0): """Pretty-print a dict""" return ''.join('%s%s: %s\n' % (' ' * indent, click.style(key, bold=True), value) for key, value in dict.items()) def pretty_print_run(run, indent=4): fmt = ('{indent}{backend_id} [{status}]\n' '{indent} scheduled: {scheduled} [{started}:{ended}]') return fmt.format(indent=' '*indent, **format_dict(run)) def pretty_print_task(task, full=False): """Pretty-print a task If 'full' is True, also print the status and priority fields. + + >>> task = { + ... 'id': 1234, + ... 'arguments': { + ... 'args': ['foo', 'bar'], + ... 'kwargs': {'key': 'value'}, + ... }, + ... 'current_interval': datetime.timedelta(hours=1), + ... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818), + ... 'policy': 'oneshot', + ... 'priority': None, + ... 'status': 'next_run_not_scheduled', + ... 'type': 'test_task', + ... } + >>> print(click.unstyle(pretty_print_task(task))) + Task 1234 + Next run: ... (2019-02-21 13:52:35+00:00) + Interval: 1:00:00 + Type: test_task + Policy: oneshot + Args: + foo + bar + Keyword args: + key: value + + >>> print(click.unstyle(pretty_print_task(task, full=True))) + Task 1234 + Next run: ...
(2019-02-21 13:52:35+00:00) + Interval: 1:00:00 + Type: test_task + Policy: oneshot + Status: next_run_not_scheduled + Priority:\x20 + Args: + foo + bar + Keyword args: + key: value + """ next_run = arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'] or '', '\n', click.style(' Policy: ', bold=True), task['policy'] or '', '\n', ] if full: lines += [ click.style(' Status: ', bold=True), task['status'] or '', '\n', click.style(' Priority: ', bold=True), task['priority'] or '', '\n', ] lines += [ click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @click.group(context_settings=CONTEXT_SETTINGS) @click.option('--config-file', '-C', default=None, type=click.Path(exists=True, dir_okay=False,), help="Configuration file.") @click.option('--database', '-d', default=None, help="Scheduling database DSN (implies cls is 'local')") @click.option('--url', '-u', default=None, help="Scheduler's url access (implies cls is 'remote')") @click.option('--log-level', '-l', default='INFO', type=click.Choice(logging._nameToLevel.keys()), help="Log level (default to INFO)") @click.option('--no-stdout', is_flag=True, default=False, help="Do NOT output logs on the console") @click.pass_context def cli(ctx, config_file, database, url, log_level, no_stdout): """Software Heritage Scheduler CLI interface Defaults to using the local scheduler instance (plugged into the main scheduler db). """ from swh.scheduler.celery_backend.config import setup_log_handler log_level = setup_log_handler( loglevel=log_level, colorize=False, format='[%(levelname)s] %(name)s -- %(message)s', log_console=not no_stdout) ctx.ensure_object(dict) logger = logging.getLogger(__name__) scheduler = None conf = config.read(config_file, DEFAULT_CONFIG) if 'scheduler' not in conf: raise ValueError("missing 'scheduler' configuration") if database: conf['scheduler']['cls'] = 'local' conf['scheduler']['args']['db'] = database elif url: conf['scheduler']['cls'] = 'remote' conf['scheduler']['args'] = {'url': url} sched_conf = conf['scheduler'] try: logger.debug('Instantiating scheduler with %s' % ( sched_conf)) scheduler = get_scheduler(**sched_conf) except ValueError: # it's up to the subcommand to decide whether not having a proper # scheduler instance is a problem. pass ctx.obj['scheduler'] = scheduler ctx.obj['config'] = conf ctx.obj['loglevel'] = log_level @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], type=click.Choice([ 'type', 'args', 'kwargs', 'policy', 'next_run']), help='columns present in the CSV file') @click.option('--delimiter', '-d', default=',') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file.
The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). Usage sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ tasks = [] now = arrow.utcnow() scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = scheduler.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('add') @click.argument('type', nargs=1, required=True) @click.argument('options', nargs=-1) @click.option('--policy', '-p', default='recurring', type=click.Choice(['recurring', 'oneshot'])) @click.option('--priority', '-P', default=None, type=click.Choice(['low', 'normal', 'high'])) @click.option('--next-run', '-n', default=None) @click.pass_context def schedule_task(ctx, type, options, policy, priority, next_run): """Schedule one task from arguments. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task add swh-lister-pypi swh-scheduler --database 'service=swh-scheduler' \ task add swh-lister-debian --policy=oneshot distribution=stretch Note: if the priority is not given, the task won't have the priority set, which is considered the lowest priority level. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') now = arrow.utcnow() - args = [x for x in options if '=' not in x] - kw = dict(x.split('=', 1) for x in options if '=' in x) + (args, kw) = parse_options(options) task = {'type': type, 'policy': policy, 'priority': priority, 'arguments': { 'args': args, 'kwargs': kw, }, 'next_run': DATETIME.convert(next_run or now, None, None), } created = scheduler.create_tasks([task]) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('list-pending') @click.argument('task-types', required=True, nargs=-1) @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context def list_pending_tasks(ctx, task_types, limit, before): """List the tasks that are going to be run.
You can override the number of tasks to fetch. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') num_tasks, num_tasks_priority = compute_nb_tasks_from(limit) output = [] for task_type in task_types: pending = scheduler.peek_ready_tasks( task_type, timestamp=before, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) output.append('Found %d %s tasks\n' % ( len(pending), task_type)) for task in pending: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('list') @click.option('--task-id', '-i', default=None, multiple=True, metavar='ID', help='List only tasks whose id is ID.') @click.option('--task-type', '-t', default=None, multiple=True, metavar='TYPE', help='List only tasks of type TYPE') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch.') @click.option('--status', '-s', multiple=True, metavar='STATUS', default=None, help='List tasks whose status is STATUS.') @click.option('--policy', '-p', default=None, type=click.Choice(['recurring', 'oneshot']), help='List tasks whose policy is POLICY.') @click.option('--priority', '-P', default=None, multiple=True, type=click.Choice(['all', 'low', 'normal', 'high']), help='List tasks whose priority is PRIORITY.') @click.option('--before', '-b', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run before the given date.') @click.option('--after', '-a', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run after the given date.') @click.option('--list-runs', '-r', is_flag=True, default=False, help='Also list past executions of each task.') @click.pass_context def list_tasks(ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs): """List tasks. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if not task_type: task_type = [x['type'] for x in scheduler.get_task_types()] # if task_id is not given, default value for status is # 'next_run_not_scheduled' # if task_id is given, default status is 'all' if task_id is None and status is None: status = ['next_run_not_scheduled'] if status and 'all' in status: status = None if priority and 'all' in priority: priority = None output = [] tasks = scheduler.search_tasks( task_id=task_id, task_type=task_type, status=status, priority=priority, policy=policy, before=before, after=after, limit=limit) if list_runs: runs = {t['id']: [] for t in tasks} for r in scheduler.get_task_runs([task['id'] for task in tasks]): runs[r['task']].append(r) else: runs = {} output.append('Found %d tasks\n' % ( len(tasks))) for task in tasks: output.append(pretty_print_task(task, full=True)) if runs.get(task['id']): output.append(click.style(' Executions:', bold=True)) for run in runs[task['id']]: output.append(pretty_print_run(run, indent=4)) click.echo('\n'.join(output)) @task.command('respawn') @click.argument('task-ids', required=True, nargs=-1) @click.option('--next-run', '-n', required=False, type=DATETIME, metavar='DATETIME', default=None, help='Respawn the selected tasks at this date') @click.pass_context def respawn_tasks(ctx, task_ids, next_run): """Respawn tasks. Respawn tasks given by their ids (see the 'task list' command to find task ids) at the given date (immediately by default). Eg.
swh-scheduler task respawn 1 3 12 """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if next_run is None: next_run = arrow.utcnow() output = [] scheduler.set_status_tasks( task_ids, status='next_run_not_scheduled', next_run=next_run) output.append('Respawn tasks %s\n' % (task_ids,)) click.echo('\n'.join(output)) @task.command('archive') @click.option('--before', '-b', default=None, help='''Tasks whose ended date is earlier than this will be archived. Defaults to the current month's first day.''') @click.option('--after', '-a', default=None, help='''Tasks whose ended date is after the specified date will be archived. Defaults to the prior month's first day.''') @click.option('--batch-index', default=1000, type=click.INT, help='Batch size of tasks to read from db to archive') @click.option('--bulk-index', default=200, type=click.INT, help='Batch size of tasks to bulk index') @click.option('--batch-clean', default=1000, type=click.INT, help='Batch size of tasks to clean after archival') @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='Only list what would be archived; do not index or clean.') @click.option('--verbose', is_flag=True, default=False, help='Verbose mode') @click.option('--cleanup/--no-cleanup', is_flag=True, default=True, help='Clean up archived tasks (default)') @click.option('--start-from', type=click.INT, default=-1, help='(Optional) default task id to start from. Default is -1.') @click.pass_context def archive_tasks(ctx, before, after, batch_index, bulk_index, batch_clean, dry_run, verbose, cleanup, start_from): """Archive task/task_run whose (task_type is 'oneshot' and task_status is 'completed') or (task_type is 'recurring' and task_status is 'disabled'). With the --dry-run flag set, only list those. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') es_client = SWHElasticSearchClient() logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) log = logging.getLogger('swh.scheduler.cli.archive') logging.getLogger('urllib3').setLevel(logging.WARN) logging.getLogger('elasticsearch').setLevel(logging.WARN) if dry_run: log.info('**DRY-RUN** (only reading db)') if not cleanup: log.info('**NO CLEANUP**') now = arrow.utcnow() # Default to archive tasks from a rolling month starting the week # prior to the current one if not before: before = now.shift(weeks=-1).format('YYYY-MM-DD') if not after: after = now.shift(weeks=-1).shift(months=-1).format('YYYY-MM-DD') log.debug('index: %s; cleanup: %s; period: [%s ; %s]' % ( not dry_run, not dry_run and cleanup, after, before)) def group_by_index_name(data, es_client=es_client): """Given a data record, determine the index's name through its ending date. This varies greatly depending on the task_run's status.
""" date = data.get('started') if not date: date = data['scheduled'] return es_client.compute_index_name(date.year, date.month) def index_data(before, last_id, batch_index): tasks_in = scheduler.filter_task_to_archive( after, before, last_id=last_id, limit=batch_index) for index_name, tasks_group in itertools.groupby( tasks_in, key=group_by_index_name): log.debug('Index tasks to %s' % index_name) if dry_run: for task in tasks_group: yield task continue yield from es_client.streaming_bulk( index_name, tasks_group, source=['task_id', 'task_run_id'], chunk_size=bulk_index, log=log) gen = index_data(before, last_id=start_from, batch_index=batch_index) if cleanup: for task_ids in utils.grouper(gen, n=batch_clean): task_ids = list(task_ids) log.info('Clean up %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) if dry_run: # no clean up continue ctx.obj['scheduler'].delete_archived_tasks(task_ids) else: for task_ids in utils.grouper(gen, n=batch_index): task_ids = list(task_ids) log.info('Indexed %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) @cli.command('runner') @click.option('--period', '-p', default=0, help=('Period (in s) at witch pending tasks are checked and ' 'executed. Set to 0 (default) for a one shot.')) @click.pass_context def runner(ctx, period): """Starts a swh-scheduler runner service. This process is responsible for checking for ready-to-run tasks and schedule them.""" from swh.scheduler.celery_backend.runner import run_ready_tasks from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() logger = logging.getLogger(__name__ + '.runner') scheduler = ctx.obj['scheduler'] logger.debug('Scheduler %s' % scheduler) try: while True: logger.debug('Run ready tasks') try: ntasks = len(run_ready_tasks(scheduler, app)) if ntasks: logger.info('Scheduled %s tasks', ntasks) except Exception: logger.exception('Unexpected error in run_ready_tasks()') if not period: break time.sleep(period) except KeyboardInterrupt: ctx.exit(0) @cli.command('listener') @click.pass_context def listener(ctx): """Starts a swh-scheduler listener service. This service is responsible for listening at task lifecycle events and handle their workflow status in the database.""" scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() from swh.scheduler.celery_backend.listener import event_monitor event_monitor(app, backend=scheduler) @cli.command('api-server') @click.option('--host', default='0.0.0.0', help="Host to run the scheduler server api") @click.option('--port', default=5008, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=None, help=("Indicates if the server should run in debug mode. " "Defaults to True if log-level is DEBUG, False otherwise.") ) @click.pass_context def api_server(ctx, host, port, debug): """Starts a swh-scheduler API HTTP server. 
""" if ctx.obj['config']['scheduler']['cls'] == 'remote': click.echo("The API server can only be started with a 'local' " "configuration", err=True) ctx.exit(1) from swh.scheduler.api import server - server.app.scheduler = ctx.obj['scheduler'] server.app.config.update(ctx.obj['config']) if debug is None: debug = ctx.obj['loglevel'] <= logging.DEBUG server.app.run(host, port=port, debug=bool(debug)) @cli.group('task-type') @click.pass_context def task_type(ctx): """Manipulate task types.""" pass @task_type.command('list') @click.option('--verbose', '-v', is_flag=True, default=False, help='Verbose mode') @click.option('--task_type', '-t', multiple=True, default=None, help='List task types of given type') @click.option('--task_name', '-n', multiple=True, default=None, help='List task types of given backend task name') @click.pass_context def list_task_types(ctx, verbose, task_type, task_name): click.echo("Known task types:") if verbose: tmpl = click.style('{type}: ', bold=True) + '''{backend_name} {description} interval: {default_interval} [{min_interval}, {max_interval}] backoff_factor: {backoff_factor} max_queue_length: {max_queue_length} num_retries: {num_retries} retry_delay: {retry_delay} ''' else: tmpl = '{type}:\n {description}' for tasktype in sorted(ctx.obj['scheduler'].get_task_types(), key=lambda x: x['type']): if task_type and tasktype['type'] not in task_type: continue if task_name and tasktype['backend_name'] not in task_name: continue click.echo(tmpl.format(**tasktype)) @task_type.command('add') @click.argument('type', required=1) @click.argument('task-name', required=1) @click.argument('description', required=1) @click.option('--default-interval', '-i', default='90 days', help='Default interval ("90 days" by default)') @click.option('--min-interval', default=None, help='Minimum interval (default interval if not set)') @click.option('--max-interval', '-i', default=None, help='Maximal interval (default interval if not set)') @click.option('--backoff-factor', '-f', type=float, default=1, help='Backoff factor') @click.pass_context def add_task_type(ctx, type, task_name, description, default_interval, min_interval, max_interval, backoff_factor): """Create a new task type """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') task_type = dict( type=type, backend_name=task_name, description=description, default_interval=default_interval, min_interval=min_interval, max_interval=max_interval, backoff_factor=backoff_factor, max_queue_length=None, num_retries=None, retry_delay=None, ) scheduler.create_task_type(task_type) click.echo('OK') @cli.command('updater') @click.option('--verbose/--no-verbose', '-v', default=False, help='Verbose mode') @click.pass_context def updater(ctx, verbose): """Insert tasks in the scheduler from the scheduler-updater's events """ from swh.scheduler.updater.writer import UpdaterWriter UpdaterWriter(**ctx.obj['config']).run() @cli.command('ghtorrent') @click.option('--verbose/--no-verbose', '-v', default=False, help='Verbose mode') @click.pass_context def ghtorrent(ctx, verbose): """Consume events from ghtorrent and write them to cache. 
""" from swh.scheduler.updater.ghtorrent import GHTorrentConsumer from swh.scheduler.updater.backend import SchedulerUpdaterBackend ght_config = ctx.obj['config'].get('ghtorrent', {}) back_config = ctx.obj['config'].get('scheduler_updater', {}) backend = SchedulerUpdaterBackend(**back_config) GHTorrentConsumer(backend, **ght_config).run() def main(): return cli(auto_envvar_prefix='SWH_SCHEDULER') if __name__ == '__main__': main() diff --git a/swh/scheduler/cli_utils.py b/swh/scheduler/cli_utils.py new file mode 100644 index 0000000..c135a7c --- /dev/null +++ b/swh/scheduler/cli_utils.py @@ -0,0 +1,21 @@ +# Copyright (C) 2019 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + + +def parse_options(options): + """Parses options from a CLI and turns it into Python args and kwargs. + + >>> parse_options([]) + ([], {}) + >>> parse_options(['foo', 'bar']) + (['foo', 'bar'], {}) + >>> parse_options(['foo=bar']) + ([], {'foo': 'bar'}) + >>> parse_options(['foo', 'bar=baz']) + (['foo'], {'bar': 'baz'}) + """ + args = [x for x in options if '=' not in x] + kw = dict(x.split('=', 1) for x in options if '=' in x) + return (args, kw) diff --git a/swh/scheduler/sql/30-swh-schema.sql b/swh/scheduler/sql/30-swh-schema.sql index 5214d2b..eec3c6b 100644 --- a/swh/scheduler/sql/30-swh-schema.sql +++ b/swh/scheduler/sql/30-swh-schema.sql @@ -1,520 +1,102 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; insert into dbversion (version, release, description) values (12, now(), 'Work In Progress'); create table task_type ( type text primary key, description text not null, backend_name text not null, default_interval interval, min_interval interval, max_interval interval, backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; create type task_priority as enum('high', 'normal', 'low'); comment on type task_priority is 'Priority of the given task'; create table priority_ratio( id task_priority primary key, ratio float not null ); comment on table priority_ratio is 
'Oneshot task''s reading ratio per priority'; comment on column priority_ratio.id is 'Task priority id'; comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority'; insert into priority_ratio (id, ratio) values ('high', 0.5); insert into priority_ratio (id, ratio) values ('normal', 0.3); insert into priority_ratio (id, ratio) values ('low', 0.2); create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, current_interval interval, status task_status not null, policy task_policy not null default 'recurring', retries_left bigint not null default 0, priority task_priority references priority_ratio(id), check (policy <> 'recurring' or current_interval is not null) ); comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. ' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; comment on column task.priority is 'Priority of the given task'; -create index on task(type); -create index on task(next_run); -create index task_args on task using btree ((arguments -> 'args')); -create index task_kwargs on task using gin ((arguments -> 'kwargs')); -create index on task(priority); - create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. 
 create table task (
   id bigserial primary key,
   type text not null references task_type(type),
   arguments jsonb not null,
   next_run timestamptz not null,
   current_interval interval,
   status task_status not null,
   policy task_policy not null default 'recurring',
   retries_left bigint not null default 0,
   priority task_priority references priority_ratio(id),
   check (policy <> 'recurring' or current_interval is not null)
 );

 comment on table task is 'Schedule of recurring tasks';
 comment on column task.arguments is 'Arguments passed to the underlying job scheduler. '
                                     'Contains two keys, ''args'' (list) and ''kwargs'' (object).';
 comment on column task.next_run is 'The next run of this task should be run on or after that time';
 comment on column task.current_interval is 'The interval between two runs of this task, '
                                            'taking into account the backoff factor';
 comment on column task.policy is 'Whether the task is one-shot or recurring';
 comment on column task.retries_left is 'The number of "short delay" retries of the task in case of '
                                        'transient failure';
 comment on column task.priority is 'Priority of the given task';

-create index on task(type);
-create index on task(next_run);
-create index task_args on task using btree ((arguments -> 'args'));
-create index task_kwargs on task using gin ((arguments -> 'kwargs'));
-create index on task(priority);
-
 create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost');
 comment on type task_run_status is 'Status of a given task run';

 create table task_run (
   id bigserial primary key,
   task bigint not null references task(id),
   backend_id text,
   scheduled timestamptz,
   started timestamptz,
   ended timestamptz,
   metadata jsonb,
   status task_run_status not null default 'scheduled'
 );
 comment on table task_run is 'History of task runs sent to the job-running backend';
 comment on column task_run.backend_id is 'id of the task run in the job-running backend';
 comment on column task_run.metadata is 'Useful metadata for the given task run. '
                                        'For instance, the worker that took on the job, '
                                        'or the logs for the run.';

-create index on task_run(task);
-create index on task_run(backend_id);
-
-create or replace function swh_scheduler_mktemp_task ()
-  returns void
-  language sql
-as $$
-  create temporary table tmp_task (
-    like task excluding indexes
-  ) on commit drop;
-  alter table tmp_task
-    alter column retries_left drop not null,
-    drop column id;
-$$;
-
-comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation';
-
-create or replace function swh_scheduler_create_tasks_from_temp ()
-  returns setof task
-  language plpgsql
-as $$
-begin
-  -- update the default values in one go
-  -- this is separated from the insert/select to avoid too much
-  -- juggling
-  update tmp_task t
-  set current_interval = tt.default_interval,
-      retries_left = coalesce(retries_left, tt.num_retries, 0)
-  from task_type tt
-  where tt.type=t.type;
-
-  insert into task (type, arguments, next_run, status, current_interval, policy,
-                    retries_left, priority)
-    select type, arguments, next_run, status, current_interval, policy,
-           retries_left, priority
-    from tmp_task t
-    where not exists(select 1
-                     from task
-                     where type = t.type and
-                           arguments->'args' = t.arguments->'args' and
-                           arguments->'kwargs' = t.arguments->'kwargs' and
-                           policy = t.policy and
-                           priority is not distinct from t.priority and
-                           status = t.status);
-
-  return query
-    select distinct t.*
-    from tmp_task tt inner join task t on (
-      tt.type = t.type and
-      tt.arguments->'args' = t.arguments->'args' and
-      tt.arguments->'kwargs' = t.arguments->'kwargs' and
-      tt.policy = t.policy and
-      tt.priority is not distinct from t.priority and
-      tt.status = t.status
-    );
-end;
-$$;
-
-comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table';
-
-create or replace function swh_scheduler_peek_no_priority_tasks (task_type text, ts timestamptz default now(),
-                                                                 num_tasks bigint default NULL)
-  returns setof task
-  language sql
-  stable
-as $$
-select * from task
-  where next_run <= ts
-        and type = task_type
-        and status = 'next_run_not_scheduled'
-        and priority is null
-  order by next_run
-  limit num_tasks
-  for update skip locked;
-$$;
-
-comment on function swh_scheduler_peek_no_priority_tasks (text, timestamptz, bigint)
-is 'Retrieve tasks without priority';
-
-create or replace function swh_scheduler_nb_priority_tasks(num_tasks_priority bigint, task_priority task_priority)
-  returns numeric
-  language sql stable
-as $$
-  select ceil(num_tasks_priority * (select ratio from priority_ratio where id = task_priority)) :: numeric
-$$;
-
-comment on function swh_scheduler_nb_priority_tasks (bigint, task_priority)
-is 'Given a priority task and a total number, compute the number of tasks to read';
-
-create or replace function swh_scheduler_peek_tasks_with_priority (task_type text, ts timestamptz default now(),
-                                                                   num_tasks_priority bigint default NULL,
-                                                                   task_priority task_priority default 'normal')
-  returns setof task
-  language sql
-  stable
-as $$
-  select *
-  from task t
-  where t.next_run <= ts
-        and t.type = task_type
-        and t.status = 'next_run_not_scheduled'
-        and t.priority = task_priority
-  order by t.next_run
-  limit num_tasks_priority
-  for update skip locked;
-$$;
-
-comment on function swh_scheduler_peek_tasks_with_priority(text, timestamptz, bigint, task_priority)
-is 'Retrieve tasks with a given priority';
-
-create or replace function swh_scheduler_peek_priority_tasks (task_type text, ts timestamptz default now(),
-                                                              num_tasks_priority bigint default NULL)
-  returns setof task
-  language plpgsql
-as $$
-declare
-  r record;
-  count_row bigint;
-  nb_diff bigint;
-  nb_high bigint;
-  nb_normal bigint;
-  nb_low bigint;
-begin
-  -- expected values to fetch
-  select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'high') into nb_high;
-  select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'normal') into nb_normal;
-  select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'low') into nb_low;
-  nb_diff := 0;
-  count_row := 0;
-
-  for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_high, 'high')
-  loop
-    count_row := count_row + 1;
-    return next r;
-  end loop;
-
-  if count_row < nb_high then
-    nb_normal := nb_normal + nb_high - count_row;
-  end if;
-
-  count_row := 0;
-  for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_normal, 'normal')
-  loop
-    count_row := count_row + 1;
-    return next r;
-  end loop;
-
-  if count_row < nb_normal then
-    nb_low := nb_low + nb_normal - count_row;
-  end if;
-
-  return query select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_low, 'low');
-end
-$$;
-
-comment on function swh_scheduler_peek_priority_tasks(text, timestamptz, bigint)
-is 'Retrieve priority tasks';
-
-create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(),
-                                                           num_tasks bigint default NULL, num_tasks_priority bigint default NULL)
-  returns setof task
-  language plpgsql
-as $$
-declare
-  r record;
-  count_row bigint;
-  nb_diff bigint;
-  nb_tasks bigint;
-begin
-  count_row := 0;
-
-  for r in select * from swh_scheduler_peek_priority_tasks(task_type, ts, num_tasks_priority)
-           order by priority, next_run
-  loop
-    count_row := count_row + 1;
-    return next r;
-  end loop;
-
-  if count_row < num_tasks_priority then
-    nb_tasks := num_tasks + num_tasks_priority - count_row;
-  else
-    nb_tasks := num_tasks;
-  end if;
-
-  for r in select * from swh_scheduler_peek_no_priority_tasks(task_type, ts, nb_tasks)
-           order by priority, next_run
-  loop
-    return next r;
-  end loop;
-
-  return;
-end
-$$;
-
-comment on function swh_scheduler_peek_ready_tasks(text, timestamptz, bigint, bigint)
-is 'Retrieve tasks with/without priority in order';
-
-create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(),
-                                                           num_tasks bigint default NULL,
-                                                           num_tasks_priority bigint default NULL)
-  returns setof task
-  language sql
-as $$
-  update task
-    set status='next_run_scheduled'
-  from (
-    select id from swh_scheduler_peek_ready_tasks(task_type, ts, num_tasks, num_tasks_priority)
-  ) next_tasks
-  where task.id = next_tasks.id
-  returning task.*;
-$$;
-
-comment on function swh_scheduler_grab_ready_tasks (text, timestamptz, bigint, bigint)
-is 'Grab tasks ready for scheduling and change their status';
-
-create or replace function swh_scheduler_schedule_task_run (task_id bigint,
-                                                            backend_id text,
-                                                            metadata jsonb default '{}'::jsonb,
-                                                            ts timestamptz default now())
-  returns task_run
-  language sql
-as $$
-  insert into task_run (task, backend_id, metadata, scheduled, status)
-    values (task_id, backend_id, metadata, ts, 'scheduled')
-  returning *;
-$$;
-
-create or replace function swh_scheduler_mktemp_task_run ()
-  returns void
-  language sql
-as $$
-  create temporary table tmp_task_run (
-    like task_run excluding indexes
-  ) on commit drop;
-  alter table tmp_task_run
-    drop column id,
-    drop column status;
-$$;
-
-comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling';
-
-create or replace function swh_scheduler_schedule_task_run_from_temp ()
-  returns void
-  language plpgsql
-as $$
-begin
-  insert into task_run (task, backend_id, metadata, scheduled, status)
-    select task, backend_id, metadata, scheduled, 'scheduled'
-    from tmp_task_run;
-  return;
-end;
-$$;
-
-create or replace function swh_scheduler_start_task_run (backend_id text,
-                                                         metadata jsonb default '{}'::jsonb,
-                                                         ts timestamptz default now())
-  returns task_run
-  language sql
-as $$
-  update task_run
-    set started = ts,
-        status = 'started',
-        metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata
-    where task_run.backend_id = swh_scheduler_start_task_run.backend_id
-  returning *;
-$$;
-
-create or replace function swh_scheduler_end_task_run (backend_id text,
-                                                       status task_run_status,
-                                                       metadata jsonb default '{}'::jsonb,
-                                                       ts timestamptz default now())
-  returns task_run
-  language sql
-as $$
-  update task_run
-    set ended = ts,
-        status = swh_scheduler_end_task_run.status,
-        metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata
-    where task_run.backend_id = swh_scheduler_end_task_run.backend_id
-  returning *;
-$$;
-
-create type task_record as (
-  task_id bigint,
-  task_policy task_policy,
-  task_status task_status,
-  task_run_id bigint,
-  arguments jsonb,
-  type text,
-  backend_id text,
-  metadata jsonb,
-  scheduled timestamptz,
-  started timestamptz,
-  ended timestamptz,
-  task_run_status task_run_status
-);
-
-create index task_run_id_asc_idx on task_run(task asc, started asc);
-
-create or replace function swh_scheduler_task_to_archive(
-  ts_after timestamptz, ts_before timestamptz, last_id bigint default -1,
-  lim bigint default 10)
-  returns setof task_record
-  language sql stable
-as $$
-  select t.id as task_id, t.policy as task_policy,
-         t.status as task_status, tr.id as task_run_id,
-         t.arguments, t.type, tr.backend_id, tr.metadata,
-         tr.scheduled, tr.started, tr.ended, tr.status as task_run_status
-  from task_run tr inner join task t on tr.task=t.id
-  where ((t.policy = 'oneshot' and t.status in ('completed', 'disabled')) or
-         (t.policy = 'recurring' and t.status = 'disabled')) and
-        ((ts_after <= tr.started and tr.started < ts_before) or tr.started is null) and
-        t.id > last_id
-  order by tr.task, tr.started
-  limit lim;
-$$;
-
-comment on function swh_scheduler_task_to_archive (timestamptz, timestamptz, bigint, bigint) is 'Read archivable tasks function';
-
-create or replace function swh_scheduler_delete_archived_tasks(
-  task_ids bigint[], task_run_ids bigint[])
-  returns void
-  language sql
-as $$
-  -- clean up task_run_ids
-  delete from task_run where id in (select * from unnest(task_run_ids));
-  -- clean up only tasks whose associated task_run are all cleaned up.
-  -- Remaining tasks will stay there and will be cleaned up when
-  -- remaining data have been indexed
-  delete from task
-    where id in (select t.id
-                 from task t left outer join task_run tr on t.id=tr.task
-                 where t.id in (select * from unnest(task_ids))
-                 and tr.task is null);
-$$;
-
-comment on function swh_scheduler_delete_archived_tasks(bigint[], bigint[]) is 'Clean up archived tasks function';
-
-create or replace function swh_scheduler_update_task_on_task_end ()
-  returns trigger
-  language plpgsql
-as $$
-declare
-  cur_task task%rowtype;
-  cur_task_type task_type%rowtype;
-  adjustment_factor float;
-  new_interval interval;
-begin
-  select * from task where id = new.task into cur_task;
-  select * from task_type where type = cur_task.type into cur_task_type;
-
-  case
-    when new.status = 'permfailed' then
-      update task
-        set status = 'disabled'
-        where id = cur_task.id;
-    when new.status in ('eventful', 'uneventful') then
-      case
-        when cur_task.policy = 'oneshot' then
-          update task
-            set status = 'completed'
-            where id = cur_task.id;
-        when cur_task.policy = 'recurring' then
-          if new.status = 'uneventful' then
-            adjustment_factor := 1/cur_task_type.backoff_factor;
-          else
-            adjustment_factor := 1/cur_task_type.backoff_factor;
-          end if;
-          new_interval := greatest(
-            cur_task_type.min_interval,
-            least(
-              cur_task_type.max_interval,
-              adjustment_factor * cur_task.current_interval));
-          update task
-            set status = 'next_run_not_scheduled',
-                next_run = now() + new_interval,
-                current_interval = new_interval,
-                retries_left = coalesce(cur_task_type.num_retries, 0)
-            where id = cur_task.id;
-      end case;
-    else -- new.status in 'failed', 'lost'
-      if cur_task.retries_left > 0 then
-        update task
-          set status = 'next_run_not_scheduled',
-              next_run = now() + coalesce(cur_task_type.retry_delay, interval '1 hour'),
-              retries_left = cur_task.retries_left - 1
-          where id = cur_task.id;
-      else -- no retries left
-        case
-          when cur_task.policy = 'oneshot' then
-            update task
-              set status = 'disabled'
-              where id = cur_task.id;
-          when cur_task.policy = 'recurring' then
-            update task
-              set status = 'next_run_not_scheduled',
-                  next_run = now() + cur_task.current_interval,
-                  retries_left = coalesce(cur_task_type.num_retries, 0)
-              where id = cur_task.id;
-        end case;
-      end if; -- retries
-  end case;
-  return null;
-end;
-$$;
-
-create trigger update_task_on_task_end
-  after update of status on task_run
-  for each row
-  when (new.status NOT IN ('scheduled', 'started'))
-  execute procedure swh_scheduler_update_task_on_task_end ();
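One quirk worth flagging in `swh_scheduler_update_task_on_task_end` (removed here, kept verbatim in `40-swh-func.sql` below): the `uneventful` and `eventful` branches both set `adjustment_factor := 1/cur_task_type.backoff_factor`, so a recurring task's interval always shrinks. Presumably the uneventful branch was meant to multiply by `backoff_factor` and back off. A Python sketch of the clamped interval update, with that presumed intent marked, using the test fixture's bounds (6 hours to 12 days) and an illustrative factor of 2:

```
from datetime import timedelta

def next_interval(current, backoff_factor, min_interval, max_interval, eventful):
    # As written in the SQL, both branches use 1/backoff_factor; the
    # 'else' value below is the presumed intent, not the shipped code.
    factor = 1 / backoff_factor if eventful else backoff_factor
    candidate = current * factor
    # greatest(min, least(max, candidate)), as in the trigger function
    return max(min_interval, min(max_interval, candidate))

print(next_interval(timedelta(days=1), 2, timedelta(hours=6),
                    timedelta(days=12), eventful=True))   # 12:00:00
print(next_interval(timedelta(days=1), 2, timedelta(hours=6),
                    timedelta(days=12), eventful=False))  # 2 days, 0:00:00
```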
diff --git a/swh/scheduler/sql/30-swh-schema.sql b/swh/scheduler/sql/40-swh-func.sql
similarity index 74%
copy from swh/scheduler/sql/30-swh-schema.sql
copy to swh/scheduler/sql/40-swh-func.sql
index 5214d2b..487e508 100644
--- a/swh/scheduler/sql/30-swh-schema.sql
+++ b/swh/scheduler/sql/40-swh-func.sql
@@ -1,520 +1,408 @@
-create table dbversion
-(
-  version     int primary key,
-  release     timestamptz not null,
-  description text not null
-);
-
-comment on table dbversion is 'Schema update tracking';
-
-insert into dbversion (version, release, description)
-  values (12, now(), 'Work In Progress');
-
-create table task_type (
-  type text primary key,
-  description text not null,
-  backend_name text not null,
-  default_interval interval,
-  min_interval interval,
-  max_interval interval,
-  backoff_factor float,
-  max_queue_length bigint,
-  num_retries bigint,
-  retry_delay interval
-);
-
-comment on table task_type is 'Types of schedulable tasks';
-comment on column task_type.type is 'Short identifier for the task type';
-comment on column task_type.description is 'Human-readable task description';
-comment on column task_type.backend_name is 'Name of the task in the job-running backend';
-comment on column task_type.default_interval is 'Default interval for newly scheduled tasks';
-comment on column task_type.min_interval is 'Minimum interval between two runs of a task';
-comment on column task_type.max_interval is 'Maximum interval between two runs of a task';
-comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs';
-comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks';
-comment on column task_type.num_retries is 'Default number of retries on transient failures';
-comment on column task_type.retry_delay is 'Retry delay for the task';
-
-create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled');
-comment on type task_status is 'Status of a given task';
-
-create type task_policy as enum ('recurring', 'oneshot');
-comment on type task_policy is 'Recurrence policy of the given task';
-
-create type task_priority as enum('high', 'normal', 'low');
-comment on type task_priority is 'Priority of the given task';
-
-create table priority_ratio(
-  id task_priority primary key,
-  ratio float not null
-);
-
-comment on table priority_ratio is 'Oneshot task''s reading ratio per priority';
-comment on column priority_ratio.id is 'Task priority id';
-comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority';
-
-insert into priority_ratio (id, ratio) values ('high', 0.5);
-insert into priority_ratio (id, ratio) values ('normal', 0.3);
-insert into priority_ratio (id, ratio) values ('low', 0.2);
-
-create table task (
-  id bigserial primary key,
-  type text not null references task_type(type),
-  arguments jsonb not null,
-  next_run timestamptz not null,
-  current_interval interval,
-  status task_status not null,
-  policy task_policy not null default 'recurring',
-  retries_left bigint not null default 0,
-  priority task_priority references priority_ratio(id),
-  check (policy <> 'recurring' or current_interval is not null)
-);
-
-comment on table task is 'Schedule of recurring tasks';
-comment on column task.arguments is 'Arguments passed to the underlying job scheduler. '
-                                    'Contains two keys, ''args'' (list) and ''kwargs'' (object).';
-comment on column task.next_run is 'The next run of this task should be run on or after that time';
-comment on column task.current_interval is 'The interval between two runs of this task, '
-                                           'taking into account the backoff factor';
-comment on column task.policy is 'Whether the task is one-shot or recurring';
-comment on column task.retries_left is 'The number of "short delay" retries of the task in case of '
-                                       'transient failure';
-comment on column task.priority is 'Priority of the given task';
-
-create index on task(type);
-create index on task(next_run);
-create index task_args on task using btree ((arguments -> 'args'));
-create index task_kwargs on task using gin ((arguments -> 'kwargs'));
-create index on task(priority);
-
-create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost');
-comment on type task_run_status is 'Status of a given task run';
-
-create table task_run (
-  id bigserial primary key,
-  task bigint not null references task(id),
-  backend_id text,
-  scheduled timestamptz,
-  started timestamptz,
-  ended timestamptz,
-  metadata jsonb,
-  status task_run_status not null default 'scheduled'
-);
-comment on table task_run is 'History of task runs sent to the job-running backend';
-comment on column task_run.backend_id is 'id of the task run in the job-running backend';
-comment on column task_run.metadata is 'Useful metadata for the given task run. '
-                                       'For instance, the worker that took on the job, '
-                                       'or the logs for the run.';
-
-create index on task_run(task);
-create index on task_run(backend_id);
-
 create or replace function swh_scheduler_mktemp_task ()
   returns void
   language sql
 as $$
   create temporary table tmp_task (
     like task excluding indexes
   ) on commit drop;
   alter table tmp_task
     alter column retries_left drop not null,
     drop column id;
 $$;

 comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation';

 create or replace function swh_scheduler_create_tasks_from_temp ()
   returns setof task
   language plpgsql
 as $$
 begin
   -- update the default values in one go
   -- this is separated from the insert/select to avoid too much
   -- juggling
   update tmp_task t
   set current_interval = tt.default_interval,
       retries_left = coalesce(retries_left, tt.num_retries, 0)
   from task_type tt
   where tt.type=t.type;

   insert into task (type, arguments, next_run, status, current_interval, policy,
                     retries_left, priority)
     select type, arguments, next_run, status, current_interval, policy,
            retries_left, priority
     from tmp_task t
     where not exists(select 1
                      from task
                      where type = t.type and
                            arguments->'args' = t.arguments->'args' and
                            arguments->'kwargs' = t.arguments->'kwargs' and
                            policy = t.policy and
                            priority is not distinct from t.priority and
                            status = t.status);

   return query
     select distinct t.*
     from tmp_task tt inner join task t on (
       tt.type = t.type and
       tt.arguments->'args' = t.arguments->'args' and
       tt.arguments->'kwargs' = t.arguments->'kwargs' and
       tt.policy = t.policy and
       tt.priority is not distinct from t.priority and
       tt.status = t.status
     );
 end;
 $$;

 comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table';

 create or replace function swh_scheduler_peek_no_priority_tasks (task_type text, ts timestamptz default now(),
                                                                  num_tasks bigint default NULL)
   returns setof task
   language sql
   stable
 as $$
 select * from task
   where next_run <= ts
         and type = task_type
         and status = 'next_run_not_scheduled'
         and priority is null
   order by next_run
   limit num_tasks
   for update skip locked;
 $$;

 comment on function swh_scheduler_peek_no_priority_tasks (text, timestamptz, bigint)
 is 'Retrieve tasks without priority';

 create or replace function swh_scheduler_nb_priority_tasks(num_tasks_priority bigint, task_priority task_priority)
   returns numeric
   language sql stable
 as $$
   select ceil(num_tasks_priority * (select ratio from priority_ratio where id = task_priority)) :: numeric
 $$;

 comment on function swh_scheduler_nb_priority_tasks (bigint, task_priority)
 is 'Given a priority task and a total number, compute the number of tasks to read';

 create or replace function swh_scheduler_peek_tasks_with_priority (task_type text, ts timestamptz default now(),
                                                                    num_tasks_priority bigint default NULL,
                                                                    task_priority task_priority default 'normal')
   returns setof task
   language sql
   stable
 as $$
   select *
   from task t
   where t.next_run <= ts
         and t.type = task_type
         and t.status = 'next_run_not_scheduled'
         and t.priority = task_priority
   order by t.next_run
   limit num_tasks_priority
   for update skip locked;
 $$;

 comment on function swh_scheduler_peek_tasks_with_priority(text, timestamptz, bigint, task_priority)
 is 'Retrieve tasks with a given priority';

 create or replace function swh_scheduler_peek_priority_tasks (task_type text, ts timestamptz default now(),
                                                               num_tasks_priority bigint default NULL)
   returns setof task
   language plpgsql
 as $$
 declare
   r record;
   count_row bigint;
   nb_diff bigint;
   nb_high bigint;
   nb_normal bigint;
   nb_low bigint;
 begin
   -- expected values to fetch
   select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'high') into nb_high;
   select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'normal') into nb_normal;
   select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'low') into nb_low;
   nb_diff := 0;
   count_row := 0;

   for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_high, 'high')
   loop
     count_row := count_row + 1;
     return next r;
   end loop;

   if count_row < nb_high then
     nb_normal := nb_normal + nb_high - count_row;
   end if;

   count_row := 0;
   for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_normal, 'normal')
   loop
     count_row := count_row + 1;
     return next r;
   end loop;

   if count_row < nb_normal then
     nb_low := nb_low + nb_normal - count_row;
   end if;

   return query select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_low, 'low');
 end
 $$;

 comment on function swh_scheduler_peek_priority_tasks(text, timestamptz, bigint)
 is 'Retrieve priority tasks';

 create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(),
                                                            num_tasks bigint default NULL, num_tasks_priority bigint default NULL)
   returns setof task
   language plpgsql
 as $$
 declare
   r record;
   count_row bigint;
   nb_diff bigint;
   nb_tasks bigint;
 begin
   count_row := 0;

   for r in select * from swh_scheduler_peek_priority_tasks(task_type, ts, num_tasks_priority)
            order by priority, next_run
   loop
     count_row := count_row + 1;
     return next r;
   end loop;

   if count_row < num_tasks_priority then
     nb_tasks := num_tasks + num_tasks_priority - count_row;
   else
     nb_tasks := num_tasks;
   end if;

   for r in select * from swh_scheduler_peek_no_priority_tasks(task_type, ts, nb_tasks)
            order by priority, next_run
   loop
     return next r;
   end loop;

   return;
 end
 $$;

 comment on function swh_scheduler_peek_ready_tasks(text, timestamptz, bigint, bigint)
 is 'Retrieve tasks with/without priority in order';

 create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(),
                                                            num_tasks bigint default NULL,
                                                            num_tasks_priority bigint default NULL)
   returns setof task
   language sql
 as $$
   update task
     set status='next_run_scheduled'
   from (
     select id from swh_scheduler_peek_ready_tasks(task_type, ts, num_tasks, num_tasks_priority)
   ) next_tasks
   where task.id = next_tasks.id
   returning task.*;
 $$;

 comment on function swh_scheduler_grab_ready_tasks (text, timestamptz, bigint, bigint)
 is 'Grab tasks ready for scheduling and change their status';

 create or replace function swh_scheduler_schedule_task_run (task_id bigint,
                                                             backend_id text,
                                                             metadata jsonb default '{}'::jsonb,
                                                             ts timestamptz default now())
   returns task_run
   language sql
 as $$
   insert into task_run (task, backend_id, metadata, scheduled, status)
     values (task_id, backend_id, metadata, ts, 'scheduled')
   returning *;
 $$;

 create or replace function swh_scheduler_mktemp_task_run ()
   returns void
   language sql
 as $$
   create temporary table tmp_task_run (
     like task_run excluding indexes
   ) on commit drop;
   alter table tmp_task_run
     drop column id,
     drop column status;
 $$;

 comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling';

 create or replace function swh_scheduler_schedule_task_run_from_temp ()
   returns void
   language plpgsql
 as $$
 begin
   insert into task_run (task, backend_id, metadata, scheduled, status)
     select task, backend_id, metadata, scheduled, 'scheduled'
     from tmp_task_run;
   return;
 end;
 $$;

 create or replace function swh_scheduler_start_task_run (backend_id text,
                                                          metadata jsonb default '{}'::jsonb,
                                                          ts timestamptz default now())
   returns task_run
   language sql
 as $$
   update task_run
     set started = ts,
         status = 'started',
         metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata
     where task_run.backend_id = swh_scheduler_start_task_run.backend_id
   returning *;
 $$;

 create or replace function swh_scheduler_end_task_run (backend_id text,
                                                        status task_run_status,
                                                        metadata jsonb default '{}'::jsonb,
                                                        ts timestamptz default now())
   returns task_run
   language sql
 as $$
   update task_run
     set ended = ts,
         status = swh_scheduler_end_task_run.status,
         metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata
     where task_run.backend_id = swh_scheduler_end_task_run.backend_id
   returning *;
 $$;

 create type task_record as (
   task_id bigint,
   task_policy task_policy,
   task_status task_status,
   task_run_id bigint,
   arguments jsonb,
   type text,
   backend_id text,
   metadata jsonb,
   scheduled timestamptz,
   started timestamptz,
   ended timestamptz,
   task_run_status task_run_status
 );

-create index task_run_id_asc_idx on task_run(task asc, started asc);
-
 create or replace function swh_scheduler_task_to_archive(
   ts_after timestamptz, ts_before timestamptz, last_id bigint default -1,
   lim bigint default 10)
   returns setof task_record
   language sql stable
 as $$
   select t.id as task_id, t.policy as task_policy,
          t.status as task_status, tr.id as task_run_id,
          t.arguments, t.type, tr.backend_id, tr.metadata,
          tr.scheduled, tr.started, tr.ended, tr.status as task_run_status
   from task_run tr inner join task t on tr.task=t.id
   where ((t.policy = 'oneshot' and t.status in ('completed', 'disabled')) or
          (t.policy = 'recurring' and t.status = 'disabled')) and
         ((ts_after <= tr.started and tr.started < ts_before) or tr.started is null) and
         t.id > last_id
   order by tr.task, tr.started
   limit lim;
 $$;

 comment on function swh_scheduler_task_to_archive (timestamptz, timestamptz, bigint, bigint) is 'Read archivable tasks function';

 create or replace function swh_scheduler_delete_archived_tasks(
   task_ids bigint[], task_run_ids bigint[])
   returns void
   language sql
 as $$
   -- clean up task_run_ids
   delete from task_run where id in (select * from unnest(task_run_ids));
   -- clean up only tasks whose associated task_run are all cleaned up.
   -- Remaining tasks will stay there and will be cleaned up when
   -- remaining data have been indexed
   delete from task
     where id in (select t.id
                  from task t left outer join task_run tr on t.id=tr.task
                  where t.id in (select * from unnest(task_ids))
                  and tr.task is null);
 $$;

 comment on function swh_scheduler_delete_archived_tasks(bigint[], bigint[]) is 'Clean up archived tasks function';

 create or replace function swh_scheduler_update_task_on_task_end ()
   returns trigger
   language plpgsql
 as $$
 declare
   cur_task task%rowtype;
   cur_task_type task_type%rowtype;
   adjustment_factor float;
   new_interval interval;
 begin
   select * from task where id = new.task into cur_task;
   select * from task_type where type = cur_task.type into cur_task_type;

   case
     when new.status = 'permfailed' then
       update task
         set status = 'disabled'
         where id = cur_task.id;
     when new.status in ('eventful', 'uneventful') then
       case
         when cur_task.policy = 'oneshot' then
           update task
             set status = 'completed'
             where id = cur_task.id;
         when cur_task.policy = 'recurring' then
           if new.status = 'uneventful' then
             adjustment_factor := 1/cur_task_type.backoff_factor;
           else
             adjustment_factor := 1/cur_task_type.backoff_factor;
           end if;
           new_interval := greatest(
             cur_task_type.min_interval,
             least(
               cur_task_type.max_interval,
               adjustment_factor * cur_task.current_interval));
           update task
             set status = 'next_run_not_scheduled',
                 next_run = now() + new_interval,
                 current_interval = new_interval,
                 retries_left = coalesce(cur_task_type.num_retries, 0)
             where id = cur_task.id;
       end case;
     else -- new.status in 'failed', 'lost'
       if cur_task.retries_left > 0 then
         update task
           set status = 'next_run_not_scheduled',
               next_run = now() + coalesce(cur_task_type.retry_delay, interval '1 hour'),
               retries_left = cur_task.retries_left - 1
           where id = cur_task.id;
       else -- no retries left
         case
           when cur_task.policy = 'oneshot' then
             update task
               set status = 'disabled'
               where id = cur_task.id;
           when cur_task.policy = 'recurring' then
             update task
               set status = 'next_run_not_scheduled',
                   next_run = now() + cur_task.current_interval,
                   retries_left = coalesce(cur_task_type.num_retries, 0)
               where id = cur_task.id;
         end case;
       end if; -- retries
   end case;
   return null;
 end;
 $$;

 create trigger update_task_on_task_end
   after update of status on task_run
   for each row
   when (new.status NOT IN ('scheduled', 'started'))
   execute procedure swh_scheduler_update_task_on_task_end ();
+
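Taken together, the peek functions implement a weighted draw: `swh_scheduler_nb_priority_tasks` budgets `ceil(total * ratio)` per priority, unfilled high/normal budgets spill down to lower priorities, and `swh_scheduler_peek_ready_tasks` finally tops up with non-priority tasks. A compact Python model of that accounting (list-returning `peek_*` callables stand in for the SQL queries):

```
import math

PRIORITY_RATIOS = {'high': 0.5, 'normal': 0.3, 'low': 0.2}  # from priority_ratio

def nb_priority_tasks(total, priority):
    # ceil(total * ratio), as in swh_scheduler_nb_priority_tasks
    return math.ceil(total * PRIORITY_RATIOS[priority])

def peek_ready_tasks(peek_priority, peek_no_priority, num_tasks, num_tasks_priority):
    # Shortfall in the priority draw enlarges the plain-task budget,
    # mirroring swh_scheduler_peek_ready_tasks.
    prioritized = peek_priority(num_tasks_priority)
    nb_tasks = num_tasks
    if len(prioritized) < num_tasks_priority:
        nb_tasks += num_tasks_priority - len(prioritized)
    return prioritized + peek_no_priority(nb_tasks)

# A batch of 20 prioritized tasks is budgeted 10 high, 6 normal, 4 low ...
assert [nb_priority_tasks(20, p) for p in ('high', 'normal', 'low')] == [10, 6, 4]

# ... and if only 2 of 5 priority slots are filled, the 3 unused slots
# spill over to the plain queue (13 plain tasks fetched, 15 in total).
got = peek_ready_tasks(lambda n: ['p1', 'p2'],
                       lambda n: ['t%d' % i for i in range(n)],
                       num_tasks=10, num_tasks_priority=5)
assert len(got) == 15
```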
diff --git a/swh/scheduler/sql/40-swh-data.sql b/swh/scheduler/sql/50-swh-data.sql
similarity index 100%
rename from swh/scheduler/sql/40-swh-data.sql
rename to swh/scheduler/sql/50-swh-data.sql
diff --git a/swh/scheduler/sql/60-swh-indexes.sql b/swh/scheduler/sql/60-swh-indexes.sql
new file mode 100644
index 0000000..efb58a9
--- /dev/null
+++ b/swh/scheduler/sql/60-swh-indexes.sql
@@ -0,0 +1,11 @@
+create index on task(type);
+create index on task(next_run);
+create index task_args on task using btree ((arguments -> 'args'));
+create index task_kwargs on task using gin ((arguments -> 'kwargs'));
+create index on task(priority);
+
+create index on task_run(task);
+create index on task_run(backend_id);
+
+create index task_run_id_asc_idx on task_run(task asc, started asc);
+
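The numeric prefixes give the split its meaning: schema (30), functions (40), data (50), and only then indexes (60). The test fixture below relies on exactly this ordering when it replays the files; a standalone sketch of that loading order:

```
import glob
import os

from swh.core.utils import numfile_sortkey as sortkey

# Same pattern as the swh_scheduler fixture in conftest.py below:
# numeric sort yields 30-swh-schema.sql, 40-swh-func.sql,
# 50-swh-data.sql, 60-swh-indexes.sql, in that order.
dump_files = sorted(glob.glob('swh/scheduler/sql/*.sql'), key=sortkey)
print([os.path.basename(f) for f in dump_files])
```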
diff --git a/swh/scheduler/tests/conftest.py b/swh/scheduler/tests/conftest.py
index d761f9a..1cfde95 100644
--- a/swh/scheduler/tests/conftest.py
+++ b/swh/scheduler/tests/conftest.py
@@ -1,93 +1,93 @@
 import os
 import pytest
 import glob

 from datetime import timedelta

 from swh.core.utils import numfile_sortkey as sortkey
 from swh.scheduler import get_scheduler
 from swh.scheduler.tests import SQL_DIR

 # make sure we are not fooled by CELERY_ config environment vars
 for var in [x for x in os.environ.keys() if x.startswith('CELERY')]:
     os.environ.pop(var)

 import swh.scheduler.celery_backend.config  # noqa
 # this import is needed here to enforce creation of the celery current app
 # BEFORE the swh_app fixture is called, otherwise the Celery app instance from
 # celery_backend.config becomes the celery.current_app

 DUMP_FILES = os.path.join(SQL_DIR, '*.sql')

 # celery tasks for testing purpose; tasks themselves should be
-# in swh/scheduler/tests/celery_tasks.py
+# in swh/scheduler/tests/tasks.py
 TASK_NAMES = ['ping', 'multiping', 'add', 'error']


 @pytest.fixture(scope='session')
 def celery_enable_logging():
     return True


 @pytest.fixture(scope='session')
 def celery_includes():
     return [
         'swh.scheduler.tests.tasks',
     ]


 @pytest.fixture(scope='session')
 def celery_parameters():
     return {
         'task_cls': 'swh.scheduler.task:SWHTask',
     }


 @pytest.fixture(scope='session')
 def celery_config():
     return {
         'accept_content': ['application/x-msgpack', 'application/json'],
         'task_serializer': 'msgpack',
         'result_serializer': 'msgpack',
     }


 # override the celery_session_app fixture to monkeypatch the 'main'
 # swh.scheduler.celery_backend.config.app Celery application
 # with the test application.
 @pytest.fixture(scope='session')
 def swh_app(celery_session_app):
     swh.scheduler.celery_backend.config.app = celery_session_app
     yield celery_session_app


 @pytest.fixture
 def swh_scheduler(request, postgresql_proc, postgresql):
     scheduler_config = {
         'db': 'postgresql://{user}@{host}:{port}/{dbname}'.format(
             host=postgresql_proc.host,
             port=postgresql_proc.port,
             user='postgres',
             dbname='tests')
     }

     all_dump_files = sorted(glob.glob(DUMP_FILES), key=sortkey)

     cursor = postgresql.cursor()
     for fname in all_dump_files:
         with open(fname) as fobj:
             cursor.execute(fobj.read())
     postgresql.commit()

     scheduler = get_scheduler('local', scheduler_config)

     for taskname in TASK_NAMES:
         scheduler.create_task_type({
             'type': 'swh-test-{}'.format(taskname),
             'description': 'The {} testing task'.format(taskname),
             'backend_name': 'swh.scheduler.tests.tasks.{}'.format(taskname),
             'default_interval': timedelta(days=1),
             'min_interval': timedelta(hours=6),
             'max_interval': timedelta(days=12),
         })

     return scheduler
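The fixture's `swh-test-*` task types are what `create_task_dict` (imported by `test_cli.py` below) schedules against; its output follows the `task.arguments` layout documented in `30-swh-schema.sql`, an `args` list plus a `kwargs` object. A small sketch; the exact sequence type used for `args` is an implementation detail:

```
from swh.scheduler.utils import create_task_dict

# Type and policy come first; keyword arguments land under
# arguments['kwargs'], as in test_list_pending_tasks_one below.
task = create_task_dict('swh-test-ping', 'oneshot', key='value')
print(task['type'], task['policy'])  # swh-test-ping oneshot
print(task['arguments'])             # {'args': ..., 'kwargs': {'key': 'value'}}
```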
diff --git a/swh/scheduler/tests/test_cli.py b/swh/scheduler/tests/test_cli.py
new file mode 100644
index 0000000..7e2a819
--- /dev/null
+++ b/swh/scheduler/tests/test_cli.py
@@ -0,0 +1,191 @@
+# Copyright (C) 2019 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+import datetime
+import re
+import tempfile
+from unittest.mock import patch
+
+from click.testing import CliRunner
+
+from swh.scheduler.cli import cli
+from swh.scheduler.utils import create_task_dict
+
+
+CLI_CONFIG = '''
+scheduler:
+    cls: foo
+    args: {}
+'''
+
+
+def invoke(scheduler, catch_exceptions, args):
+    runner = CliRunner()
+    with patch('swh.scheduler.cli.get_scheduler') as get_scheduler_mock, \
+            tempfile.NamedTemporaryFile('a', suffix='.yml') as config_fd:
+        config_fd.write(CLI_CONFIG)
+        config_fd.seek(0)
+        get_scheduler_mock.return_value = scheduler
+        result = runner.invoke(cli, ['-C' + config_fd.name] + args)
+    if not catch_exceptions and result.exception:
+        print(result.output)
+        raise result.exception
+    return result
+
+
+def test_schedule_tasks(swh_scheduler):
+    csv_data = (
+        b'swh-test-ping;[["arg1", "arg2"]];{"key": "value"};'
+        + datetime.datetime.utcnow().isoformat().encode() + b'\n'
+        + b'swh-test-ping;[["arg3", "arg4"]];{"key": "value"};'
+        + datetime.datetime.utcnow().isoformat().encode() + b'\n')
+    with tempfile.NamedTemporaryFile(suffix='.csv') as csv_fd:
+        csv_fd.write(csv_data)
+        csv_fd.seek(0)
+        result = invoke(swh_scheduler, False, [
+            'task', 'schedule',
+            '-d', ';',
+            csv_fd.name
+        ])
+    expected = r'''
+\[INFO\] swh.core.config -- Loading config file .*
+Created 2 tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: recurring
+  Args:
+    \['arg1', 'arg2'\]
+  Keyword args:
+    key: value
+
+Task 2
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: recurring
+  Args:
+    \['arg3', 'arg4'\]
+  Keyword args:
+    key: value
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_schedule_tasks_columns(swh_scheduler):
+    with tempfile.NamedTemporaryFile(suffix='.csv') as csv_fd:
+        csv_fd.write(
+            b'swh-test-ping;oneshot;["arg1", "arg2"];{"key": "value"}\n')
+        csv_fd.seek(0)
+        result = invoke(swh_scheduler, False, [
+            'task', 'schedule',
+            '-c', 'type', '-c', 'policy', '-c', 'args', '-c', 'kwargs',
+            '-d', ';',
+            csv_fd.name
+        ])
+    expected = r'''
+\[INFO\] swh.core.config -- Loading config file .*
+Created 1 tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Args:
+    arg1
+    arg2
+  Keyword args:
+    key: value
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_schedule_task(swh_scheduler):
+    result = invoke(swh_scheduler, False, [
+        'task', 'add',
+        'swh-test-ping', 'arg1', 'arg2', 'key=value',
+    ])
+    expected = r'''
+\[INFO\] swh.core.config -- Loading config file .*
+Created 1 tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: recurring
+  Args:
+    arg1
+    arg2
+  Keyword args:
+    key: value
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_pending_tasks_none(swh_scheduler):
+    result = invoke(swh_scheduler, False, [
+        'task', 'list-pending', 'swh-test-ping',
+    ])
+
+    expected = r'''
+\[INFO\] swh.core.config -- Loading config file .*
+Found 0 swh-test-ping tasks
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_pending_tasks_one(swh_scheduler):
+    task = create_task_dict('swh-test-ping', 'oneshot', key='value')
+    swh_scheduler.create_tasks([task])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list-pending', 'swh-test-ping',
+    ])
+
+    expected = r'''
+\[INFO\] swh.core.config -- Loading config file .*
+Found 1 swh-test-ping tasks
+
+Task 1
+  Next run: just now \(.*\)
+  Interval: 1 day, 0:00:00
+  Type: swh-test-ping
+  Policy: oneshot
+  Args:
+  Keyword args:
+    key: value
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
+
+
+def test_list_pending_tasks_one_filter(swh_scheduler):
+    task = create_task_dict('swh-test-multiping', 'oneshot', key='value')
+    swh_scheduler.create_tasks([task])
+
+    result = invoke(swh_scheduler, False, [
+        'task', 'list-pending', 'swh-test-ping',
+    ])
+
+    expected = r'''
+\[INFO\] swh.core.config -- Loading config file .*
+Found 0 swh-test-ping tasks
+
+'''.lstrip()
+    assert result.exit_code == 0, result.output
+    assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
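The `invoke` helper above is also a reasonable template for driving the CLI programmatically outside the test suite; a minimal sketch, assuming a real config file rather than the mocked backend (the path below is hypothetical):

```
from click.testing import CliRunner

from swh.scheduler.cli import cli

# '-C' points the CLI at a YAML file with a 'scheduler' section, as in
# CLI_CONFIG above; 'key=value' tokens become kwargs via parse_options
# from cli_utils.py.
runner = CliRunner()
result = runner.invoke(cli, [
    '-C/path/to/config.yml',  # hypothetical config path
    'task', 'add', 'swh-test-ping', 'arg1', 'key=value',
])
print(result.output)
```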
diff --git a/swh/scheduler/tests/test_server.py b/swh/scheduler/tests/test_server.py
new file mode 100644
index 0000000..4d3ea5f
--- /dev/null
+++ b/swh/scheduler/tests/test_server.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2019 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+import copy
+import pytest
+import yaml
+
+from swh.scheduler.api.server import load_and_check_config
+
+
+def prepare_config_file(tmpdir, content, name='config.yml'):
+    """Prepare configuration file in `$tmpdir/name` with content `content`.
+
+    Args:
+        tmpdir (LocalPath): root directory
+        content (str/dict): Content of the file either as string or as a dict.
+                            If a dict, converts the dict into a yaml string.
+        name (str): configuration filename
+
+    Returns:
+        path (str) of the configuration file prepared.
+
+    """
+    config_path = tmpdir / name
+    if isinstance(content, dict):  # convert if needed
+        content = yaml.dump(content)
+    config_path.write_text(content, encoding='utf-8')
+    # pytest on python3.5 does not support LocalPath manipulation, so
+    # convert path to string
+    return str(config_path)
+
+
+def test_load_and_check_config_no_configuration():
+    """Inexistent configuration file raises"""
+    with pytest.raises(EnvironmentError) as e:
+        load_and_check_config(None)
+
+    assert e.value.args[0] == 'Configuration file must be defined'
+
+    config_path = '/some/inexistant/config.yml'
+    with pytest.raises(FileNotFoundError) as e:
+        load_and_check_config(config_path)
+
+    assert e.value.args[0] == 'Configuration file %s does not exist' % (
+        config_path, )
+
+
+def test_load_and_check_config_wrong_configuration(tmpdir):
+    """Wrong configuration raises"""
+    config_path = prepare_config_file(tmpdir, 'something: useless')
+    with pytest.raises(KeyError) as e:
+        load_and_check_config(config_path)
+
+    assert e.value.args[0] == 'Missing \'%scheduler\' configuration'
+
+
+def test_load_and_check_config_remote_config_local_type_raise(tmpdir):
+    """'remote' configuration with server type 'local' raises"""
+    config = {
+        'scheduler': {
+            'cls': 'remote',
+            'args': {}
+        }
+    }
+    config_path = prepare_config_file(tmpdir, config)
+    with pytest.raises(ValueError) as e:
+        load_and_check_config(config_path, type='local')
+
+    assert (
+        e.value.args[0] ==
+        "The scheduler backend can only be started with a 'local'"
+        " configuration"
+    )
+
+
+def test_load_and_check_config_local_incomplete_configuration(tmpdir):
+    """Incomplete 'local' configuration should raise"""
+    config = {
+        'scheduler': {
+            'cls': 'local',
+            'args': {
+                'db': 'database',
+                'something': 'needed-for-test',
+            }
+        }
+    }
+
+    for key in ['db', 'args']:
+        c = copy.deepcopy(config)
+        if key == 'db':
+            source = c['scheduler']['args']
+        else:
+            source = c['scheduler']
+        source.pop(key)
+        config_path = prepare_config_file(tmpdir, c)
+        with pytest.raises(KeyError) as e:
+            load_and_check_config(config_path)
+
+        assert (
+            e.value.args[0] ==
+            "Invalid configuration; missing '%s' config entry" % key
+        )
+
+
+def test_load_and_check_config_local_config_fine(tmpdir):
+    """Local configuration is fine"""
+    config = {
+        'scheduler': {
+            'cls': 'local',
+            'args': {
+                'db': 'db',
+            }
+        }
+    }
+    config_path = prepare_config_file(tmpdir, config)
+    cfg = load_and_check_config(config_path, type='local')
+    assert cfg == config
+
+
+def test_load_and_check_config_remote_config_fine(tmpdir):
+    """Remote configuration is fine"""
+    config = {
+        'scheduler': {
+            'cls': 'remote',
+            'args': {}
+        }
+    }
+    config_path = prepare_config_file(tmpdir, config)
+    cfg = load_and_check_config(config_path, type='any')
+
+    assert cfg == config
diff --git a/version.txt b/version.txt
index d15a069..3ecd929 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-v0.0.48-0-g1d4982a
\ No newline at end of file
+v0.0.49-0-gdc6afe4
\ No newline at end of file
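For reference, here is the contract that the `test_server.py` assertions above pin down. This is a reconstruction from the tests, not the shipped `swh.scheduler.api.server` code; the function name is marked as a sketch accordingly:

```
import os

import yaml

def load_and_check_config_sketch(config_file, type='local'):
    # Reconstructed behavior; error messages mirror the tests above.
    if not config_file:
        raise EnvironmentError('Configuration file must be defined')
    if not os.path.exists(config_file):
        raise FileNotFoundError(
            'Configuration file %s does not exist' % config_file)
    with open(config_file) as f:
        cfg = yaml.safe_load(f)
    vcfg = cfg.get('scheduler')
    if not vcfg:
        raise KeyError("Missing '%scheduler' configuration")
    if type == 'local':
        if vcfg.get('cls') != 'local':
            raise ValueError(
                "The scheduler backend can only be started with a 'local'"
                " configuration")
        args = vcfg.get('args')
        if not args:
            raise KeyError(
                "Invalid configuration; missing 'args' config entry")
        if not args.get('db'):
            raise KeyError(
                "Invalid configuration; missing 'db' config entry")
    return cfg
```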