diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 380c658..69b3349 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,46 +1,40 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.4.0 hooks: - id: trailing-whitespace - id: flake8 - id: check-json - id: check-yaml - repo: https://github.com/codespell-project/codespell rev: v1.16.0 hooks: - id: codespell - repo: local hooks: - id: mypy name: mypy entry: mypy args: [swh] pass_filenames: false language: system types: [python] +- repo: https://github.com/python/black + rev: 19.10b0 + hooks: + - id: black + # unfortunately, we are far from being able to enable this... # - repo: https://github.com/PyCQA/pydocstyle.git # rev: 4.0.0 # hooks: # - id: pydocstyle # name: pydocstyle # description: pydocstyle is a static analysis tool for checking compliance with Python docstring conventions. # entry: pydocstyle --convention=google # language: python # types: [python] -# black requires py3.6+ -#- repo: https://github.com/python/black -# rev: 19.3b0 -# hooks: -# - id: black -# language_version: python3 -#- repo: https://github.com/asottile/blacken-docs -# rev: v1.0.0-1 -# hooks: -# - id: blacken-docs -# additional_dependencies: [black==19.3b0] diff --git a/bin/swh-worker-control b/bin/swh-worker-control index 15516f7..b6ff4e7 100755 --- a/bin/swh-worker-control +++ b/bin/swh-worker-control @@ -1,268 +1,284 @@ #!/usr/bin/env python3 # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from fnmatch import fnmatch from operator import itemgetter import os import sys import click def list_remote_workers(inspect): ping_replies = inspect.ping() if not ping_replies: return {} workers = list(sorted(ping_replies)) ret = {} for worker_name in workers: - if not worker_name.startswith('celery@'): - print('Unsupported worker: %s' % worker_name, file=sys.stderr) + if not worker_name.startswith("celery@"): + print("Unsupported worker: %s" % worker_name, file=sys.stderr) continue - type, host = worker_name[len('celery@'):].split('.', 1) + type, host = worker_name[len("celery@") :].split(".", 1) worker = { - 'name': worker_name, - 'host': host, - 'type': type, + "name": worker_name, + "host": host, + "type": type, } ret[worker_name] = worker return ret def make_filters(filter_host, filter_type): """Parse the filters and create test functions""" def include(field, value): def filter(worker, field=field, value=value): return fnmatch(worker[field], value) + return filter def exclude(field, value): def filter(worker, field=field, value=value): return not fnmatch(worker[field], value) + return filter filters = [] for host in filter_host: - if host.startswith('-'): - filters.append(exclude('host', host[1:])) + if host.startswith("-"): + filters.append(exclude("host", host[1:])) else: - filters.append(include('host', host)) + filters.append(include("host", host)) for type_ in filter_type: - if type_.startswith('-'): - filters.append(exclude('type', type_[1:])) + if type_.startswith("-"): + filters.append(exclude("type", type_[1:])) else: - filters.append(include('type', type_)) + filters.append(include("type", type_)) return filters def filter_workers(workers, filters): """Filter workers according to the set criteria""" - return {name: worker - for name, worker in workers.items() - if all(check(worker) for check 
in filters)} + return { + name: worker + for name, worker in workers.items() + if all(check(worker) for check in filters) + } def get_clock_offsets(workers, inspect): """Add a clock_offset entry for each worker""" - err_msg = 'Could not get monotonic clock for {worker}' + err_msg = "Could not get monotonic clock for {worker}" t = datetime.datetime.now(tz=datetime.timezone.utc) - for worker, clock in inspect._request('monotonic').items(): - monotonic = clock.get('monotonic') + for worker, clock in inspect._request("monotonic").items(): + monotonic = clock.get("monotonic") if monotonic is None: monotonic = 0 click.echo(err_msg.format(worker=worker), err=True) dt = datetime.timedelta(seconds=monotonic) - workers[worker]['clock_offset'] = t - dt + workers[worker]["clock_offset"] = t - dt def worker_to_wallclock(worker, monotonic): """Convert a monotonic timestamp from a worker to a wall clock time""" dt = datetime.timedelta(seconds=monotonic) - return worker['clock_offset'] + dt + return worker["clock_offset"] + dt @click.group() -@click.option('--instance-config', metavar='CONFIG', default=None, - help='Use this worker instance configuration') -@click.option('--host', metavar='HOSTNAME_FILTER', multiple=True, - help='Filter by hostname') -@click.option('--type', metavar='WORKER_TYPE_FILTER', multiple=True, - help='Filter by worker type') -@click.option('--timeout', metavar='TIMEOUT', type=float, default=1.0, - help='Timeout for remote control communication') -@click.option('--debug/--no-debug', default=False, help='Turn on debugging') +@click.option( + "--instance-config", + metavar="CONFIG", + default=None, + help="Use this worker instance configuration", +) +@click.option( + "--host", metavar="HOSTNAME_FILTER", multiple=True, help="Filter by hostname" +) +@click.option( + "--type", metavar="WORKER_TYPE_FILTER", multiple=True, help="Filter by worker type" +) +@click.option( + "--timeout", + metavar="TIMEOUT", + type=float, + default=1.0, + help="Timeout for remote control communication", +) +@click.option("--debug/--no-debug", default=False, help="Turn on debugging") @click.pass_context def cli(ctx, debug, timeout, instance_config, host, type): """Manage the Software Heritage workers Filters support globs; a filter starting with a "-" excludes the corresponding values. 
""" if instance_config: - os.environ['SWH_WORKER_INSTANCE'] = instance_config + os.environ["SWH_WORKER_INSTANCE"] = instance_config from swh.scheduler.celery_backend.config import app + full_inspect = app.control.inspect(timeout=timeout) workers = filter_workers( - list_remote_workers(full_inspect), - make_filters(host, type) + list_remote_workers(full_inspect), make_filters(host, type) ) - ctx.obj['workers'] = workers + ctx.obj["workers"] = workers destination = list(workers) - inspect = app.control.inspect(destination=destination, - timeout=timeout) - ctx.obj['inspect'] = inspect + inspect = app.control.inspect(destination=destination, timeout=timeout) + ctx.obj["inspect"] = inspect get_clock_offsets(workers, inspect) - ctx.obj['control'] = app.control - ctx.obj['destination'] = destination - ctx.obj['timeout'] = timeout - ctx.obj['debug'] = debug + ctx.obj["control"] = app.control + ctx.obj["destination"] = destination + ctx.obj["timeout"] = timeout + ctx.obj["debug"] = debug @cli.command() @click.pass_context def list_workers(ctx): """List the currently running workers""" - workers = ctx.obj['workers'] + workers = ctx.obj["workers"] for worker_name, worker in sorted(workers.items()): click.echo("{type} alive on {host}".format(**worker)) if not workers: sys.exit(2) @cli.command() @click.pass_context def list_tasks(ctx): """List the tasks currently running on workers""" - task_template = ('{worker} {name}' - '[{id} ' - 'started={started:%Y-%m-%mT%H:%M:%S} ' - 'pid={worker_pid}] {args} {kwargs}') - inspect = ctx.obj['inspect'] - workers = ctx.obj['workers'] + task_template = ( + "{worker} {name}" + "[{id} " + "started={started:%Y-%m-%mT%H:%M:%S} " + "pid={worker_pid}] {args} {kwargs}" + ) + inspect = ctx.obj["inspect"] + workers = ctx.obj["workers"] active = inspect.active() if not active: - click.echo('No reply from workers', err=True) + click.echo("No reply from workers", err=True) sys.exit(2) has_tasks = False for worker_name, tasks in sorted(active.items()): worker = workers[worker_name] if not tasks: click.echo("No active tasks on {name}".format(**worker), err=True) - for task in sorted(tasks, key=itemgetter('time_start')): - task['started'] = worker_to_wallclock(worker, task['time_start']) + for task in sorted(tasks, key=itemgetter("time_start")): + task["started"] = worker_to_wallclock(worker, task["time_start"]) click.echo(task_template.format(worker=worker_name, **task)) has_tasks = True if not has_tasks: sys.exit(2) @cli.command() @click.pass_context def list_queues(ctx): """List all the queues currently enabled on the workers""" - inspect = ctx.obj['inspect'] + inspect = ctx.obj["inspect"] active = inspect.active_queues() if not active: - click.echo('No reply from workers', err=True) + click.echo("No reply from workers", err=True) sys.exit(2) has_queues = False for worker_name, queues in sorted(active.items()): - queues = sorted(queue['name'] for queue in queues) + queues = sorted(queue["name"] for queue in queues) if queues: - click.echo('{worker} {queues}'.format(worker=worker_name, - queues=' '.join(queues))) + click.echo( + "{worker} {queues}".format(worker=worker_name, queues=" ".join(queues)) + ) has_queues = True else: - click.echo('No queues for {worker}'.format(worker=worker_name), - err=True) + click.echo("No queues for {worker}".format(worker=worker_name), err=True) if not has_queues: sys.exit(2) @cli.command() -@click.option('--noop', is_flag=True, default=False, help='Do not proceed') -@click.argument('queues', nargs=-1) +@click.option("--noop", is_flag=True, 
default=False, help="Do not proceed") +@click.argument("queues", nargs=-1) @click.pass_context def remove_queues(ctx, noop, queues): """Cancel the queue for the given workers""" - msg_template = 'Canceling queue {queue} on worker {worker}{noop}' + msg_template = "Canceling queue {queue} on worker {worker}{noop}" - inspect = ctx.obj['inspect'] - control = ctx.obj['control'] - timeout = ctx.obj['timeout'] + inspect = ctx.obj["inspect"] + control = ctx.obj["control"] + timeout = ctx.obj["timeout"] active = inspect.active_queues() if not queues: - queues = ['*'] + queues = ["*"] if not active: - click.echo('No reply from workers', err=True) + click.echo("No reply from workers", err=True) sys.exit(2) for worker, active_queues in sorted(active.items()): - for queue in sorted(active_queues, key=itemgetter('name')): - if any(fnmatch(queue['name'], name) for name in queues): - msg = msg_template.format(queue=queue['name'], worker=worker, - noop=' (noop)' if noop else '') + for queue in sorted(active_queues, key=itemgetter("name")): + if any(fnmatch(queue["name"], name) for name in queues): + msg = msg_template.format( + queue=queue["name"], worker=worker, noop=" (noop)" if noop else "" + ) click.echo(msg, err=True) if not noop: - control.cancel_consumer(queue['name'], - destination=[worker], - timeout=timeout) + control.cancel_consumer( + queue["name"], destination=[worker], timeout=timeout + ) @cli.command() -@click.option('--noop', is_flag=True, default=False, help='Do not proceed') -@click.argument('queues', nargs=-1) +@click.option("--noop", is_flag=True, default=False, help="Do not proceed") +@click.argument("queues", nargs=-1) @click.pass_context def add_queues(ctx, noop, queues): """Start the queue for the given workers""" - msg_template = 'Starting queue {queue} on worker {worker}{noop}' + msg_template = "Starting queue {queue} on worker {worker}{noop}" - control = ctx.obj['control'] - timeout = ctx.obj['timeout'] - workers = ctx.obj['workers'] + control = ctx.obj["control"] + timeout = ctx.obj["timeout"] + workers = ctx.obj["workers"] if not workers: - click.echo('No reply from workers', err=True) + click.echo("No reply from workers", err=True) sys.exit(2) for worker in sorted(workers): for queue in queues: - msg = msg_template.format(queue=queue, worker=worker, - noop=' (noop)' if noop else '') + msg = msg_template.format( + queue=queue, worker=worker, noop=" (noop)" if noop else "" + ) click.echo(msg, err=True) if not noop: - ret = control.add_consumer(queue, - destination=[worker], - timeout=timeout) + ret = control.add_consumer(queue, destination=[worker], timeout=timeout) print(ret) -if __name__ == '__main__': +if __name__ == "__main__": cli(obj={}) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..8d79b7e --- /dev/null +++ b/setup.cfg @@ -0,0 +1,6 @@ +[flake8] +# E203: whitespaces before ':' +# E231: missing whitespace after ',' +# W503: line break before binary operator +ignore = E203,E231,W503 +max-line-length = 88 diff --git a/setup.py b/setup.py index b3829b4..d3f3d49 100755 --- a/setup.py +++ b/setup.py @@ -1,71 +1,71 @@ #!/usr/bin/env python3 # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from setuptools import setup, find_packages from os import path from io import open here = path.abspath(path.dirname(__file__)) # Get the long description from the README 
file -with open(path.join(here, 'README.md'), encoding='utf-8') as f: +with open(path.join(here, "README.md"), encoding="utf-8") as f: long_description = f.read() def parse_requirements(name=None): if name: - reqf = 'requirements-%s.txt' % name + reqf = "requirements-%s.txt" % name else: - reqf = 'requirements.txt' + reqf = "requirements.txt" requirements = [] if not path.exists(reqf): return requirements with open(reqf) as f: for line in f.readlines(): line = line.strip() - if not line or line.startswith('#'): + if not line or line.startswith("#"): continue requirements.append(line) return requirements setup( - name='swh.scheduler', - description='Software Heritage Scheduler', + name="swh.scheduler", + description="Software Heritage Scheduler", long_description=long_description, - long_description_content_type='text/markdown', - author='Software Heritage developers', - author_email='swh-devel@inria.fr', - url='https://forge.softwareheritage.org/diffusion/DSCH/', + long_description_content_type="text/markdown", + author="Software Heritage developers", + author_email="swh-devel@inria.fr", + url="https://forge.softwareheritage.org/diffusion/DSCH/", packages=find_packages(), - scripts=['bin/swh-worker-control'], - setup_requires=['vcversioner'], - install_requires=parse_requirements() + parse_requirements('swh'), - extras_require={'testing': parse_requirements('test')}, + scripts=["bin/swh-worker-control"], + setup_requires=["vcversioner"], + install_requires=parse_requirements() + parse_requirements("swh"), + extras_require={"testing": parse_requirements("test")}, vcversioner={}, include_package_data=True, - entry_points=''' + entry_points=""" [console_scripts] swh-scheduler=swh.scheduler.cli:main [swh.cli.subcommands] scheduler=swh.scheduler.cli:cli - ''', + """, classifiers=[ "Programming Language :: Python :: 3", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Operating System :: OS Independent", "Development Status :: 5 - Production/Stable", ], project_urls={ - 'Bug Reports': 'https://forge.softwareheritage.org/maniphest', - 'Funding': 'https://www.softwareheritage.org/donate', - 'Source': 'https://forge.softwareheritage.org/source/swh-scheduler', + "Bug Reports": "https://forge.softwareheritage.org/maniphest", + "Funding": "https://www.softwareheritage.org/donate", + "Source": "https://forge.softwareheritage.org/source/swh-scheduler", }, ) diff --git a/swh/scheduler/__init__.py b/swh/scheduler/__init__.py index 371324e..a0b72f2 100644 --- a/swh/scheduler/__init__.py +++ b/swh/scheduler/__init__.py @@ -1,71 +1,68 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Any, Dict # Percentage of tasks with priority to schedule PRIORITY_SLOT = 0.6 DEFAULT_CONFIG = { - 'scheduler': ('dict', { - 'cls': 'local', - 'args': { - 'db': 'dbname=softwareheritage-scheduler-dev', - }, - }) + "scheduler": ( + "dict", + {"cls": "local", "args": {"db": "dbname=softwareheritage-scheduler-dev",},}, + ) } # current configuration. To be set by the config loading mechanism CONFIG = {} # type: Dict[str, Any] def compute_nb_tasks_from(num_tasks): """Compute and returns the tuple, number of tasks without priority, number of tasks with priority. 
Args: num_tasks (int): Returns: tuple number of tasks without priority (int), number of tasks with priority (int) """ if not num_tasks: return None, None - return (int((1 - PRIORITY_SLOT) * num_tasks), - int(PRIORITY_SLOT * num_tasks)) + return (int((1 - PRIORITY_SLOT) * num_tasks), int(PRIORITY_SLOT * num_tasks)) def get_scheduler(cls, args={}): """ Get a scheduler object of class `scheduler_class` with arguments `scheduler_args`. Args: scheduler (dict): dictionary with keys: cls (str): scheduler's class, either 'local' or 'remote' args (dict): dictionary with keys, default to empty. Returns: an instance of swh.scheduler, either local or remote: local: swh.scheduler.backend.SchedulerBackend remote: swh.scheduler.api.client.RemoteScheduler Raises: ValueError if passed an unknown storage class. """ - if cls == 'remote': + if cls == "remote": from .api.client import RemoteScheduler as SchedulerBackend - elif cls == 'local': + elif cls == "local": from .backend import SchedulerBackend else: - raise ValueError('Unknown swh.scheduler class `%s`' % cls) + raise ValueError("Unknown swh.scheduler class `%s`" % cls) return SchedulerBackend(**args) diff --git a/swh/scheduler/api/client.py b/swh/scheduler/api/client.py index 8ae47fc..4cfe0df 100644 --- a/swh/scheduler/api/client.py +++ b/swh/scheduler/api/client.py @@ -1,109 +1,142 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import RPCClient class RemoteScheduler(RPCClient): """Proxy to a remote scheduler API """ + def close_connection(self): - return self.post('close_connection', {}) + return self.post("close_connection", {}) - def set_status_tasks(self, task_ids, status='disabled', next_run=None): - return self.post('set_status_tasks', dict( - task_ids=task_ids, status=status, next_run=next_run)) + def set_status_tasks(self, task_ids, status="disabled", next_run=None): + return self.post( + "set_status_tasks", + dict(task_ids=task_ids, status=status, next_run=next_run), + ) def create_task_type(self, task_type): - return self.post('create_task_type', {'task_type': task_type}) + return self.post("create_task_type", {"task_type": task_type}) def get_task_type(self, task_type_name): - return self.post('get_task_type', {'task_type_name': task_type_name}) + return self.post("get_task_type", {"task_type_name": task_type_name}) def get_task_types(self): - return self.post('get_task_types', {}) + return self.post("get_task_types", {}) def create_tasks(self, tasks): - return self.post('create_tasks', {'tasks': tasks}) + return self.post("create_tasks", {"tasks": tasks}) def disable_tasks(self, task_ids): - return self.post('disable_tasks', {'task_ids': task_ids}) + return self.post("disable_tasks", {"task_ids": task_ids}) def get_tasks(self, task_ids): - return self.post('get_tasks', {'task_ids': task_ids}) + return self.post("get_tasks", {"task_ids": task_ids}) def get_task_runs(self, task_ids, limit=None): + return self.post("get_task_runs", {"task_ids": task_ids, "limit": limit}) + + def search_tasks( + self, + task_id=None, + task_type=None, + status=None, + priority=None, + policy=None, + before=None, + after=None, + limit=None, + ): + return self.post( + "search_tasks", + dict( + task_id=task_id, + task_type=task_type, + status=status, + priority=priority, + policy=policy, + before=before, + after=after, + limit=limit, + ), + ) 
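[Reviewer note, not part of the patch] The reformatted RPC client above keeps its behaviour: each RemoteScheduler method POSTs its keyword arguments to the server route of the same name. A minimal usage sketch, assuming the client is constructed with the service URL as swh.core.api.RPCClient subclasses conventionally are (the URL and the "load-git" task type below are hypothetical):

    from swh.scheduler.api.client import RemoteScheduler

    # Assumed constructor argument: RPCClient subclasses take the service URL.
    scheduler = RemoteScheduler(url="http://localhost:5008/")

    # search_tasks() posts its keyword arguments to the /search_tasks route;
    # "next_run_not_scheduled" is the default status set by create_tasks().
    pending = scheduler.search_tasks(
        task_type="load-git", status="next_run_not_scheduled", limit=10
    )
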
+ + def peek_ready_tasks( + self, task_type, timestamp=None, num_tasks=None, num_tasks_priority=None + ): + return self.post( + "peek_ready_tasks", + { + "task_type": task_type, + "timestamp": timestamp, + "num_tasks": num_tasks, + "num_tasks_priority": num_tasks_priority, + }, + ) + + def grab_ready_tasks( + self, task_type, timestamp=None, num_tasks=None, num_tasks_priority=None + ): return self.post( - 'get_task_runs', {'task_ids': task_ids, 'limit': limit}) - - def search_tasks(self, task_id=None, task_type=None, status=None, - priority=None, policy=None, before=None, after=None, - limit=None): - return self.post('search_tasks', dict( - task_id=task_id, task_type=task_type, status=status, - priority=priority, policy=policy, before=before, after=after, - limit=limit)) - - def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None, - num_tasks_priority=None): - return self.post('peek_ready_tasks', { - 'task_type': task_type, - 'timestamp': timestamp, - 'num_tasks': num_tasks, - 'num_tasks_priority': num_tasks_priority, - }) - - def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None, - num_tasks_priority=None): - return self.post('grab_ready_tasks', { - 'task_type': task_type, - 'timestamp': timestamp, - 'num_tasks': num_tasks, - 'num_tasks_priority': num_tasks_priority, - }) - - def schedule_task_run(self, task_id, backend_id, metadata=None, - timestamp=None): - return self.post('schedule_task_run', { - 'task_id': task_id, - 'backend_id': backend_id, - 'metadata': metadata, - 'timestamp': timestamp, - }) + "grab_ready_tasks", + { + "task_type": task_type, + "timestamp": timestamp, + "num_tasks": num_tasks, + "num_tasks_priority": num_tasks_priority, + }, + ) + + def schedule_task_run(self, task_id, backend_id, metadata=None, timestamp=None): + return self.post( + "schedule_task_run", + { + "task_id": task_id, + "backend_id": backend_id, + "metadata": metadata, + "timestamp": timestamp, + }, + ) def mass_schedule_task_runs(self, task_runs): - return self.post('mass_schedule_task_runs', {'task_runs': task_runs}) + return self.post("mass_schedule_task_runs", {"task_runs": task_runs}) def start_task_run(self, backend_id, metadata=None, timestamp=None): - return self.post('start_task_run', { - 'backend_id': backend_id, - 'metadata': metadata, - 'timestamp': timestamp, - }) + return self.post( + "start_task_run", + {"backend_id": backend_id, "metadata": metadata, "timestamp": timestamp,}, + ) def end_task_run(self, backend_id, status, metadata=None, timestamp=None): - return self.post('end_task_run', { - 'backend_id': backend_id, - 'status': status, - 'metadata': metadata, - 'timestamp': timestamp, - }) - - def filter_task_to_archive(self, after_ts, before_ts, - limit=10, page_token=None): - return self.post('filter_task_to_archive', { - 'after_ts': after_ts, - 'before_ts': before_ts, - 'limit': limit, - 'page_token': page_token, - }) + return self.post( + "end_task_run", + { + "backend_id": backend_id, + "status": status, + "metadata": metadata, + "timestamp": timestamp, + }, + ) + + def filter_task_to_archive(self, after_ts, before_ts, limit=10, page_token=None): + return self.post( + "filter_task_to_archive", + { + "after_ts": after_ts, + "before_ts": before_ts, + "limit": limit, + "page_token": page_token, + }, + ) def delete_archived_tasks(self, task_ids): - return self.post('delete_archived_tasks', {'task_ids': task_ids}) + return self.post("delete_archived_tasks", {"task_ids": task_ids}) def get_priority_ratios(self): - return self.get('get_priority_ratios') + 
return self.get("get_priority_ratios") diff --git a/swh/scheduler/api/server.py b/swh/scheduler/api/server.py index 8f243be..02ad19f 100644 --- a/swh/scheduler/api/server.py +++ b/swh/scheduler/api/server.py @@ -1,266 +1,266 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import logging from flask import request, Flask from swh.core import config -from swh.core.api import (decode_request, - error_handler, - encode_data_server as encode_data) +from swh.core.api import ( + decode_request, + error_handler, + encode_data_server as encode_data, +) from swh.core.api import negotiate, JSONFormatter, MsgpackFormatter from swh.scheduler import get_scheduler as get_scheduler_from app = Flask(__name__) scheduler = None @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) def get_sched(): global scheduler if not scheduler: - scheduler = get_scheduler_from(**app.config['scheduler']) + scheduler = get_scheduler_from(**app.config["scheduler"]) return scheduler def has_no_empty_params(rule): return len(rule.defaults or ()) >= len(rule.arguments or ()) -@app.route('/') +@app.route("/") @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def index(): - return 'SWH Scheduler API server' + return "SWH Scheduler API server" -@app.route('/close_connection', methods=['GET', 'POST']) +@app.route("/close_connection", methods=["GET", "POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def close_connection(): return get_sched().close_connection() -@app.route('/set_status_tasks', methods=['POST']) +@app.route("/set_status_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def set_status_tasks(): return get_sched().set_status_tasks(**decode_request(request)) -@app.route('/create_task_type', methods=['POST']) +@app.route("/create_task_type", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def create_task_type(): return get_sched().create_task_type(**decode_request(request)) -@app.route('/get_task_type', methods=['POST']) +@app.route("/get_task_type", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_task_type(): return get_sched().get_task_type(**decode_request(request)) -@app.route('/get_task_types', methods=['GET', 'POST']) +@app.route("/get_task_types", methods=["GET", "POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_task_types(): return get_sched().get_task_types(**decode_request(request)) -@app.route('/create_tasks', methods=['POST']) +@app.route("/create_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def create_tasks(): return get_sched().create_tasks(**decode_request(request)) -@app.route('/disable_tasks', methods=['POST']) +@app.route("/disable_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def disable_tasks(): return get_sched().disable_tasks(**decode_request(request)) -@app.route('/get_tasks', methods=['POST']) +@app.route("/get_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_tasks(): return get_sched().get_tasks(**decode_request(request)) -@app.route('/get_task_runs', methods=['POST']) +@app.route("/get_task_runs", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_task_runs(): return 
get_sched().get_task_runs(**decode_request(request)) -@app.route('/search_tasks', methods=['POST']) +@app.route("/search_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def search_tasks(): return get_sched().search_tasks(**decode_request(request)) -@app.route('/peek_ready_tasks', methods=['POST']) +@app.route("/peek_ready_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def peek_ready_tasks(): return get_sched().peek_ready_tasks(**decode_request(request)) -@app.route('/grab_ready_tasks', methods=['POST']) +@app.route("/grab_ready_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def grab_ready_tasks(): return get_sched().grab_ready_tasks(**decode_request(request)) -@app.route('/schedule_task_run', methods=['POST']) +@app.route("/schedule_task_run", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def schedule_task_run(): return get_sched().schedule_task_run(**decode_request(request)) -@app.route('/mass_schedule_task_runs', methods=['POST']) +@app.route("/mass_schedule_task_runs", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def mass_schedule_task_runs(): return get_sched().mass_schedule_task_runs(**decode_request(request)) -@app.route('/start_task_run', methods=['POST']) +@app.route("/start_task_run", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def start_task_run(): return get_sched().start_task_run(**decode_request(request)) -@app.route('/end_task_run', methods=['POST']) +@app.route("/end_task_run", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def end_task_run(): return get_sched().end_task_run(**decode_request(request)) -@app.route('/filter_task_to_archive', methods=['POST']) +@app.route("/filter_task_to_archive", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def filter_task_to_archive(): return get_sched().filter_task_to_archive(**decode_request(request)) -@app.route('/delete_archived_tasks', methods=['POST']) +@app.route("/delete_archived_tasks", methods=["POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def delete_archived_tasks(): return get_sched().delete_archived_tasks(**decode_request(request)) -@app.route('/get_priority_ratios', methods=['GET', 'POST']) +@app.route("/get_priority_ratios", methods=["GET", "POST"]) @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def get_priority_ratios(): return get_sched().get_priority_ratios(**decode_request(request)) @app.route("/site-map") @negotiate(MsgpackFormatter) @negotiate(JSONFormatter) def site_map(): links = [] sched = get_sched() for rule in app.url_map.iter_rules(): if has_no_empty_params(rule) and hasattr(sched, rule.endpoint): - links.append(dict( - rule=rule.rule, - description=getattr(sched, rule.endpoint).__doc__)) + links.append( + dict(rule=rule.rule, description=getattr(sched, rule.endpoint).__doc__) + ) # links is now a list of url, endpoint tuples return links -def load_and_check_config(config_file, type='local'): +def load_and_check_config(config_file, type="local"): """Check the minimal configuration is set to run the api or raise an error explanation. Args: config_file (str): Path to the configuration file to load type (str): configuration type. For 'local' type, more checks are done. 
Raises: Error if the setup is not as expected Returns: configuration as a dict """ if not config_file: - raise EnvironmentError('Configuration file must be defined') + raise EnvironmentError("Configuration file must be defined") if not os.path.exists(config_file): - raise FileNotFoundError('Configuration file %s does not exist' % ( - config_file, )) + raise FileNotFoundError("Configuration file %s does not exist" % (config_file,)) cfg = config.read(config_file) - vcfg = cfg.get('scheduler') + vcfg = cfg.get("scheduler") if not vcfg: raise KeyError("Missing '%scheduler' configuration") - if type == 'local': - cls = vcfg.get('cls') - if cls != 'local': + if type == "local": + cls = vcfg.get("cls") + if cls != "local": raise ValueError( "The scheduler backend can only be started with a 'local' " - "configuration") + "configuration" + ) - args = vcfg.get('args') + args = vcfg.get("args") if not args: - raise KeyError( - "Invalid configuration; missing 'args' config entry") + raise KeyError("Invalid configuration; missing 'args' config entry") - db = args.get('db') + db = args.get("db") if not db: - raise KeyError( - "Invalid configuration; missing 'db' config entry") + raise KeyError("Invalid configuration; missing 'db' config entry") return cfg api_cfg = None def make_app_from_configfile(): """Run the WSGI app from the webserver, loading the configuration from a configuration file. SWH_CONFIG_FILENAME environment variable defines the configuration path to load. """ global api_cfg if not api_cfg: - config_file = os.environ.get('SWH_CONFIG_FILENAME') + config_file = os.environ.get("SWH_CONFIG_FILENAME") api_cfg = load_and_check_config(config_file) app.config.update(api_cfg) handler = logging.StreamHandler() app.logger.addHandler(handler) return app -if __name__ == '__main__': +if __name__ == "__main__": print('Please use the "swh-scheduler api-server" command') diff --git a/swh/scheduler/backend.py b/swh/scheduler/backend.py index ffeaca7..5ae47dc 100644 --- a/swh/scheduler/backend.py +++ b/swh/scheduler/backend.py @@ -1,516 +1,571 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import logging from arrow import Arrow, utcnow import psycopg2.pool import psycopg2.extras from typing import Any, Dict, Optional from psycopg2.extensions import AsIs from swh.core.db import BaseDb from swh.core.db.common import db_transaction logger = logging.getLogger(__name__) def adapt_arrow(arrow): return AsIs("'%s'::timestamptz" % arrow.isoformat()) psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json) psycopg2.extensions.register_adapter(Arrow, adapt_arrow) def format_query(query, keys): """Format a query with the given keys""" - query_keys = ', '.join(keys) - placeholders = ', '.join(['%s'] * len(keys)) + query_keys = ", ".join(keys) + placeholders = ", ".join(["%s"] * len(keys)) return query.format(keys=query_keys, placeholders=placeholders) class SchedulerBackend: """Backend for the Software Heritage scheduling database. 
""" def __init__(self, db, min_pool_conns=1, max_pool_conns=10): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection """ if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = BaseDb(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( - min_pool_conns, max_pool_conns, db, + min_pool_conns, + max_pool_conns, + db, cursor_factory=psycopg2.extras.RealDictCursor, ) self._db = None def get_db(self): if self._db: return self._db return BaseDb.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() task_type_keys = [ - 'type', 'description', 'backend_name', 'default_interval', - 'min_interval', 'max_interval', 'backoff_factor', 'max_queue_length', - 'num_retries', 'retry_delay', + "type", + "description", + "backend_name", + "default_interval", + "min_interval", + "max_interval", + "backoff_factor", + "max_queue_length", + "num_retries", + "retry_delay", ] @db_transaction() def create_task_type(self, task_type, db=None, cur=None): """Create a new task type ready for scheduling. Args: task_type (dict): a dictionary with the following keys: - type (str): an identifier for the task type - description (str): a human-readable description of what the task does - backend_name (str): the name of the task in the job-scheduling backend - default_interval (datetime.timedelta): the default interval between two task runs - min_interval (datetime.timedelta): the minimum interval between two task runs - max_interval (datetime.timedelta): the maximum interval between two task runs - backoff_factor (float): the factor by which the interval changes at each run - max_queue_length (int): the maximum length of the task queue for this task type """ keys = [key for key in self.task_type_keys if key in task_type] query = format_query( """insert into task_type ({keys}) values ({placeholders}) on conflict do nothing""", - keys) + keys, + ) cur.execute(query, [task_type[key] for key in keys]) @db_transaction() def get_task_type(self, task_type_name, db=None, cur=None): """Retrieve the task type with id task_type_name""" query = format_query( - "select {keys} from task_type where type=%s", - self.task_type_keys, + "select {keys} from task_type where type=%s", self.task_type_keys, ) cur.execute(query, (task_type_name,)) return cur.fetchone() @db_transaction() def get_task_types(self, db=None, cur=None): """Retrieve all registered task types""" - query = format_query( - "select {keys} from task_type", - self.task_type_keys, - ) + query = format_query("select {keys} from task_type", self.task_type_keys,) cur.execute(query) return cur.fetchall() task_create_keys = [ - 'type', 'arguments', 'next_run', 'policy', 'status', 'retries_left', - 'priority' + "type", + "arguments", + "next_run", + "policy", + "status", + "retries_left", + "priority", ] - task_keys = task_create_keys + ['id', 'current_interval'] + task_keys = task_create_keys + ["id", "current_interval"] @db_transaction() - def create_tasks(self, tasks, policy='recurring', db=None, cur=None): + def create_tasks(self, tasks, policy="recurring", db=None, cur=None): """Create new tasks. Args: tasks (list): each task is a dictionary with the following keys: - type (str): the task type - arguments (dict): the arguments for the task runner, keys: - args (list of str): arguments - kwargs (dict str -> str): keyword arguments - next_run (datetime.datetime): the next scheduled run for the task Returns: a list of created tasks. 
""" - cur.execute('select swh_scheduler_mktemp_task()') - db.copy_to(tasks, 'tmp_task', self.task_create_keys, - default_values={ - 'policy': policy, - 'status': 'next_run_not_scheduled' - }, - cur=cur) + cur.execute("select swh_scheduler_mktemp_task()") + db.copy_to( + tasks, + "tmp_task", + self.task_create_keys, + default_values={"policy": policy, "status": "next_run_not_scheduled"}, + cur=cur, + ) query = format_query( - 'select {keys} from swh_scheduler_create_tasks_from_temp()', - self.task_keys, + "select {keys} from swh_scheduler_create_tasks_from_temp()", self.task_keys, ) cur.execute(query) return cur.fetchall() @db_transaction() - def set_status_tasks(self, task_ids, status='disabled', next_run=None, - db=None, cur=None): + def set_status_tasks( + self, task_ids, status="disabled", next_run=None, db=None, cur=None + ): """Set the tasks' status whose ids are listed. If given, also set the next_run date. """ if not task_ids: return query = ["UPDATE task SET status = %s"] args = [status] if next_run: - query.append(', next_run = %s') + query.append(", next_run = %s") args.append(next_run) query.append(" WHERE id IN %s") args.append(tuple(task_ids)) - cur.execute(''.join(query), args) + cur.execute("".join(query), args) @db_transaction() def disable_tasks(self, task_ids, db=None, cur=None): """Disable the tasks whose ids are listed.""" return self.set_status_tasks(task_ids, db=db, cur=cur) @db_transaction() - def search_tasks(self, task_id=None, task_type=None, status=None, - priority=None, policy=None, before=None, after=None, - limit=None, db=None, cur=None): + def search_tasks( + self, + task_id=None, + task_type=None, + status=None, + priority=None, + policy=None, + before=None, + after=None, + limit=None, + db=None, + cur=None, + ): """Search tasks from selected criterions""" where = [] args = [] if task_id: if isinstance(task_id, (str, int)): - where.append('id = %s') + where.append("id = %s") else: - where.append('id in %s') + where.append("id in %s") task_id = tuple(task_id) args.append(task_id) if task_type: if isinstance(task_type, str): - where.append('type = %s') + where.append("type = %s") else: - where.append('type in %s') + where.append("type in %s") task_type = tuple(task_type) args.append(task_type) if status: if isinstance(status, str): - where.append('status = %s') + where.append("status = %s") else: - where.append('status in %s') + where.append("status in %s") status = tuple(status) args.append(status) if priority: if isinstance(priority, str): - where.append('priority = %s') + where.append("priority = %s") else: priority = tuple(priority) - where.append('priority in %s') + where.append("priority in %s") args.append(priority) if policy: - where.append('policy = %s') + where.append("policy = %s") args.append(policy) if before: - where.append('next_run <= %s') + where.append("next_run <= %s") args.append(before) if after: - where.append('next_run >= %s') + where.append("next_run >= %s") args.append(after) - query = 'select * from task' + query = "select * from task" if where: - query += ' where ' + ' and '.join(where) + query += " where " + " and ".join(where) if limit: - query += ' limit %s :: bigint' + query += " limit %s :: bigint" args.append(limit) cur.execute(query, args) return cur.fetchall() @db_transaction() def get_tasks(self, task_ids, db=None, cur=None): """Retrieve the info of tasks whose ids are listed.""" - query = format_query('select {keys} from task where id in %s', - self.task_keys) + query = format_query("select {keys} from task where id in %s", 
self.task_keys) cur.execute(query, (tuple(task_ids),)) return cur.fetchall() @db_transaction() - def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None, - num_tasks_priority=None, - db=None, cur=None): + def peek_ready_tasks( + self, + task_type, + timestamp=None, + num_tasks=None, + num_tasks_priority=None, + db=None, + cur=None, + ): """Fetch the list of ready tasks Args: task_type (str): filtering task per their type timestamp (datetime.datetime): peek tasks that need to be executed before that timestamp num_tasks (int): only peek at num_tasks tasks (with no priority) num_tasks_priority (int): only peek at num_tasks_priority tasks (with priority) Returns: a list of tasks """ if timestamp is None: timestamp = utcnow() cur.execute( - '''select * from swh_scheduler_peek_ready_tasks( - %s, %s, %s :: bigint, %s :: bigint)''', - (task_type, timestamp, num_tasks, num_tasks_priority) + """select * from swh_scheduler_peek_ready_tasks( + %s, %s, %s :: bigint, %s :: bigint)""", + (task_type, timestamp, num_tasks, num_tasks_priority), ) - logger.debug('PEEK %s => %s' % (task_type, cur.rowcount)) + logger.debug("PEEK %s => %s" % (task_type, cur.rowcount)) return cur.fetchall() @db_transaction() - def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None, - num_tasks_priority=None, db=None, cur=None): + def grab_ready_tasks( + self, + task_type, + timestamp=None, + num_tasks=None, + num_tasks_priority=None, + db=None, + cur=None, + ): """Fetch the list of ready tasks, and mark them as scheduled Args: task_type (str): filtering task per their type timestamp (datetime.datetime): grab tasks that need to be executed before that timestamp num_tasks (int): only grab num_tasks tasks (with no priority) num_tasks_priority (int): only grab oneshot num_tasks tasks (with priorities) Returns: a list of tasks """ if timestamp is None: timestamp = utcnow() cur.execute( - '''select * from swh_scheduler_grab_ready_tasks( - %s, %s, %s :: bigint, %s :: bigint)''', - (task_type, timestamp, num_tasks, num_tasks_priority) + """select * from swh_scheduler_grab_ready_tasks( + %s, %s, %s :: bigint, %s :: bigint)""", + (task_type, timestamp, num_tasks, num_tasks_priority), ) - logger.debug('GRAB %s => %s' % (task_type, cur.rowcount)) + logger.debug("GRAB %s => %s" % (task_type, cur.rowcount)) return cur.fetchall() - task_run_create_keys = ['task', 'backend_id', 'scheduled', 'metadata'] + task_run_create_keys = ["task", "backend_id", "scheduled", "metadata"] @db_transaction() - def schedule_task_run(self, task_id, backend_id, metadata=None, - timestamp=None, db=None, cur=None): + def schedule_task_run( + self, task_id, backend_id, metadata=None, timestamp=None, db=None, cur=None + ): """Mark a given task as scheduled, adding a task_run entry in the database. Args: task_id (int): the identifier for the task being scheduled backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: a fresh task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cur.execute( - 'select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)', - (task_id, backend_id, metadata, timestamp) + "select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)", + (task_id, backend_id, metadata, timestamp), ) return cur.fetchone() @db_transaction() def mass_schedule_task_runs(self, task_runs, db=None, cur=None): """Schedule a bunch of task runs. 
Args: task_runs (list): a list of dicts with keys: - task (int): the identifier for the task being scheduled - backend_id (str): the identifier of the job in the backend - metadata (dict): metadata to add to the task_run entry - scheduled (datetime.datetime): the instant the event occurred Returns: None """ - cur.execute('select swh_scheduler_mktemp_task_run()') - db.copy_to(task_runs, 'tmp_task_run', self.task_run_create_keys, - cur=cur) - cur.execute('select swh_scheduler_schedule_task_run_from_temp()') + cur.execute("select swh_scheduler_mktemp_task_run()") + db.copy_to(task_runs, "tmp_task_run", self.task_run_create_keys, cur=cur) + cur.execute("select swh_scheduler_schedule_task_run_from_temp()") @db_transaction() - def start_task_run(self, backend_id, metadata=None, timestamp=None, - db=None, cur=None): + def start_task_run( + self, backend_id, metadata=None, timestamp=None, db=None, cur=None + ): """Mark a given task as started, updating the corresponding task_run entry in the database. Args: backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: the updated task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cur.execute( - 'select * from swh_scheduler_start_task_run(%s, %s, %s)', - (backend_id, metadata, timestamp) + "select * from swh_scheduler_start_task_run(%s, %s, %s)", + (backend_id, metadata, timestamp), ) return cur.fetchone() @db_transaction() - def end_task_run(self, backend_id, status, metadata=None, timestamp=None, - result=None, db=None, cur=None): + def end_task_run( + self, + backend_id, + status, + metadata=None, + timestamp=None, + result=None, + db=None, + cur=None, + ): """Mark a given task as ended, updating the corresponding task_run entry in the database. Args: backend_id (str): the identifier of the job in the backend status (str): how the task ended; one of: 'eventful', 'uneventful', 'failed' metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: the updated task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cur.execute( - 'select * from swh_scheduler_end_task_run(%s, %s, %s, %s)', - (backend_id, status, metadata, timestamp) + "select * from swh_scheduler_end_task_run(%s, %s, %s, %s)", + (backend_id, status, metadata, timestamp), ) return cur.fetchone() @db_transaction() def filter_task_to_archive( - self, after_ts: str, before_ts: str, - limit: int = 10, page_token: Optional[str] = None, - db=None, cur=None) -> Dict[str, Any]: + self, + after_ts: str, + before_ts: str, + limit: int = 10, + page_token: Optional[str] = None, + db=None, + cur=None, + ) -> Dict[str, Any]: """Compute the tasks to archive within the datetime interval [after_ts, before_ts[. The method returns a paginated result. Returns: dict with the following keys: - **next_page_token**: opaque token to be used as `page_token` to retrieve the next page of result. If absent, there is no more pages to gather. - **tasks**: list of task dictionaries with the following keys: **id** (str): origin task id **started** (Optional[datetime]): started date **scheduled** (datetime): scheduled date **arguments** (json dict): task's arguments ... 
""" assert not page_token or isinstance(page_token, str) last_id = -1 if page_token is None else int(page_token) tasks = [] cur.execute( "select * from swh_scheduler_task_to_archive(%s, %s, %s, %s)", - (after_ts, before_ts, last_id, limit + 1) + (after_ts, before_ts, last_id, limit + 1), ) for row in cur: task = dict(row) # nested type index does not accept bare values # transform it as a dict to comply with this - task['arguments']['args'] = { - i: v for i, v in enumerate(task['arguments']['args']) + task["arguments"]["args"] = { + i: v for i, v in enumerate(task["arguments"]["args"]) } - kwargs = task['arguments']['kwargs'] - task['arguments']['kwargs'] = json.dumps(kwargs) + kwargs = task["arguments"]["kwargs"] + task["arguments"]["kwargs"] = json.dumps(kwargs) tasks.append(task) if len(tasks) >= limit + 1: # remains data, add pagination information result = { - 'tasks': tasks[:limit], - 'next_page_token': str(tasks[-1]['task_id']), + "tasks": tasks[:limit], + "next_page_token": str(tasks[-1]["task_id"]), } else: - result = { - 'tasks': tasks - } + result = {"tasks": tasks} return result @db_transaction() def delete_archived_tasks(self, task_ids, db=None, cur=None): """Delete archived tasks as much as possible. Only the task_ids whose complete associated task_run have been cleaned up will be. """ _task_ids = _task_run_ids = [] for task_id in task_ids: - _task_ids.append(task_id['task_id']) - _task_run_ids.append(task_id['task_run_id']) + _task_ids.append(task_id["task_id"]) + _task_run_ids.append(task_id["task_run_id"]) cur.execute( "select * from swh_scheduler_delete_archived_tasks(%s, %s)", - (_task_ids, _task_run_ids)) + (_task_ids, _task_run_ids), + ) - task_run_keys = ['id', 'task', 'backend_id', 'scheduled', - 'started', 'ended', 'metadata', 'status', ] + task_run_keys = [ + "id", + "task", + "backend_id", + "scheduled", + "started", + "ended", + "metadata", + "status", + ] @db_transaction() def get_task_runs(self, task_ids, limit=None, db=None, cur=None): """Search task run for a task id""" where = [] args = [] if task_ids: if isinstance(task_ids, (str, int)): - where.append('task = %s') + where.append("task = %s") else: - where.append('task in %s') + where.append("task in %s") task_ids = tuple(task_ids) args.append(task_ids) else: return () - query = 'select * from task_run where ' + ' and '.join(where) + query = "select * from task_run where " + " and ".join(where) if limit: - query += ' limit %s :: bigint' + query += " limit %s :: bigint" args.append(limit) cur.execute(query, args) return cur.fetchall() @db_transaction() def get_priority_ratios(self, db=None, cur=None): - cur.execute('select id, ratio from priority_ratio') - return {row['id']: row['ratio'] for row in cur.fetchall()} + cur.execute("select id, ratio from priority_ratio") + return {row["id"]: row["ratio"] for row in cur.fetchall()} diff --git a/swh/scheduler/backend_es.py b/swh/scheduler/backend_es.py index 30bbbbb..ad7b6a3 100644 --- a/swh/scheduler/backend_es.py +++ b/swh/scheduler/backend_es.py @@ -1,265 +1,268 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Elastic Search backend """ import datetime # noqa import logging from copy import deepcopy from typing import Any, Dict from elasticsearch import helpers from swh.core import utils logger = logging.getLogger(__name__) DEFAULT_CONFIG = { - 
'elasticsearch': { - 'cls': 'local', - 'args': { - 'index_name_prefix': 'swh-tasks', - 'storage_nodes': ['localhost:9200'], - 'client_options': { - 'sniff_on_start': False, - 'sniff_on_connection_fail': True, - 'http_compress': False, - 'sniffer_timeout': 60 + "elasticsearch": { + "cls": "local", + "args": { + "index_name_prefix": "swh-tasks", + "storage_nodes": ["localhost:9200"], + "client_options": { + "sniff_on_start": False, + "sniff_on_connection_fail": True, + "http_compress": False, + "sniffer_timeout": 60, }, }, } } def get_elasticsearch(cls: str, args: Dict[str, Any] = {}): """Instantiate an elastic search instance """ - if cls == 'local': + if cls == "local": from elasticsearch import Elasticsearch - elif cls == 'memory': + elif cls == "memory": from .elasticsearch_memory import MemoryElasticsearch as Elasticsearch # type: ignore # noqa else: - raise ValueError('Unknown elasticsearch class `%s`' % cls) + raise ValueError("Unknown elasticsearch class `%s`" % cls) return Elasticsearch(**args) class ElasticSearchBackend: """ElasticSearch backend to index tasks This uses an elasticsearch client to actually discuss with the elasticsearch instance. """ + def __init__(self, **config): self.config = deepcopy(DEFAULT_CONFIG) self.config.update(config) - es_conf = self.config['elasticsearch'] - args = deepcopy(es_conf['args']) - self.index_name_prefix = args.pop('index_name_prefix') + es_conf = self.config["elasticsearch"] + args = deepcopy(es_conf["args"]) + self.index_name_prefix = args.pop("index_name_prefix") self.storage = get_elasticsearch( - cls=es_conf['cls'], + cls=es_conf["cls"], args={ - 'hosts': args.get('storage_nodes', []), - **args.get('client_options', {}), - } + "hosts": args.get("storage_nodes", []), + **args.get("client_options", {}), + }, ) # document's index type (cf. 
/data/elastic-template.json) - self.doc_type = 'task' + self.doc_type = "task" def initialize(self): self.storage.indices.put_mapping( index=f"{self.index_name_prefix}-*", doc_type=self.doc_type, # to allow type definition below include_type_name=True, # to allow install mapping even if no index yet allow_no_indices=True, body={ "properties": { "task_id": {"type": "double"}, "task_policy": {"type": "text"}, "task_status": {"type": "text"}, "task_run_id": {"type": "double"}, "arguments": { "type": "object", "properties": { - "args": { - "type": "nested", - "dynamic": False - }, - "kwargs": { - "type": "text" - } - } + "args": {"type": "nested", "dynamic": False}, + "kwargs": {"type": "text"}, + }, }, "type": {"type": "text"}, "backend_id": {"type": "text"}, - "metadata": { - "type": "object", - "enabled": False - }, - "scheduled": { + "metadata": {"type": "object", "enabled": False}, + "scheduled": { "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" # noqa + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa }, - "started": { + "started": { "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" # noqa + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa }, - "ended": { + "ended": { "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" # noqa - } + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa + }, } - }) + }, + ) self.storage.indices.put_settings( index=f"{self.index_name_prefix}-*", allow_no_indices=True, body={ "index": { "codec": "best_compression", "refresh_interval": "1s", - "number_of_shards": 1 + "number_of_shards": 1, } - }) + }, + ) def create(self, index_name) -> None: """Create and initialize index_name with mapping for all indices matching `swh-tasks-` pattern """ assert index_name.startswith(self.index_name_prefix) self.storage.indices.create(index_name) def compute_index_name(self, year, month): """Given a year, month, compute the index's name. """ - return '%s-%s-%s' % ( - self.index_name_prefix, year, '%02d' % month) + return "%s-%s-%s" % (self.index_name_prefix, year, "%02d" % month) - def mget(self, index_name, doc_ids, chunk_size=500, - source=True): + def mget(self, index_name, doc_ids, chunk_size=500, source=True): """Retrieve document's full content according to their ids as per source's setup. The `source` allows to retrieve only what's interesting, e.g: - source=True ; gives back the original indexed data - source=False ; returns without the original _source field - source=['task_id'] ; returns only task_id in the _source field Args: index_name (str): Name of the concerned index. 
doc_ids (generator): Generator of ids to retrieve chunk_size (int): Number of documents chunk to send for retrieval source (bool/[str]): Source of information to return Yields: document indexed as per source's setup """ if isinstance(source, list): - source = {'_source': ','.join(source)} + source = {"_source": ",".join(source)} else: - source = {'_source': str(source).lower()} + source = {"_source": str(source).lower()} for ids in utils.grouper(doc_ids, n=1000): - res = self.storage.mget(body={'ids': list(ids)}, - index=index_name, - doc_type=self.doc_type, - params=source) + res = self.storage.mget( + body={"ids": list(ids)}, + index=index_name, + doc_type=self.doc_type, + params=source, + ) if not res: - logger.error('Error during retrieval of data, skipping!') + logger.error("Error during retrieval of data, skipping!") continue - for doc in res['docs']: - found = doc.get('found') + for doc in res["docs"]: + found = doc.get("found") if not found: - msg = 'Doc id %s not found, not indexed yet' % doc['_id'] + msg = "Doc id %s not found, not indexed yet" % doc["_id"] logger.warning(msg) continue - yield doc['_source'] + yield doc["_source"] def _streaming_bulk(self, index_name, doc_stream, chunk_size=500): """Bulk index data and returns the successful indexed data's identifier. Args: index_name (str): Name of the concerned index. doc_stream (generator): Generator of documents to index chunk_size (int): Number of documents chunk to send for indexation Yields: document id indexed """ - actions = ({'_index': index_name, - '_op_type': 'index', - '_type': self.doc_type, - '_source': data} for data in doc_stream) - for ok, result in helpers.streaming_bulk(client=self.storage, - actions=actions, - chunk_size=chunk_size, - raise_on_error=False, - raise_on_exception=False): + actions = ( + { + "_index": index_name, + "_op_type": "index", + "_type": self.doc_type, + "_source": data, + } + for data in doc_stream + ) + for ok, result in helpers.streaming_bulk( + client=self.storage, + actions=actions, + chunk_size=chunk_size, + raise_on_error=False, + raise_on_exception=False, + ): if not ok: - logger.error('Error during %s indexation. Skipping.', result) + logger.error("Error during %s indexation. Skipping.", result) continue - yield result['index']['_id'] + yield result["index"]["_id"] def is_index_opened(self, index_name: str) -> bool: """Determine if an index is opened or not """ try: self.storage.indices.stats(index_name) return True except Exception: # fails when indice is closed (no other api call found) return False - def streaming_bulk(self, index_name, doc_stream, chunk_size=500, - source=True): + def streaming_bulk(self, index_name, doc_stream, chunk_size=500, source=True): """Bulk index data and returns the successful indexed data as per source's setup. the `source` permits to retrieve only what's of interest to us, e.g: - source=True ; gives back the original indexed data - source=False ; returns without the original _source field - source=['task_id'] ; returns only task_id in the _source field Note that: - if the index is closed, it will be opened - if the index does not exist, it will be created and opened This keeps the index opened for performance reasons. Args: index_name (str): Name of the concerned index. 
doc_stream (generator): Document generator to index chunk_size (int): Number of documents chunk to send source (bool, [str]): the information to return """ # index must exist if not self.storage.indices.exists(index_name): self.create(index_name) # index must be opened if not self.is_index_opened(index_name): self.storage.indices.open(index_name) indexed_ids = self._streaming_bulk( - index_name, doc_stream, chunk_size=chunk_size) + index_name, doc_stream, chunk_size=chunk_size + ) yield from self.mget( - index_name, indexed_ids, chunk_size=chunk_size, source=source) + index_name, indexed_ids, chunk_size=chunk_size, source=source + ) diff --git a/swh/scheduler/celery_backend/config.py b/swh/scheduler/celery_backend/config.py index ab8b4d4..0dcbf26 100644 --- a/swh/scheduler/celery_backend/config.py +++ b/swh/scheduler/celery_backend/config.py @@ -1,329 +1,339 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import logging import os import pkg_resources import traceback from typing import Any, Dict import urllib.parse from celery import Celery from celery.signals import setup_logging, celeryd_after_setup, worker_init from celery.utils.log import ColorFormatter from celery.worker.control import Panel from kombu import Exchange, Queue from kombu.five import monotonic as _monotonic import requests from swh.scheduler import CONFIG as SWH_CONFIG from swh.core.config import load_named_config, merge_configs from swh.core.sentry import init_sentry try: from swh.core.logger import JournalHandler except ImportError: JournalHandler = None # type: ignore -DEFAULT_CONFIG_NAME = 'worker' -CONFIG_NAME_ENVVAR = 'SWH_WORKER_INSTANCE' -CONFIG_NAME_TEMPLATE = 'worker/%s' +DEFAULT_CONFIG_NAME = "worker" +CONFIG_NAME_ENVVAR = "SWH_WORKER_INSTANCE" +CONFIG_NAME_TEMPLATE = "worker/%s" DEFAULT_CONFIG = { - 'task_broker': ('str', 'amqp://guest@localhost//'), - 'task_modules': ('list[str]', []), - 'task_queues': ('list[str]', []), - 'task_soft_time_limit': ('int', 0), + "task_broker": ("str", "amqp://guest@localhost//"), + "task_modules": ("list[str]", []), + "task_queues": ("list[str]", []), + "task_soft_time_limit": ("int", 0), } logger = logging.getLogger(__name__) # Celery eats tracebacks in signal callbacks, this decorator catches # and prints them. # Also tries to notify Sentry if possible. def _print_errors(f): @functools.wraps(f) def newf(*args, **kwargs): try: return f(*args, **kwargs) except Exception as exc: traceback.print_exc() try: import sentry_sdk + sentry_sdk.capture_exception(exc) except Exception: traceback.print_exc() return newf @setup_logging.connect @_print_errors -def setup_log_handler(loglevel=None, logfile=None, format=None, colorize=None, - log_console=None, log_journal=None, **kwargs): +def setup_log_handler( + loglevel=None, + logfile=None, + format=None, + colorize=None, + log_console=None, + log_journal=None, + **kwargs +): """Setup logging according to Software Heritage preferences. We use the command-line loglevel for tasks only, as we never really care about the debug messages from celery. 
""" if loglevel is None: loglevel = logging.DEBUG if isinstance(loglevel, str): loglevel = logging._nameToLevel[loglevel] formatter = logging.Formatter(format) - root_logger = logging.getLogger('') + root_logger = logging.getLogger("") root_logger.setLevel(logging.INFO) - log_target = os.environ.get('SWH_LOG_TARGET', 'console') - if log_target == 'console': + log_target = os.environ.get("SWH_LOG_TARGET", "console") + if log_target == "console": log_console = True - elif log_target == 'journal': + elif log_target == "journal": log_journal = True # this looks for log levels *higher* than DEBUG if loglevel <= logging.DEBUG and log_console is None: log_console = True if log_console: color_formatter = ColorFormatter(format) if colorize else formatter console = logging.StreamHandler() console.setLevel(logging.DEBUG) console.setFormatter(color_formatter) root_logger.addHandler(console) if log_journal: if not JournalHandler: - root_logger.warning('JournalHandler is not available, skipping. ' - 'Please install swh-core[logging].') + root_logger.warning( + "JournalHandler is not available, skipping. " + "Please install swh-core[logging]." + ) else: systemd_journal = JournalHandler() systemd_journal.setLevel(logging.DEBUG) systemd_journal.setFormatter(formatter) root_logger.addHandler(systemd_journal) - logging.getLogger('celery').setLevel(logging.INFO) + logging.getLogger("celery").setLevel(logging.INFO) # Silence amqp heartbeat_tick messages - logger = logging.getLogger('amqp') - logger.addFilter(lambda record: not record.msg.startswith( - 'heartbeat_tick')) + logger = logging.getLogger("amqp") + logger.addFilter(lambda record: not record.msg.startswith("heartbeat_tick")) logger.setLevel(loglevel) # Silence useless "Starting new HTTP connection" messages - logging.getLogger('urllib3').setLevel(logging.WARNING) + logging.getLogger("urllib3").setLevel(logging.WARNING) - logging.getLogger('swh').setLevel(loglevel) + logging.getLogger("swh").setLevel(loglevel) # get_task_logger makes the swh tasks loggers children of celery.task - logging.getLogger('celery.task').setLevel(loglevel) + logging.getLogger("celery.task").setLevel(loglevel) return loglevel @celeryd_after_setup.connect @_print_errors def setup_queues_and_tasks(sender, instance, **kwargs): """Signal called on worker start. This automatically registers swh.scheduler.task.Task subclasses as available celery tasks. This also subscribes the worker to the "implicit" per-task queues defined for these task classes. 
""" - logger.info('Setup Queues & Tasks for %s', sender) - instance.app.conf['worker_name'] = sender + logger.info("Setup Queues & Tasks for %s", sender) + instance.app.conf["worker_name"] = sender @worker_init.connect @_print_errors def on_worker_init(*args, **kwargs): try: from sentry_sdk.integrations.celery import CeleryIntegration except ImportError: integrations = [] else: integrations = [CeleryIntegration()] sentry_dsn = None # will be set in `init_sentry` function init_sentry(sentry_dsn, integrations=integrations) @Panel.register def monotonic(state): """Get the current value for the monotonic clock""" - return {'monotonic': _monotonic()} + return {"monotonic": _monotonic()} def route_for_task(name, args, kwargs, options, task=None, **kw): """Route tasks according to the task_queue attribute in the task class""" - if name is not None and name.startswith('swh.'): - return {'queue': name} + if name is not None and name.startswith("swh."): + return {"queue": name} def get_queue_stats(app, queue_name): """Get the statistics regarding a queue on the broker. Arguments: queue_name: name of the queue to check Returns a dictionary raw from the RabbitMQ management API; or `None` if the current configuration does not use RabbitMQ. Interesting keys: - Consumers (number of consumers for the queue) - messages (number of messages in queue) - messages_unacknowledged (number of messages currently being processed) Documentation: https://www.rabbitmq.com/management.html#http-api """ conn_info = app.connection().info() - if conn_info['transport'] == 'memory': + if conn_info["transport"] == "memory": # We're running in a test environment, without RabbitMQ. return None - url = 'http://{hostname}:{port}/api/queues/{vhost}/{queue}'.format( - hostname=conn_info['hostname'], - port=conn_info['port'] + 10000, - vhost=urllib.parse.quote(conn_info['virtual_host'], safe=''), - queue=urllib.parse.quote(queue_name, safe=''), + url = "http://{hostname}:{port}/api/queues/{vhost}/{queue}".format( + hostname=conn_info["hostname"], + port=conn_info["port"] + 10000, + vhost=urllib.parse.quote(conn_info["virtual_host"], safe=""), + queue=urllib.parse.quote(queue_name, safe=""), ) - credentials = (conn_info['userid'], conn_info['password']) + credentials = (conn_info["userid"], conn_info["password"]) r = requests.get(url, auth=credentials) if r.status_code == 404: return {} if r.status_code != 200: - raise ValueError('Got error %s when reading queue stats: %s' % ( - r.status_code, r.json())) + raise ValueError( + "Got error %s when reading queue stats: %s" % (r.status_code, r.json()) + ) return r.json() def get_queue_length(app, queue_name): """Shortcut to get a queue's length""" stats = get_queue_stats(app, queue_name) if stats: - return stats.get('messages') + return stats.get("messages") def register_task_class(app, name, cls): """Register a class-based task under the given name""" if name in app.tasks: return task_instance = cls() task_instance.name = name app.register_task(task_instance) INSTANCE_NAME = os.environ.get(CONFIG_NAME_ENVVAR) -CONFIG_NAME = os.environ.get('SWH_CONFIG_FILENAME') +CONFIG_NAME = os.environ.get("SWH_CONFIG_FILENAME") CONFIG = {} # type: Dict[str, Any] if CONFIG_NAME: # load the celery config from the main config file given as # SWH_CONFIG_FILENAME environment variable. # This is expected to have a [celery] section in which we have the # celery specific configuration. 
SWH_CONFIG.clear() SWH_CONFIG.update(load_named_config(CONFIG_NAME)) - CONFIG = SWH_CONFIG.get('celery', {}) + CONFIG = SWH_CONFIG.get("celery", {}) if not CONFIG: # otherwise, back to compat config loading mechanism if INSTANCE_NAME: CONFIG_NAME = CONFIG_NAME_TEMPLATE % INSTANCE_NAME else: CONFIG_NAME = DEFAULT_CONFIG_NAME # Load the Celery config CONFIG = load_named_config(CONFIG_NAME, DEFAULT_CONFIG) -CONFIG.setdefault('task_modules', []) +CONFIG.setdefault("task_modules", []) # load tasks modules declared as plugin entry points -for entrypoint in pkg_resources.iter_entry_points('swh.workers'): +for entrypoint in pkg_resources.iter_entry_points("swh.workers"): worker_registrer_fn = entrypoint.load() # The registry function is expected to return a dict which the 'tasks' key # is a string (or a list of strings) with the name of the python module in # which celery tasks are defined. - task_modules = worker_registrer_fn().get('task_modules', []) - CONFIG['task_modules'].extend(task_modules) + task_modules = worker_registrer_fn().get("task_modules", []) + CONFIG["task_modules"].extend(task_modules) # Celery Queues -CELERY_QUEUES = [Queue('celery', Exchange('celery'), routing_key='celery')] +CELERY_QUEUES = [Queue("celery", Exchange("celery"), routing_key="celery")] CELERY_DEFAULT_CONFIG = dict( # Timezone configuration: all in UTC enable_utc=True, - timezone='UTC', + timezone="UTC", # Imported modules - imports=CONFIG.get('task_modules', []), + imports=CONFIG.get("task_modules", []), # Time (in seconds, or a timedelta object) for when after stored task # tombstones will be deleted. None means to never expire results. result_expires=None, # A string identifying the default serialization method to use. Can # be json (default), pickle, yaml, msgpack, or any custom # serialization methods that have been registered with - task_serializer='msgpack', + task_serializer="msgpack", # Result serialization format - result_serializer='msgpack', + result_serializer="msgpack", # Late ack means the task messages will be acknowledged after the task has # been executed, not just before, which is the default behavior. task_acks_late=True, # A string identifying the default serialization method to use. # Can be pickle (default), json, yaml, msgpack or any custom serialization # methods that have been registered with kombu.serialization.registry - accept_content=['msgpack', 'json'], + accept_content=["msgpack", "json"], # If True the task will report its status as “started” # when the task is executed by a worker. task_track_started=True, # Default compression used for task messages. Can be gzip, bzip2 # (if available), or any custom compression schemes registered # in the Kombu compression registry. # result_compression='bzip2', # task_compression='bzip2', # Disable all rate limits, even if tasks has explicit rate limits set. # (Disabling rate limits altogether is recommended if you don’t have any # tasks using them.) 
worker_disable_rate_limits=True, # Task routing task_routes=route_for_task, # Allow pool restarts from remote worker_pool_restarts=True, # Do not prefetch tasks worker_prefetch_multiplier=1, # Send events worker_send_task_events=True, # Do not send useless task_sent events task_send_sent_event=False, - ) +) def build_app(config=None): config = merge_configs( - {k: v for (k, (_, v)) in DEFAULT_CONFIG.items()}, - config or {}) + {k: v for (k, (_, v)) in DEFAULT_CONFIG.items()}, config or {} + ) - config['task_queues'] = CELERY_QUEUES + [ + config["task_queues"] = CELERY_QUEUES + [ Queue(queue, Exchange(queue), routing_key=queue) - for queue in config.get('task_queues', ())] - logger.debug('Creating a Celery app with %s', config) + for queue in config.get("task_queues", ()) + ] + logger.debug("Creating a Celery app with %s", config) # Instantiate the Celery app - app = Celery(broker=config['task_broker'], - task_cls='swh.scheduler.task:SWHTask') + app = Celery(broker=config["task_broker"], task_cls="swh.scheduler.task:SWHTask") app.add_defaults(CELERY_DEFAULT_CONFIG) app.add_defaults(config) return app app = build_app(CONFIG) # XXX for BW compat Celery.get_queue_length = get_queue_length diff --git a/swh/scheduler/celery_backend/listener.py b/swh/scheduler/celery_backend/listener.py index cc419d8..a9e3fc7 100644 --- a/swh/scheduler/celery_backend/listener.py +++ b/swh/scheduler/celery_backend/listener.py @@ -1,210 +1,222 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import logging import time import sys import click from arrow import utcnow from kombu import Queue import celery from celery.events import EventReceiver from swh.core.statsd import statsd class ReliableEventReceiver(EventReceiver): - def __init__(self, channel, handlers=None, routing_key='#', - node_id=None, app=None, queue_prefix='celeryev', - accept=None): + def __init__( + self, + channel, + handlers=None, + routing_key="#", + node_id=None, + app=None, + queue_prefix="celeryev", + accept=None, + ): super(ReliableEventReceiver, self).__init__( - channel, handlers, routing_key, node_id, app, queue_prefix, accept) + channel, handlers, routing_key, node_id, app, queue_prefix, accept + ) - self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), - exchange=self.exchange, - routing_key=self.routing_key, - auto_delete=False, - durable=True) + self.queue = Queue( + ".".join([self.queue_prefix, self.node_id]), + exchange=self.exchange, + routing_key=self.routing_key, + auto_delete=False, + durable=True, + ) def get_consumers(self, consumer, channel): - return [consumer(queues=[self.queue], - callbacks=[self._receive], no_ack=False, - accept=self.accept)] + return [ + consumer( + queues=[self.queue], + callbacks=[self._receive], + no_ack=False, + accept=self.accept, + ) + ] def _receive(self, bodies, message): if not isinstance(bodies, list): # celery<4 returned body as element bodies = [bodies] for body in bodies: type, body = self.event_from_message(body) self.process(type, body, message) def process(self, type, event, message): """Process the received event by dispatching it to the appropriate handler.""" - handler = self.handlers.get(type) or self.handlers.get('*') + handler = self.handlers.get(type) or self.handlers.get("*") if handler: handler(event, message) - 
statsd.increment('swh_scheduler_listener_handled_event_total', - tags={'event_type': type}) + statsd.increment( + "swh_scheduler_listener_handled_event_total", tags={"event_type": type} + ) ACTION_SEND_DELAY = datetime.timedelta(seconds=1.0) ACTION_QUEUE_MAX_LENGTH = 1000 def event_monitor(app, backend): - logger = logging.getLogger('swh.scheduler.listener') + logger = logging.getLogger("swh.scheduler.listener") actions = { - 'last_send': utcnow() - 2*ACTION_SEND_DELAY, - 'queue': [], + "last_send": utcnow() - 2 * ACTION_SEND_DELAY, + "queue": [], } def try_perform_actions(actions=actions): - logger.debug('Try perform pending actions') - if actions['queue'] and ( - len(actions['queue']) > ACTION_QUEUE_MAX_LENGTH or - utcnow() - actions['last_send'] > ACTION_SEND_DELAY): + logger.debug("Try perform pending actions") + if actions["queue"] and ( + len(actions["queue"]) > ACTION_QUEUE_MAX_LENGTH + or utcnow() - actions["last_send"] > ACTION_SEND_DELAY + ): perform_actions(actions) def perform_actions(actions, backend=backend): - logger.info('Perform %s pending actions' % len(actions['queue'])) + logger.info("Perform %s pending actions" % len(actions["queue"])) action_map = { - 'start_task_run': backend.start_task_run, - 'end_task_run': backend.end_task_run, + "start_task_run": backend.start_task_run, + "end_task_run": backend.end_task_run, } messages = [] db = backend.get_db() try: cursor = db.cursor(None) - for action in actions['queue']: - messages.append(action['message']) - function = action_map[action['action']] - args = action.get('args', ()) - kwargs = action.get('kwargs', {}) - kwargs['cur'] = cursor + for action in actions["queue"]: + messages.append(action["message"]) + function = action_map[action["action"]] + args = action.get("args", ()) + kwargs = action.get("kwargs", {}) + kwargs["cur"] = cursor function(*args, **kwargs) except Exception: db.conn.rollback() else: db.conn.commit() finally: backend.put_db(db) for message in messages: if not message.acknowledged: message.ack() - actions['queue'] = [] - actions['last_send'] = utcnow() + actions["queue"] = [] + actions["last_send"] = utcnow() def queue_action(action, actions=actions): - actions['queue'].append(action) + actions["queue"].append(action) try_perform_actions() def catchall_event(event, message): - logger.debug('event: %s %s', event['type'], event.get('name', 'N/A')) + logger.debug("event: %s %s", event["type"], event.get("name", "N/A")) if not message.acknowledged: message.ack() try_perform_actions() def task_started(event, message): - logger.debug('task_started: %s %s', - event['type'], event.get('name', 'N/A')) - - queue_action({ - 'action': 'start_task_run', - 'args': [event['uuid']], - 'kwargs': { - 'timestamp': utcnow(), - 'metadata': { - 'worker': event['hostname'], + logger.debug("task_started: %s %s", event["type"], event.get("name", "N/A")) + + queue_action( + { + "action": "start_task_run", + "args": [event["uuid"]], + "kwargs": { + "timestamp": utcnow(), + "metadata": {"worker": event["hostname"],}, }, - }, - 'message': message, - }) + "message": message, + } + ) def task_succeeded(event, message): - logger.debug('task_succeeded: event: %s' % event) - logger.debug(' message: %s' % message) - result = event['result'] + logger.debug("task_succeeded: event: %s" % event) + logger.debug(" message: %s" % message) + result = event["result"] - logger.debug('task_succeeded: result: %s' % result) + logger.debug("task_succeeded: result: %s" % result) try: - status = result.get('status') - if status == 'success': - status 
= 'eventful' if result.get('eventful') else 'uneventful' + status = result.get("status") + if status == "success": + status = "eventful" if result.get("eventful") else "uneventful" except Exception: - status = 'eventful' if result else 'uneventful' - - queue_action({ - 'action': 'end_task_run', - 'args': [event['uuid']], - 'kwargs': { - 'timestamp': utcnow(), - 'status': status, - 'result': result, - }, - 'message': message, - }) + status = "eventful" if result else "uneventful" + + queue_action( + { + "action": "end_task_run", + "args": [event["uuid"]], + "kwargs": {"timestamp": utcnow(), "status": status, "result": result,}, + "message": message, + } + ) def task_failed(event, message): - logger.debug('task_failed: event: %s' % event) - logger.debug(' message: %s' % message) - - queue_action({ - 'action': 'end_task_run', - 'args': [event['uuid']], - 'kwargs': { - 'timestamp': utcnow(), - 'status': 'failed', - }, - 'message': message, - }) + logger.debug("task_failed: event: %s" % event) + logger.debug(" message: %s" % message) + + queue_action( + { + "action": "end_task_run", + "args": [event["uuid"]], + "kwargs": {"timestamp": utcnow(), "status": "failed",}, + "message": message, + } + ) recv = ReliableEventReceiver( celery.current_app.connection(), app=celery.current_app, handlers={ - 'task-started': task_started, - 'task-result': task_succeeded, - 'task-failed': task_failed, - '*': catchall_event, + "task-started": task_started, + "task-result": task_succeeded, + "task-failed": task_failed, + "*": catchall_event, }, - node_id='listener', + node_id="listener", ) errors = 0 while True: try: recv.capture(limit=None, timeout=None, wakeup=True) errors = 0 except KeyboardInterrupt: - logger.exception('Keyboard interrupt, exiting') + logger.exception("Keyboard interrupt, exiting") break except Exception: - logger.exception('Unexpected exception') + logger.exception("Unexpected exception") if errors < 5: time.sleep(errors) errors += 1 else: - logger.error('Too many consecutive errors, exiting') + logger.error("Too many consecutive errors, exiting") sys.exit(1) @click.command() @click.pass_context def main(ctx): - click.echo("Deprecated! Use 'swh-scheduler listener' instead.", - err=True) + click.echo("Deprecated! 
Use 'swh-scheduler listener' instead.", err=True) ctx.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/swh/scheduler/celery_backend/pika_listener.py b/swh/scheduler/celery_backend/pika_listener.py index f233914..d8047e8 100644 --- a/swh/scheduler/celery_backend/pika_listener.py +++ b/swh/scheduler/celery_backend/pika_listener.py @@ -1,108 +1,102 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import json import logging import sys import pika from swh.core.statsd import statsd from swh.scheduler import get_scheduler logger = logging.getLogger(__name__) def utcnow(): return datetime.datetime.now(tz=datetime.timezone.utc) def get_listener(broker_url, queue_name, scheduler_backend): - connection = pika.BlockingConnection( - pika.URLParameters(broker_url) - ) + connection = pika.BlockingConnection(pika.URLParameters(broker_url)) channel = connection.channel() channel.queue_declare(queue=queue_name, durable=True) - exchange = 'celeryev' - routing_key = '#' - channel.queue_bind(queue=queue_name, exchange=exchange, - routing_key=routing_key) + exchange = "celeryev" + routing_key = "#" + channel.queue_bind(queue=queue_name, exchange=exchange, routing_key=routing_key) channel.basic_qos(prefetch_count=1000) channel.basic_consume( - queue=queue_name, - on_message_callback=get_on_message(scheduler_backend), + queue=queue_name, on_message_callback=get_on_message(scheduler_backend), ) return channel def get_on_message(scheduler_backend): def on_message(channel, method_frame, properties, body): try: events = json.loads(body) except Exception: - logger.warning('Could not parse body %r', body) + logger.warning("Could not parse body %r", body) events = [] if not isinstance(events, list): events = [events] for event in events: - logger.debug('Received event %r', event) + logger.debug("Received event %r", event) process_event(event, scheduler_backend) channel.basic_ack(delivery_tag=method_frame.delivery_tag) return on_message def process_event(event, scheduler_backend): - uuid = event.get('uuid') + uuid = event.get("uuid") if not uuid: return - event_type = event['type'] - statsd.increment('swh_scheduler_listener_handled_event_total', - tags={'event_type': event_type}) + event_type = event["type"] + statsd.increment( + "swh_scheduler_listener_handled_event_total", tags={"event_type": event_type} + ) - if event_type == 'task-started': + if event_type == "task-started": scheduler_backend.start_task_run( - uuid, timestamp=utcnow(), - metadata={'worker': event.get('hostname')}, + uuid, timestamp=utcnow(), metadata={"worker": event.get("hostname")}, ) - elif event_type == 'task-result': - result = event['result'] + elif event_type == "task-result": + result = event["result"] status = None - if isinstance(result, dict) and 'status' in result: - status = result['status'] - if status == 'success': - status = 'eventful' if result.get('eventful') else 'uneventful' + if isinstance(result, dict) and "status" in result: + status = result["status"] + if status == "success": + status = "eventful" if result.get("eventful") else "uneventful" if status is None: - status = 'eventful' if result else 'uneventful' + status = "eventful" if result else "uneventful" - scheduler_backend.end_task_run(uuid, timestamp=utcnow(), - status=status, result=result) - elif event_type == 
'task-failed': - scheduler_backend.end_task_run(uuid, timestamp=utcnow(), - status='failed') + scheduler_backend.end_task_run( + uuid, timestamp=utcnow(), status=status, result=result + ) + elif event_type == "task-failed": + scheduler_backend.end_task_run(uuid, timestamp=utcnow(), status="failed") -if __name__ == '__main__': +if __name__ == "__main__": url = sys.argv[1] logging.basicConfig(level=logging.DEBUG) - scheduler_backend = get_scheduler('local', args={ - 'db': 'service=swh-scheduler' - }) - channel = get_listener(url, 'celeryev.test', scheduler_backend) - logger.info('Start consuming') + scheduler_backend = get_scheduler("local", args={"db": "service=swh-scheduler"}) + channel = get_listener(url, "celeryev.test", scheduler_backend) + logger.info("Start consuming") channel.start_consuming() diff --git a/swh/scheduler/celery_backend/runner.py b/swh/scheduler/celery_backend/runner.py index c9250ee..4faa7e8 100644 --- a/swh/scheduler/celery_backend/runner.py +++ b/swh/scheduler/celery_backend/runner.py @@ -1,125 +1,127 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import arrow import logging from kombu.utils.uuid import uuid from swh.core.statsd import statsd from swh.scheduler import get_scheduler, compute_nb_tasks_from logger = logging.getLogger(__name__) # Max batch size for tasks MAX_NUM_TASKS = 10000 def run_ready_tasks(backend, app): """Run tasks that are ready Args: backend (Scheduler): backend to read tasks to schedule app (App): Celery application to send tasks to Returns: A list of dictionaries:: { 'task': the scheduler's task id, 'backend_id': Celery's task id, 'scheduler': arrow.utcnow() } The result can be used to block-wait for the tasks' results:: backend_tasks = run_ready_tasks(self.scheduler, app) for task in backend_tasks: AsyncResult(id=task['backend_id']).get() """ all_backend_tasks = [] while True: task_types = {} pending_tasks = [] for task_type in backend.get_task_types(): - task_type_name = task_type['type'] + task_type_name = task_type["type"] task_types[task_type_name] = task_type - max_queue_length = task_type['max_queue_length'] - backend_name = task_type['backend_name'] + max_queue_length = task_type["max_queue_length"] + backend_name = task_type["backend_name"] if max_queue_length: try: queue_length = app.get_queue_length(backend_name) except ValueError: queue_length = None if queue_length is None: # Running without RabbitMQ (probably a test env). 
num_tasks = MAX_NUM_TASKS else: - num_tasks = min(max_queue_length - queue_length, - MAX_NUM_TASKS) + num_tasks = min(max_queue_length - queue_length, MAX_NUM_TASKS) else: num_tasks = MAX_NUM_TASKS if num_tasks > 0: - num_tasks, num_tasks_priority = compute_nb_tasks_from( - num_tasks) + num_tasks, num_tasks_priority = compute_nb_tasks_from(num_tasks) grabbed_tasks = backend.grab_ready_tasks( - task_type_name, - num_tasks=num_tasks, - num_tasks_priority=num_tasks_priority) + task_type_name, + num_tasks=num_tasks, + num_tasks_priority=num_tasks_priority, + ) if grabbed_tasks: pending_tasks.extend(grabbed_tasks) - logger.info('Grabbed %s tasks %s', - len(grabbed_tasks), task_type_name) + logger.info( + "Grabbed %s tasks %s", len(grabbed_tasks), task_type_name + ) statsd.increment( - 'swh_scheduler_runner_scheduled_task_total', + "swh_scheduler_runner_scheduled_task_total", len(grabbed_tasks), - tags={'task_type': task_type_name}) + tags={"task_type": task_type_name}, + ) if not pending_tasks: return all_backend_tasks backend_tasks = [] celery_tasks = [] for task in pending_tasks: - args = task['arguments']['args'] - kwargs = task['arguments']['kwargs'] + args = task["arguments"]["args"] + kwargs = task["arguments"]["kwargs"] - backend_name = task_types[task['type']]['backend_name'] + backend_name = task_types[task["type"]]["backend_name"] backend_id = uuid() celery_tasks.append((backend_name, backend_id, args, kwargs)) data = { - 'task': task['id'], - 'backend_id': backend_id, - 'scheduled': arrow.utcnow(), + "task": task["id"], + "backend_id": backend_id, + "scheduled": arrow.utcnow(), } backend_tasks.append(data) - logger.debug('Sent %s celery tasks', len(backend_tasks)) + logger.debug("Sent %s celery tasks", len(backend_tasks)) backend.mass_schedule_task_runs(backend_tasks) for backend_name, backend_id, args, kwargs in celery_tasks: app.send_task( backend_name, task_id=backend_id, args=args, kwargs=kwargs, ) all_backend_tasks.extend(backend_tasks) def main(): from .config import app as main_app + for module in main_app.conf.CELERY_IMPORTS: __import__(module) - main_backend = get_scheduler('local') + main_backend = get_scheduler("local") try: run_ready_tasks(main_backend, main_app) except Exception: main_backend.rollback() raise -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/swh/scheduler/cli/__init__.py b/swh/scheduler/cli/__init__.py index 0a25ba6..34f33dd 100644 --- a/swh/scheduler/cli/__init__.py +++ b/swh/scheduler/cli/__init__.py @@ -1,81 +1,93 @@ -# Copyright (C) 2016-2019 The Software Heritage developers +# Copyright (C) 2016-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import click from swh.core.cli import CONTEXT_SETTINGS, AliasedGroup -@click.group(name='scheduler', context_settings=CONTEXT_SETTINGS, - cls=AliasedGroup) -@click.option('--config-file', '-C', default=None, - type=click.Path(exists=True, dir_okay=False,), - help="Configuration file.") -@click.option('--database', '-d', default=None, - help="Scheduling database DSN (imply cls is 'local')") -@click.option('--url', '-u', default=None, - help="Scheduler's url access (imply cls is 'remote')") -@click.option('--no-stdout', is_flag=True, default=False, - help="Do NOT output logs on the console") +@click.group(name="scheduler", context_settings=CONTEXT_SETTINGS, cls=AliasedGroup) +@click.option( + 
"--config-file", + "-C", + default=None, + type=click.Path(exists=True, dir_okay=False,), + help="Configuration file.", +) +@click.option( + "--database", + "-d", + default=None, + help="Scheduling database DSN (imply cls is 'local')", +) +@click.option( + "--url", "-u", default=None, help="Scheduler's url access (imply cls is 'remote')" +) +@click.option( + "--no-stdout", is_flag=True, default=False, help="Do NOT output logs on the console" +) @click.pass_context def cli(ctx, config_file, database, url, no_stdout): """Software Heritage Scheduler tools. Use a local scheduler instance by default (plugged to the main scheduler db). """ from swh.core import config from swh.scheduler.celery_backend.config import setup_log_handler from swh.scheduler import get_scheduler, DEFAULT_CONFIG ctx.ensure_object(dict) - log_level = ctx.obj.get('log_level', logging.INFO) + log_level = ctx.obj.get("log_level", logging.INFO) setup_log_handler( - loglevel=log_level, colorize=False, - format='[%(levelname)s] %(name)s -- %(message)s', - log_console=not no_stdout) + loglevel=log_level, + colorize=False, + format="[%(levelname)s] %(name)s -- %(message)s", + log_console=not no_stdout, + ) logger = logging.getLogger(__name__) scheduler = None conf = config.read(config_file, DEFAULT_CONFIG) - if 'scheduler' not in conf: + if "scheduler" not in conf: raise ValueError("missing 'scheduler' configuration") if database: - conf['scheduler']['cls'] = 'local' - conf['scheduler']['args']['db'] = database + conf["scheduler"]["cls"] = "local" + conf["scheduler"]["args"]["db"] = database elif url: - conf['scheduler']['cls'] = 'remote' - conf['scheduler']['args'] = {'url': url} - sched_conf = conf['scheduler'] + conf["scheduler"]["cls"] = "remote" + conf["scheduler"]["args"] = {"url": url} + sched_conf = conf["scheduler"] try: - logger.debug('Instanciating scheduler with %s' % ( - sched_conf)) + logger.debug("Instantiating scheduler with %s" % (sched_conf)) scheduler = get_scheduler(**sched_conf) except ValueError: # it's the subcommand to decide whether not having a proper # scheduler instance is a problem. pass - ctx.obj['scheduler'] = scheduler - ctx.obj['config'] = conf + ctx.obj["scheduler"] = scheduler + ctx.obj["config"] = conf + from . import admin, task, task_type # noqa def main(): import click.core - click.core.DEPRECATED_HELP_NOTICE = ''' -DEPRECATED! Please use the command 'swh scheduler'.''' + click.core.DEPRECATED_HELP_NOTICE = """ + +DEPRECATED! Please use the command 'swh scheduler'.""" cli.deprecated = True - return cli(auto_envvar_prefix='SWH_SCHEDULER') + return cli(auto_envvar_prefix="SWH_SCHEDULER") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/swh/scheduler/cli/admin.py b/swh/scheduler/cli/admin.py index 467816c..a864897 100644 --- a/swh/scheduler/cli/admin.py +++ b/swh/scheduler/cli/admin.py @@ -1,95 +1,108 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import time import click from . import cli -@cli.command('start-runner') -@click.option('--period', '-p', default=0, - help=('Period (in s) at witch pending tasks are checked and ' - 'executed. Set to 0 (default) for a one shot.')) +@cli.command("start-runner") +@click.option( + "--period", + "-p", + default=0, + help=( + "Period (in s) at witch pending tasks are checked and " + "executed. 
Set to 0 (default) for a one shot." + ), +) @click.pass_context def runner(ctx, period): """Starts a swh-scheduler runner service. This process is responsible for checking for ready-to-run tasks and schedule them.""" from swh.scheduler.celery_backend.runner import run_ready_tasks from swh.scheduler.celery_backend.config import build_app - app = build_app(ctx.obj['config'].get('celery')) + app = build_app(ctx.obj["config"].get("celery")) app.set_current() - logger = logging.getLogger(__name__ + '.runner') - scheduler = ctx.obj['scheduler'] - logger.debug('Scheduler %s' % scheduler) + logger = logging.getLogger(__name__ + ".runner") + scheduler = ctx.obj["scheduler"] + logger.debug("Scheduler %s" % scheduler) try: while True: - logger.debug('Run ready tasks') + logger.debug("Run ready tasks") try: ntasks = len(run_ready_tasks(scheduler, app)) if ntasks: - logger.info('Scheduled %s tasks', ntasks) + logger.info("Scheduled %s tasks", ntasks) except Exception: - logger.exception('Unexpected error in run_ready_tasks()') + logger.exception("Unexpected error in run_ready_tasks()") if not period: break time.sleep(period) except KeyboardInterrupt: ctx.exit(0) -@cli.command('start-listener') +@cli.command("start-listener") @click.pass_context def listener(ctx): """Starts a swh-scheduler listener service. This service is responsible for listening at task lifecycle events and handle their workflow status in the database.""" - scheduler_backend = ctx.obj['scheduler'] + scheduler_backend = ctx.obj["scheduler"] if not scheduler_backend: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") - broker = ctx.obj['config']\ - .get('celery', {})\ - .get('task_broker', 'amqp://guest@localhost/%2f') + broker = ( + ctx.obj["config"] + .get("celery", {}) + .get("task_broker", "amqp://guest@localhost/%2f") + ) from swh.scheduler.celery_backend.pika_listener import get_listener - listener = get_listener(broker, 'celeryev.listener', scheduler_backend) + listener = get_listener(broker, "celeryev.listener", scheduler_backend) try: listener.start_consuming() finally: listener.stop_consuming() -@cli.command('rpc-serve') -@click.option('--host', default='0.0.0.0', - help="Host to run the scheduler server api") -@click.option('--port', default=5008, type=click.INT, - help="Binding port of the server") -@click.option('--debug/--nodebug', default=None, - help=("Indicates if the server should run in debug mode. " - "Defaults to True if log-level is DEBUG, False otherwise.") - ) +@cli.command("rpc-serve") +@click.option("--host", default="0.0.0.0", help="Host to run the scheduler server api") +@click.option("--port", default=5008, type=click.INT, help="Binding port of the server") +@click.option( + "--debug/--nodebug", + default=None, + help=( + "Indicates if the server should run in debug mode. " + "Defaults to True if log-level is DEBUG, False otherwise." + ), +) @click.pass_context def rpc_server(ctx, host, port, debug): """Starts a swh-scheduler API HTTP server. 
""" - if ctx.obj['config']['scheduler']['cls'] == 'remote': - click.echo("The API server can only be started with a 'local' " - "configuration", err=True) + if ctx.obj["config"]["scheduler"]["cls"] == "remote": + click.echo( + "The API server can only be started with a 'local' " "configuration", + err=True, + ) ctx.exit(1) from swh.scheduler.api import server - server.app.config.update(ctx.obj['config']) + + server.app.config.update(ctx.obj["config"]) if debug is None: - debug = ctx.obj['log_level'] <= logging.DEBUG + debug = ctx.obj["log_level"] <= logging.DEBUG server.app.run(host, port=port, debug=bool(debug)) diff --git a/swh/scheduler/cli/task.py b/swh/scheduler/cli/task.py index 1541d02..d76d16f 100644 --- a/swh/scheduler/cli/task.py +++ b/swh/scheduler/cli/task.py @@ -1,579 +1,720 @@ # Copyright (C) 2016-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import json import itertools import locale import logging import arrow import csv import click from typing import Any, Dict from . import cli -locale.setlocale(locale.LC_ALL, '') +locale.setlocale(locale.LC_ALL, "") ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): - name = 'time and date' + name = "time and date" def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) def format_dict(d): ret = {} for k, v in d.items(): if isinstance(v, (arrow.Arrow, datetime.date, datetime.datetime)): v = arrow.get(v).format() elif isinstance(v, dict): v = format_dict(v) ret[k] = v return ret def pretty_print_list(list, indent=0): """Pretty-print a list""" - return ''.join('%s%r\n' % (' ' * indent, item) for item in list) + return "".join("%s%r\n" % (" " * indent, item) for item in list) def pretty_print_dict(dict, indent=0): """Pretty-print a list""" - return ''.join('%s%s: %r\n' % - (' ' * indent, click.style(key, bold=True), value) - for key, value in sorted(dict.items())) + return "".join( + "%s%s: %r\n" % (" " * indent, click.style(key, bold=True), value) + for key, value in sorted(dict.items()) + ) def pretty_print_run(run, indent=4): - fmt = ('{indent}{backend_id} [{status}]\n' - '{indent} scheduled: {scheduled} [{started}:{ended}]') - return fmt.format(indent=' '*indent, **format_dict(run)) + fmt = ( + "{indent}{backend_id} [{status}]\n" + "{indent} scheduled: {scheduled} [{started}:{ended}]" + ) + return fmt.format(indent=" " * indent, **format_dict(run)) def pretty_print_task(task, full=False): """Pretty-print a task If 'full' is True, also print the status and priority fields. >>> task = { ... 'id': 1234, ... 'arguments': { ... 'args': ['foo', 'bar', True], ... 'kwargs': {'key': 'value', 'key2': 42}, ... }, ... 'current_interval': datetime.timedelta(hours=1), ... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818), ... 'policy': 'oneshot', ... 'priority': None, ... 'status': 'next_run_not_scheduled', ... 'type': 'test_task', ... } >>> print(click.unstyle(pretty_print_task(task))) Task 1234 Next run: ... 
(2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 >>> print(click.unstyle(pretty_print_task(task, full=True))) Task 1234 Next run: ... (2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 """ - next_run = arrow.get(task['next_run']) + next_run = arrow.get(task["next_run"]) lines = [ - '%s %s\n' % (click.style('Task', bold=True), task['id']), - click.style(' Next run: ', bold=True), - "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), - next_run.format()), - '\n', - click.style(' Interval: ', bold=True), - str(task['current_interval']), '\n', - click.style(' Type: ', bold=True), task['type'] or '', '\n', - click.style(' Policy: ', bold=True), task['policy'] or '', '\n', - ] + "%s %s\n" % (click.style("Task", bold=True), task["id"]), + click.style(" Next run: ", bold=True), + "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), + "\n", + click.style(" Interval: ", bold=True), + str(task["current_interval"]), + "\n", + click.style(" Type: ", bold=True), + task["type"] or "", + "\n", + click.style(" Policy: ", bold=True), + task["policy"] or "", + "\n", + ] if full: lines += [ - click.style(' Status: ', bold=True), - task['status'] or '', '\n', - click.style(' Priority: ', bold=True), - task['priority'] or '', '\n', + click.style(" Status: ", bold=True), + task["status"] or "", + "\n", + click.style(" Priority: ", bold=True), + task["priority"] or "", + "\n", ] lines += [ - click.style(' Args:\n', bold=True), - pretty_print_list(task['arguments']['args'], indent=4), - click.style(' Keyword args:\n', bold=True), - pretty_print_dict(task['arguments']['kwargs'], indent=4), + click.style(" Args:\n", bold=True), + pretty_print_list(task["arguments"]["args"], indent=4), + click.style(" Keyword args:\n", bold=True), + pretty_print_dict(task["arguments"]["kwargs"], indent=4), ] - return ''.join(lines) + return "".join(lines) -@cli.group('task') +@cli.group("task") @click.pass_context def task(ctx): """Manipulate tasks.""" pass -@task.command('schedule') -@click.option('--columns', '-c', multiple=True, - default=['type', 'args', 'kwargs', 'next_run'], - type=click.Choice([ - 'type', 'args', 'kwargs', 'policy', 'next_run']), - help='columns present in the CSV file') -@click.option('--delimiter', '-d', default=',') -@click.argument('file', type=click.File(encoding='utf-8')) +@task.command("schedule") +@click.option( + "--columns", + "-c", + multiple=True, + default=["type", "args", "kwargs", "next_run"], + type=click.Choice(["type", "args", "kwargs", "policy", "next_run"]), + help="columns present in the CSV file", +) +@click.option("--delimiter", "-d", default=",") +@click.argument("file", type=click.File(encoding="utf-8")) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). 
Use sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ tasks = [] now = arrow.utcnow() - scheduler = ctx.obj['scheduler'] + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) - args = json.loads(task.pop('args', '[]')) - kwargs = json.loads(task.pop('kwargs', '{}')) - task['arguments'] = { - 'args': args, - 'kwargs': kwargs, + args = json.loads(task.pop("args", "[]")) + kwargs = json.loads(task.pop("kwargs", "{}")) + task["arguments"] = { + "args": args, + "kwargs": kwargs, } - task['next_run'] = DATETIME.convert(task.get('next_run', now), - None, None) + task["next_run"] = DATETIME.convert(task.get("next_run", now), None, None) tasks.append(task) created = scheduler.create_tasks(tasks) output = [ - 'Created %d tasks\n' % len(created), + "Created %d tasks\n" % len(created), ] for task in created: output.append(pretty_print_task(task)) - click.echo_via_pager('\n'.join(output)) + click.echo_via_pager("\n".join(output)) -@task.command('add') -@click.argument('type', nargs=1, required=True) -@click.argument('options', nargs=-1) -@click.option('--policy', '-p', default='recurring', - type=click.Choice(['recurring', 'oneshot'])) -@click.option('--priority', '-P', default=None, - type=click.Choice(['low', 'normal', 'high'])) -@click.option('--next-run', '-n', default=None) +@task.command("add") +@click.argument("type", nargs=1, required=True) +@click.argument("options", nargs=-1) +@click.option( + "--policy", "-p", default="recurring", type=click.Choice(["recurring", "oneshot"]) +) +@click.option( + "--priority", "-P", default=None, type=click.Choice(["low", "normal", "high"]) +) +@click.option("--next-run", "-n", default=None) @click.pass_context def schedule_task(ctx, type, options, policy, priority, next_run): """Schedule one task from arguments. The first argument is the name of the task type, further ones are positional and keyword argument(s) of the task, in YAML format. Keyword args are of the form key=value. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task add list-pypi swh-scheduler --database 'service=swh-scheduler' \ task add list-debian-distribution --policy=oneshot distribution=stretch Note: if the priority is not given, the task won't have the priority set, which is considered as the lowest priority level. 
""" from .utils import parse_options - scheduler = ctx.obj['scheduler'] + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") now = arrow.utcnow() (args, kw) = parse_options(options) - task = {'type': type, - 'policy': policy, - 'priority': priority, - 'arguments': { - 'args': args, - 'kwargs': kw, - }, - 'next_run': DATETIME.convert(next_run or now, - None, None), - } + task = { + "type": type, + "policy": policy, + "priority": priority, + "arguments": {"args": args, "kwargs": kw,}, + "next_run": DATETIME.convert(next_run or now, None, None), + } created = scheduler.create_tasks([task]) output = [ - 'Created %d tasks\n' % len(created), + "Created %d tasks\n" % len(created), ] for task in created: output.append(pretty_print_task(task)) - click.echo('\n'.join(output)) - - -@task.command('schedule_origins') -@click.argument('type', nargs=1, required=True) -@click.argument('options', nargs=-1) -@click.option('--batch-size', '-b', 'origin_batch_size', - default=10, show_default=True, type=int, - help="Number of origins per task") -@click.option('--min-id', - default=0, show_default=True, type=int, - help="Only schedule tasks for origins whose ID is greater") -@click.option('--max-id', - default=None, type=int, - help="Only schedule tasks for origins whose ID is lower") -@click.option('--storage-url', '-g', - help="URL of the (graph) storage API") -@click.option('--dry-run/--no-dry-run', is_flag=True, - default=False, - help='List only what would be scheduled.') + click.echo("\n".join(output)) + + +@task.command("schedule_origins") +@click.argument("type", nargs=1, required=True) +@click.argument("options", nargs=-1) +@click.option( + "--batch-size", + "-b", + "origin_batch_size", + default=10, + show_default=True, + type=int, + help="Number of origins per task", +) +@click.option( + "--min-id", + default=0, + show_default=True, + type=int, + help="Only schedule tasks for origins whose ID is greater", +) +@click.option( + "--max-id", + default=None, + type=int, + help="Only schedule tasks for origins whose ID is lower", +) +@click.option("--storage-url", "-g", help="URL of the (graph) storage API") +@click.option( + "--dry-run/--no-dry-run", + is_flag=True, + default=False, + help="List only what would be scheduled.", +) @click.pass_context def schedule_origin_metadata_index( - ctx, type, options, storage_url, origin_batch_size, - min_id, max_id, dry_run): + ctx, type, options, storage_url, origin_batch_size, min_id, max_id, dry_run +): """Schedules tasks for origins that are already known. The first argument is the name of the task type, further ones are keyword argument(s) of the task in the form key=value, where value is in YAML format. 
Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task schedule_origins index-origin-metadata """ from swh.storage import get_storage from swh.storage.algos.origin import iter_origins from .utils import parse_options, schedule_origin_batches - scheduler = ctx.obj['scheduler'] - storage = get_storage('remote', url=storage_url) + scheduler = ctx.obj["scheduler"] + storage = get_storage("remote", url=storage_url) if dry_run: scheduler = None (args, kw) = parse_options(options) if args: - raise click.ClickException('Only keywords arguments are allowed.') + raise click.ClickException("Only keywords arguments are allowed.") origins = iter_origins(storage, origin_from=min_id, origin_to=max_id) - origin_urls = (origin['url'] for origin in origins) - - schedule_origin_batches( - scheduler, type, origin_urls, origin_batch_size, kw) - - -@task.command('list-pending') -@click.argument('task-types', required=True, nargs=-1) -@click.option('--limit', '-l', required=False, type=click.INT, - help='The maximum number of tasks to fetch') -@click.option('--before', '-b', required=False, type=DATETIME, - help='List all jobs supposed to run before the given date') + origin_urls = (origin["url"] for origin in origins) + + schedule_origin_batches(scheduler, type, origin_urls, origin_batch_size, kw) + + +@task.command("list-pending") +@click.argument("task-types", required=True, nargs=-1) +@click.option( + "--limit", + "-l", + required=False, + type=click.INT, + help="The maximum number of tasks to fetch", +) +@click.option( + "--before", + "-b", + required=False, + type=DATETIME, + help="List all jobs supposed to run before the given date", +) @click.pass_context def list_pending_tasks(ctx, task_types, limit, before): """List the tasks that are going to be run. 
You can override the number of tasks to fetch """ from swh.scheduler import compute_nb_tasks_from - scheduler = ctx.obj['scheduler'] + + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") num_tasks, num_tasks_priority = compute_nb_tasks_from(limit) output = [] for task_type in task_types: pending = scheduler.peek_ready_tasks( - task_type, timestamp=before, - num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) - output.append('Found %d %s tasks\n' % ( - len(pending), task_type)) + task_type, + timestamp=before, + num_tasks=num_tasks, + num_tasks_priority=num_tasks_priority, + ) + output.append("Found %d %s tasks\n" % (len(pending), task_type)) for task in pending: output.append(pretty_print_task(task)) - click.echo('\n'.join(output)) - - -@task.command('list') -@click.option('--task-id', '-i', default=None, multiple=True, metavar='ID', - help='List only tasks whose id is ID.') -@click.option('--task-type', '-t', default=None, multiple=True, metavar='TYPE', - help='List only tasks of type TYPE') -@click.option('--limit', '-l', required=False, type=click.INT, - help='The maximum number of tasks to fetch.') -@click.option('--status', '-s', multiple=True, metavar='STATUS', - type=click.Choice( - ('next_run_not_scheduled', 'next_run_scheduled', - 'completed', 'disabled')), - default=None, - help='List tasks whose status is STATUS.') -@click.option('--policy', '-p', default=None, - type=click.Choice(['recurring', 'oneshot']), - help='List tasks whose policy is POLICY.') -@click.option('--priority', '-P', default=None, multiple=True, - type=click.Choice(['all', 'low', 'normal', 'high']), - help='List tasks whose priority is PRIORITY.') -@click.option('--before', '-b', required=False, type=DATETIME, - metavar='DATETIME', - help='Limit to tasks supposed to run before the given date.') -@click.option('--after', '-a', required=False, type=DATETIME, - metavar='DATETIME', - help='Limit to tasks supposed to run after the given date.') -@click.option('--list-runs', '-r', is_flag=True, default=False, - help='Also list past executions of each task.') + click.echo("\n".join(output)) + + +@task.command("list") +@click.option( + "--task-id", + "-i", + default=None, + multiple=True, + metavar="ID", + help="List only tasks whose id is ID.", +) +@click.option( + "--task-type", + "-t", + default=None, + multiple=True, + metavar="TYPE", + help="List only tasks of type TYPE", +) +@click.option( + "--limit", + "-l", + required=False, + type=click.INT, + help="The maximum number of tasks to fetch.", +) +@click.option( + "--status", + "-s", + multiple=True, + metavar="STATUS", + type=click.Choice( + ("next_run_not_scheduled", "next_run_scheduled", "completed", "disabled") + ), + default=None, + help="List tasks whose status is STATUS.", +) +@click.option( + "--policy", + "-p", + default=None, + type=click.Choice(["recurring", "oneshot"]), + help="List tasks whose policy is POLICY.", +) +@click.option( + "--priority", + "-P", + default=None, + multiple=True, + type=click.Choice(["all", "low", "normal", "high"]), + help="List tasks whose priority is PRIORITY.", +) +@click.option( + "--before", + "-b", + required=False, + type=DATETIME, + metavar="DATETIME", + help="Limit to tasks supposed to run before the given date.", +) +@click.option( + "--after", + "-a", + required=False, + type=DATETIME, + metavar="DATETIME", + help="Limit to tasks supposed to run after the 
given date.", +) +@click.option( + "--list-runs", + "-r", + is_flag=True, + default=False, + help="Also list past executions of each task.", +) @click.pass_context -def list_tasks(ctx, task_id, task_type, limit, status, policy, priority, - before, after, list_runs): +def list_tasks( + ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs +): """List tasks. """ - scheduler = ctx.obj['scheduler'] + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") if not task_type: - task_type = [x['type'] for x in scheduler.get_task_types()] + task_type = [x["type"] for x in scheduler.get_task_types()] # if task_id is not given, default value for status is # 'next_run_not_scheduled' # if task_id is given, default status is 'all' if task_id is None and status is None: - status = ['next_run_not_scheduled'] - if status and 'all' in status: + status = ["next_run_not_scheduled"] + if status and "all" in status: status = None - if priority and 'all' in priority: + if priority and "all" in priority: priority = None output = [] tasks = scheduler.search_tasks( task_id=task_id, task_type=task_type, - status=status, priority=priority, policy=policy, - before=before, after=after, - limit=limit) + status=status, + priority=priority, + policy=policy, + before=before, + after=after, + limit=limit, + ) if list_runs: - runs = {t['id']: [] for t in tasks} - for r in scheduler.get_task_runs([task['id'] for task in tasks]): - runs[r['task']].append(r) + runs = {t["id"]: [] for t in tasks} + for r in scheduler.get_task_runs([task["id"] for task in tasks]): + runs[r["task"]].append(r) else: runs = {} - output.append('Found %d tasks\n' % ( - len(tasks))) + output.append("Found %d tasks\n" % (len(tasks))) for task in tasks: output.append(pretty_print_task(task, full=True)) - if runs.get(task['id']): - output.append(click.style(' Executions:', bold=True)) - for run in runs[task['id']]: + if runs.get(task["id"]): + output.append(click.style(" Executions:", bold=True)) + for run in runs[task["id"]]: output.append(pretty_print_run(run, indent=4)) - click.echo('\n'.join(output)) + click.echo("\n".join(output)) -@task.command('respawn') -@click.argument('task-ids', required=True, nargs=-1) -@click.option('--next-run', '-n', required=False, type=DATETIME, - metavar='DATETIME', default=None, - help='Re spawn the selected tasks at this date') +@task.command("respawn") +@click.argument("task-ids", required=True, nargs=-1) +@click.option( + "--next-run", + "-n", + required=False, + type=DATETIME, + metavar="DATETIME", + default=None, + help="Re spawn the selected tasks at this date", +) @click.pass_context def respawn_tasks(ctx, task_ids, next_run): """Respawn tasks. Respawn tasks given by their ids (see the 'task list' command to find task ids) at the given date (immediately by default). Eg. 
swh-scheduler task respawn 1 3 12 """ - scheduler = ctx.obj['scheduler'] + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") if next_run is None: next_run = arrow.utcnow() output = [] scheduler.set_status_tasks( - task_ids, status='next_run_not_scheduled', next_run=next_run) - output.append('Respawn tasks %s\n' % (task_ids,)) - - click.echo('\n'.join(output)) - - -@task.command('archive') -@click.option('--before', '-b', default=None, - help='''Task whose ended date is anterior will be archived. - Default to current month's first day.''') -@click.option('--after', '-a', default=None, - help='''Task whose ended date is after the specified date will - be archived. Default to prior month's first day.''') -@click.option('--batch-index', default=1000, type=click.INT, - help='Batch size of tasks to read from db to archive') -@click.option('--bulk-index', default=200, type=click.INT, - help='Batch size of tasks to bulk index') -@click.option('--batch-clean', default=1000, type=click.INT, - help='Batch size of task to clean after archival') -@click.option('--dry-run/--no-dry-run', is_flag=True, default=False, - help='Default to list only what would be archived.') -@click.option('--verbose', is_flag=True, default=False, - help='Verbose mode') -@click.option('--cleanup/--no-cleanup', is_flag=True, default=True, - help='Clean up archived tasks (default)') -@click.option('--start-from', type=click.STRING, default=None, - help='(Optional) default page to start from.') + task_ids, status="next_run_not_scheduled", next_run=next_run + ) + output.append("Respawn tasks %s\n" % (task_ids,)) + + click.echo("\n".join(output)) + + +@task.command("archive") +@click.option( + "--before", + "-b", + default=None, + help="""Task whose ended date is anterior will be archived. + Default to current month's first day.""", +) +@click.option( + "--after", + "-a", + default=None, + help="""Task whose ended date is after the specified date will + be archived. Default to prior month's first day.""", +) +@click.option( + "--batch-index", + default=1000, + type=click.INT, + help="Batch size of tasks to read from db to archive", +) +@click.option( + "--bulk-index", + default=200, + type=click.INT, + help="Batch size of tasks to bulk index", +) +@click.option( + "--batch-clean", + default=1000, + type=click.INT, + help="Batch size of task to clean after archival", +) +@click.option( + "--dry-run/--no-dry-run", + is_flag=True, + default=False, + help="Default to list only what would be archived.", +) +@click.option("--verbose", is_flag=True, default=False, help="Verbose mode") +@click.option( + "--cleanup/--no-cleanup", + is_flag=True, + default=True, + help="Clean up archived tasks (default)", +) +@click.option( + "--start-from", + type=click.STRING, + default=None, + help="(Optional) default page to start from.", +) @click.pass_context -def archive_tasks(ctx, before, after, batch_index, bulk_index, batch_clean, - dry_run, verbose, cleanup, start_from): +def archive_tasks( + ctx, + before, + after, + batch_index, + bulk_index, + batch_clean, + dry_run, + verbose, + cleanup, + start_from, +): """Archive task/task_run whose (task_type is 'oneshot' and task_status is 'completed') or (task_type is 'recurring' and task_status is 'disabled'). With --dry-run flag set (default), only list those. 
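    A sketch of a typical invocation (mirroring the CLI test suite; the dates
    and the config file name below are placeholders, not defaults):

        swh-scheduler --config-file scheduler.yml task archive \
            --after 2020-01-01 --before 2020-02-01 --cleanup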
""" from swh.core.utils import grouper from swh.scheduler.backend_es import ElasticSearchBackend - config = ctx.obj['config'] - scheduler = ctx.obj['scheduler'] + + config = ctx.obj["config"] + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) logger = logging.getLogger(__name__) - logging.getLogger('urllib3').setLevel(logging.WARN) - logging.getLogger('elasticsearch').setLevel(logging.ERROR) + logging.getLogger("urllib3").setLevel(logging.WARN) + logging.getLogger("elasticsearch").setLevel(logging.ERROR) if dry_run: - logger.info('**DRY-RUN** (only reading db)') + logger.info("**DRY-RUN** (only reading db)") if not cleanup: - logger.info('**NO CLEANUP**') + logger.info("**NO CLEANUP**") es_storage = ElasticSearchBackend(**config) now = arrow.utcnow() # Default to archive tasks from a rolling month starting the week # prior to the current one if not before: - before = now.shift(weeks=-1).format('YYYY-MM-DD') + before = now.shift(weeks=-1).format("YYYY-MM-DD") if not after: - after = now.shift(weeks=-1).shift(months=-1).format('YYYY-MM-DD') + after = now.shift(weeks=-1).shift(months=-1).format("YYYY-MM-DD") - logger.debug('index: %s; cleanup: %s; period: [%s ; %s]' % ( - not dry_run, not dry_run and cleanup, after, before)) + logger.debug( + "index: %s; cleanup: %s; period: [%s ; %s]" + % (not dry_run, not dry_run and cleanup, after, before) + ) - def get_index_name(data: Dict[str, Any], - es_storage: ElasticSearchBackend = es_storage) -> str: + def get_index_name( + data: Dict[str, Any], es_storage: ElasticSearchBackend = es_storage + ) -> str: """Given a data record, determine the index's name through its ending date. This varies greatly depending on the task_run's status. 
""" - date = data.get('started') + date = data.get("started") if not date: - date = data['scheduled'] + date = data["scheduled"] return es_storage.compute_index_name(date.year, date.month) def index_data(before, page_token, batch_index): while True: result = scheduler.filter_task_to_archive( - after, before, page_token=page_token, limit=batch_index) - tasks_sorted = sorted(result['tasks'], key=get_index_name) + after, before, page_token=page_token, limit=batch_index + ) + tasks_sorted = sorted(result["tasks"], key=get_index_name) groups = itertools.groupby(tasks_sorted, key=get_index_name) for index_name, tasks_group in groups: - logger.debug('Index tasks to %s' % index_name) + logger.debug("Index tasks to %s" % index_name) if dry_run: for task in tasks_group: yield task continue yield from es_storage.streaming_bulk( - index_name, tasks_group, source=['task_id', 'task_run_id'], - chunk_size=bulk_index) + index_name, + tasks_group, + source=["task_id", "task_run_id"], + chunk_size=bulk_index, + ) - page_token = result.get('next_page_token') + page_token = result.get("next_page_token") if page_token is None: break gen = index_data(before, page_token=start_from, batch_index=batch_index) if cleanup: for task_ids in grouper(gen, n=batch_clean): task_ids = list(task_ids) - logger.info('Clean up %s tasks: [%s, ...]' % ( - len(task_ids), task_ids[0])) + logger.info("Clean up %s tasks: [%s, ...]" % (len(task_ids), task_ids[0])) if dry_run: # no clean up continue - ctx.obj['scheduler'].delete_archived_tasks(task_ids) + ctx.obj["scheduler"].delete_archived_tasks(task_ids) else: for task_ids in grouper(gen, n=batch_index): task_ids = list(task_ids) - logger.info('Indexed %s tasks: [%s, ...]' % ( - len(task_ids), task_ids[0])) + logger.info("Indexed %s tasks: [%s, ...]" % (len(task_ids), task_ids[0])) - logger.debug('Done!') + logger.debug("Done!") diff --git a/swh/scheduler/cli/task_type.py b/swh/scheduler/cli/task_type.py index 5da5ee2..f4eb546 100644 --- a/swh/scheduler/cli/task_type.py +++ b/swh/scheduler/cli/task_type.py @@ -1,198 +1,230 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import celery.app.task import click import logging from importlib import import_module from typing import Mapping from pkg_resources import iter_entry_points from . 
import cli logger = logging.getLogger(__name__) DEFAULT_TASK_TYPE = { - 'full': { # for tasks like 'list_xxx_full()' - 'default_interval': '90 days', - 'min_interval': '90 days', - 'max_interval': '90 days', - 'backoff_factor': 1 + "full": { # for tasks like 'list_xxx_full()' + "default_interval": "90 days", + "min_interval": "90 days", + "max_interval": "90 days", + "backoff_factor": 1, }, - '*': { # value if not suffix matches - 'default_interval': '1 day', - 'min_interval': '1 day', - 'max_interval': '1 day', - 'backoff_factor': 1 + "*": { # value if not suffix matches + "default_interval": "1 day", + "min_interval": "1 day", + "max_interval": "1 day", + "backoff_factor": 1, }, } PLUGIN_WORKER_DESCRIPTIONS = { - entry_point.name: entry_point - for entry_point in iter_entry_points('swh.workers') + entry_point.name: entry_point for entry_point in iter_entry_points("swh.workers") } -@cli.group('task-type') +@cli.group("task-type") @click.pass_context def task_type(ctx): """Manipulate task types.""" pass -@task_type.command('list') -@click.option('--verbose', '-v', is_flag=True, default=False, - help='Verbose mode') -@click.option('--task_type', '-t', multiple=True, default=None, - help='List task types of given type') -@click.option('--task_name', '-n', multiple=True, default=None, - help='List task types of given backend task name') +@task_type.command("list") +@click.option("--verbose", "-v", is_flag=True, default=False, help="Verbose mode") +@click.option( + "--task_type", + "-t", + multiple=True, + default=None, + help="List task types of given type", +) +@click.option( + "--task_name", + "-n", + multiple=True, + default=None, + help="List task types of given backend task name", +) @click.pass_context def list_task_types(ctx, verbose, task_type, task_name): click.echo("Known task types:") if verbose: - tmpl = click.style('{type}: ', bold=True) + '''{backend_name} + tmpl = ( + click.style("{type}: ", bold=True) + + """{backend_name} {description} interval: {default_interval} [{min_interval}, {max_interval}] backoff_factor: {backoff_factor} max_queue_length: {max_queue_length} num_retries: {num_retries} retry_delay: {retry_delay} -''' +""" + ) else: - tmpl = '{type}:\n {description}' - for tasktype in sorted(ctx.obj['scheduler'].get_task_types(), - key=lambda x: x['type']): - if task_type and tasktype['type'] not in task_type: + tmpl = "{type}:\n {description}" + for tasktype in sorted( + ctx.obj["scheduler"].get_task_types(), key=lambda x: x["type"] + ): + if task_type and tasktype["type"] not in task_type: continue - if task_name and tasktype['backend_name'] not in task_name: + if task_name and tasktype["backend_name"] not in task_name: continue click.echo(tmpl.format(**tasktype)) -@task_type.command('register') -@click.option('--plugins', '-p', 'plugins', multiple=True, default=('all', ), - type=click.Choice(['all'] + list(PLUGIN_WORKER_DESCRIPTIONS)), - help='Registers task-types for provided plugins. ' - 'Defaults to all') +@task_type.command("register") +@click.option( + "--plugins", + "-p", + "plugins", + multiple=True, + default=("all",), + type=click.Choice(["all"] + list(PLUGIN_WORKER_DESCRIPTIONS)), + help="Registers task-types for provided plugins. " "Defaults to all", +) @click.pass_context def register_task_types(ctx, plugins): """Register missing task-type entries in the scheduler. According to declared tasks in each loaded worker (e.g. lister, loader, ...) plugins. 
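    Illustrative invocations (the plugin name is hypothetical; any entry
    point declared under the "swh.workers" group can be passed to -p):

        swh-scheduler task-type register
        swh-scheduler task-type register -p lister.github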
""" - scheduler = ctx.obj['scheduler'] + scheduler = ctx.obj["scheduler"] - if plugins == ('all', ): + if plugins == ("all",): plugins = list(PLUGIN_WORKER_DESCRIPTIONS) for plugin in plugins: entrypoint = PLUGIN_WORKER_DESCRIPTIONS[plugin] - logger.info('Loading entrypoint for plugin %s', plugin) + logger.info("Loading entrypoint for plugin %s", plugin) registry_entry = entrypoint.load()() - for task_module in registry_entry['task_modules']: + for task_module in registry_entry["task_modules"]: mod = import_module(task_module) - for task_name in (x for x in dir(mod) if not x.startswith('_')): - logger.debug('Loading task name %s', task_name) + for task_name in (x for x in dir(mod) if not x.startswith("_")): + logger.debug("Loading task name %s", task_name) taskobj = getattr(mod, task_name) if isinstance(taskobj, celery.app.task.Task): - tt_name = task_name.replace('_', '-') - task_cfg = registry_entry.get('task_types', {}).get( - tt_name, {}) - ensure_task_type( - task_module, tt_name, taskobj, task_cfg, scheduler) + tt_name = task_name.replace("_", "-") + task_cfg = registry_entry.get("task_types", {}).get(tt_name, {}) + ensure_task_type(task_module, tt_name, taskobj, task_cfg, scheduler) def ensure_task_type( - task_module: str, task_type: str, - swhtask, task_config: Mapping, scheduler): + task_module: str, task_type: str, swhtask, task_config: Mapping, scheduler +): """Ensure a given task-type (for the task_module) exists in the scheduler. Args: task_module: task module we are currently checking for task type consistency task_type: the type of the task to check/insert (correspond to the 'type' field in the db) swhtask (SWHTask): the SWHTask instance the task-type correspond to task_config: a dict with specific/overloaded values for the task-type to be created scheduler: the scheduler object used to access the scheduler db """ for suffix, defaults in DEFAULT_TASK_TYPE.items(): - if task_type.endswith('-' + suffix): + if task_type.endswith("-" + suffix): task_type_dict = defaults.copy() break else: - task_type_dict = DEFAULT_TASK_TYPE['*'].copy() + task_type_dict = DEFAULT_TASK_TYPE["*"].copy() - task_type_dict['type'] = task_type - task_type_dict['backend_name'] = swhtask.name + task_type_dict["type"] = task_type + task_type_dict["backend_name"] = swhtask.name if swhtask.__doc__: - task_type_dict['description'] = swhtask.__doc__.splitlines()[0] + task_type_dict["description"] = swhtask.__doc__.splitlines()[0] task_type_dict.update(task_config) current_task_type = scheduler.get_task_type(task_type) if current_task_type: # Ensure the existing task_type is consistent in the scheduler - if current_task_type['backend_name'] != task_type_dict['backend_name']: + if current_task_type["backend_name"] != task_type_dict["backend_name"]: logger.warning( - 'Existing task type %s for module %s has a ' - 'different backend name than current ' - 'code version provides (%s vs. %s)', + "Existing task type %s for module %s has a " + "different backend name than current " + "code version provides (%s vs. 
%s)", task_type, task_module, - current_task_type['backend_name'], - task_type_dict['backend_name'], + current_task_type["backend_name"], + task_type_dict["backend_name"], ) else: - logger.info('Create task type %s in scheduler', task_type) - logger.debug(' %s', task_type_dict) + logger.info("Create task type %s in scheduler", task_type) + logger.debug(" %s", task_type_dict) scheduler.create_task_type(task_type_dict) -@task_type.command('add') -@click.argument('type', required=True) -@click.argument('task-name', required=True) -@click.argument('description', required=True) -@click.option('--default-interval', '-i', default='90 days', - help='Default interval ("90 days" by default)') -@click.option('--min-interval', default=None, - help='Minimum interval (default interval if not set)') -@click.option('--max-interval', '-i', default=None, - help='Maximal interval (default interval if not set)') -@click.option('--backoff-factor', '-f', type=float, default=1, - help='Backoff factor') +@task_type.command("add") +@click.argument("type", required=True) +@click.argument("task-name", required=True) +@click.argument("description", required=True) +@click.option( + "--default-interval", + "-i", + default="90 days", + help='Default interval ("90 days" by default)', +) +@click.option( + "--min-interval", + default=None, + help="Minimum interval (default interval if not set)", +) +@click.option( + "--max-interval", + "-i", + default=None, + help="Maximal interval (default interval if not set)", +) +@click.option("--backoff-factor", "-f", type=float, default=1, help="Backoff factor") @click.pass_context -def add_task_type(ctx, type, task_name, description, - default_interval, min_interval, max_interval, - backoff_factor): +def add_task_type( + ctx, + type, + task_name, + description, + default_interval, + min_interval, + max_interval, + backoff_factor, +): """Create a new task type """ - scheduler = ctx.obj['scheduler'] + scheduler = ctx.obj["scheduler"] if not scheduler: - raise ValueError('Scheduler class (local/remote) must be instantiated') + raise ValueError("Scheduler class (local/remote) must be instantiated") task_type = dict( type=type, backend_name=task_name, description=description, default_interval=default_interval, min_interval=min_interval, max_interval=max_interval, backoff_factor=backoff_factor, max_queue_length=None, num_retries=None, retry_delay=None, - ) + ) scheduler.create_task_type(task_type) - click.echo('OK') + click.echo("OK") diff --git a/swh/scheduler/cli/utils.py b/swh/scheduler/cli/utils.py index 0cd5506..547d70c 100644 --- a/swh/scheduler/cli/utils.py +++ b/swh/scheduler/cli/utils.py @@ -1,87 +1,86 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import itertools import click import yaml from swh.scheduler.utils import create_task_dict TASK_BATCH_SIZE = 1000 # Number of tasks per query to the scheduler -def schedule_origin_batches( - scheduler, task_type, origins, origin_batch_size, kwargs): +def schedule_origin_batches(scheduler, task_type, origins, origin_batch_size, kwargs): nb_origins = 0 nb_tasks = 0 while True: task_batch = [] for _ in range(TASK_BATCH_SIZE): # Group origins origin_batch = [] for origin in itertools.islice(origins, origin_batch_size): origin_batch.append(origin) nb_origins += len(origin_batch) if not origin_batch: break # Create a task for these origins args 
= [origin_batch] - task_dict = create_task_dict(task_type, 'oneshot', *args, **kwargs) + task_dict = create_task_dict(task_type, "oneshot", *args, **kwargs) task_batch.append(task_dict) # Schedule a batch of tasks if not task_batch: break nb_tasks += len(task_batch) if scheduler: scheduler.create_tasks(task_batch) - click.echo('Scheduled %d tasks (%d origins).' % (nb_tasks, nb_origins)) + click.echo("Scheduled %d tasks (%d origins)." % (nb_tasks, nb_origins)) # Print final status. if nb_tasks: - click.echo('Done.') + click.echo("Done.") else: - click.echo('Nothing to do (no origin metadata matched the criteria).') + click.echo("Nothing to do (no origin metadata matched the criteria).") def parse_argument(option): try: return yaml.safe_load(option) except Exception: - raise click.ClickException('Invalid argument: {}'.format(option)) + raise click.ClickException("Invalid argument: {}".format(option)) def parse_options(options): """Parses options from a CLI as YAML and turns it into Python args and kwargs. >>> parse_options([]) ([], {}) >>> parse_options(['foo', 'bar']) (['foo', 'bar'], {}) >>> parse_options(['[foo, bar]']) ([['foo', 'bar']], {}) >>> parse_options(['"foo"', '"bar"']) (['foo', 'bar'], {}) >>> parse_options(['foo="bar"']) ([], {'foo': 'bar'}) >>> parse_options(['"foo"', 'bar="baz"']) (['foo'], {'bar': 'baz'}) >>> parse_options(['42', 'bar=False']) ([42], {'bar': False}) >>> parse_options(['42', 'bar=false']) ([42], {'bar': False}) >>> parse_options(['42', '"foo']) Traceback (most recent call last): ... click.exceptions.ClickException: Invalid argument: "foo """ - kw_pairs = [x.split('=', 1) for x in options if '=' in x] - args = [parse_argument(x) for x in options if '=' not in x] + kw_pairs = [x.split("=", 1) for x in options if "=" in x] + args = [parse_argument(x) for x in options if "=" not in x] kw = {k: parse_argument(v) for (k, v) in kw_pairs} return (args, kw) diff --git a/swh/scheduler/elasticsearch_memory.py b/swh/scheduler/elasticsearch_memory.py index c8c9964..da59852 100644 --- a/swh/scheduler/elasticsearch_memory.py +++ b/swh/scheduler/elasticsearch_memory.py @@ -1,154 +1,146 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Memory Elastic Search backend """ import datetime # noqa serialization purposes import hashlib import logging from ast import literal_eval from typing import Optional import psycopg2 # noqa serialization purposes logger = logging.getLogger(__name__) class BasicSerializer: """For memory elastic search implementation (not for production) """ + def __init__(self, *args, **kwargs): pass def dumps(self, *args, **kwargs): return str(*args) class BasicTransport: """For memory elastic search implementation, (not for production) """ + def __init__(self, *args, **kwargs): self.serializer = BasicSerializer() class MemoryElasticsearch: """Memory Elasticsearch instance (for test purposes) Partial implementation oriented towards index storage (and not search) For now, its sole client is the scheduler for task archival purposes. 
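    Minimal usage sketch (in-memory only; these calls mimic a small subset
    of the real client's API):

    >>> es = MemoryElasticsearch()
    >>> es.create("swh-tasks-2020-01")
    >>> es.exists("swh-tasks-2020-01")
    True
    >>> es.close("swh-tasks-2020-01")
    >>> es.index["swh-tasks-2020-01"]["status"]
    'closed'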
""" + def __init__(self, *args, **kwargs): self.index = {} self.mapping = {} self.settings = {} self.indices = self # HACK self.main_mapping_key: Optional[str] = None self.main_settings_key: Optional[str] = None self.transport = BasicTransport() def create(self, index, **kwargs): - logger.debug(f'create index {index}') - logger.debug(f'indices: {self.index}') - logger.debug(f'mapping: {self.mapping}') - logger.debug(f'settings: {self.settings}') + logger.debug(f"create index {index}") + logger.debug(f"indices: {self.index}") + logger.debug(f"mapping: {self.mapping}") + logger.debug(f"settings: {self.settings}") self.index[index] = { - 'status': 'opened', - 'data': {}, - 'mapping': self.get_mapping(self.main_mapping_key), - 'settings': self.get_settings(self.main_settings_key), + "status": "opened", + "data": {}, + "mapping": self.get_mapping(self.main_mapping_key), + "settings": self.get_settings(self.main_settings_key), } - logger.debug(f'index {index} created') + logger.debug(f"index {index} created") def close(self, index, **kwargs): """Close index""" idx = self.index.get(index) if idx: - idx['status'] = 'closed' + idx["status"] = "closed" def open(self, index, **kwargs): """Open index""" idx = self.index.get(index) if idx: - idx['status'] = 'opened' + idx["status"] = "opened" def bulk(self, body, **kwargs): """Bulk insert document in index""" assert isinstance(body, str) - all_data = body.split('\n') - if all_data[-1] == '': + all_data = body.split("\n") + if all_data[-1] == "": all_data = all_data[:-1] # drop the empty line if any ids = [] # data is sent as tuple (index, data-to-index) for i in range(0, len(all_data), 2): # The first entry is about the index to use # not about a data to index # find the index index_data = literal_eval(all_data[i]) - idx_name = index_data['index']['_index'] + idx_name = index_data["index"]["_index"] # associated data to index - data = all_data[i+1] - _id = hashlib.sha1(data.encode('utf-8')).hexdigest() + data = all_data[i + 1] + _id = hashlib.sha1(data.encode("utf-8")).hexdigest() parsed_data = eval(data) # for datetime - self.index[idx_name]['data'][_id] = parsed_data + self.index[idx_name]["data"][_id] = parsed_data ids.append(_id) # everything is indexed fine - return { - 'items': [ - { - 'index': { - 'status': 200, - '_id': _id, - } - } for _id in ids - ] - } + return {"items": [{"index": {"status": 200, "_id": _id,}} for _id in ids]} def mget(self, *args, body, index, **kwargs): """Bulk indexed documents retrieval""" idx = self.index[index] docs = [] - idx_docs = idx['data'] - for _id in body['ids']: + idx_docs = idx["data"] + for _id in body["ids"]: doc = idx_docs.get(_id) if doc: d = { - 'found': True, - '_source': doc, + "found": True, + "_source": doc, } docs.append(d) - return {'docs': docs} + return {"docs": docs} def stats(self, index, **kwargs): idx = self.index[index] # will raise if it does not exist - if not idx or idx['status'] == 'closed': - raise ValueError('Closed index') # simulate issue if index closed + if not idx or idx["status"] == "closed": + raise ValueError("Closed index") # simulate issue if index closed def exists(self, index, **kwargs): return self.index.get(index) is not None def put_mapping(self, index, body, **kwargs): self.mapping[index] = body self.main_mapping_key = index def get_mapping(self, index, **kwargs): - return self.mapping.get(index) or \ - self.index.get(index, {}).get('mapping', {}) + return self.mapping.get(index) or self.index.get(index, {}).get("mapping", {}) def put_settings(self, index, body, 
**kwargs): self.settings[index] = body self.main_settings_key = index def get_settings(self, index, **kwargs): - return self.settings.get(index) or \ - self.index.get(index, {}).get('settings', {}) + return self.settings.get(index) or self.index.get(index, {}).get("settings", {}) diff --git a/swh/scheduler/task.py b/swh/scheduler/task.py index 83d3c0e..46bbe81 100644 --- a/swh/scheduler/task.py +++ b/swh/scheduler/task.py @@ -1,86 +1,81 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import datetime from celery import current_app import celery.app.task from celery.utils.log import get_task_logger from swh.core.statsd import Statsd def ts(): return int(datetime.utcnow().timestamp()) class SWHTask(celery.app.task.Task): """a schedulable task (abstract class) Current implementation is based on Celery. See http://docs.celeryproject.org/en/latest/reference/celery.app.task.html for how to use tasks once instantiated """ _statsd = None _log = None @property def statsd(self): if self._statsd: return self._statsd - worker_name = current_app.conf.get('worker_name') + worker_name = current_app.conf.get("worker_name") if worker_name: - self._statsd = Statsd(constant_tags={ - 'task': self.name, - 'worker': worker_name, - }) + self._statsd = Statsd( + constant_tags={"task": self.name, "worker": worker_name,} + ) return self._statsd else: - statsd = Statsd(constant_tags={ - 'task': self.name, - 'worker': 'unknown worker', - }) + statsd = Statsd( + constant_tags={"task": self.name, "worker": "unknown worker",} + ) return statsd def __call__(self, *args, **kwargs): - self.statsd.increment('swh_task_called_count') - self.statsd.gauge('swh_task_start_ts', ts()) - with self.statsd.timed('swh_task_duration_seconds'): + self.statsd.increment("swh_task_called_count") + self.statsd.gauge("swh_task_start_ts", ts()) + with self.statsd.timed("swh_task_duration_seconds"): result = super().__call__(*args, **kwargs) try: - status = result['status'] - if status == 'success': - status = 'eventful' if result.get('eventful') \ - else 'uneventful' + status = result["status"] + if status == "success": + status = "eventful" if result.get("eventful") else "uneventful" except Exception: - status = 'eventful' if result else 'uneventful' + status = "eventful" if result else "uneventful" - self.statsd.gauge( - 'swh_task_end_ts', ts(), - tags={'status': status}) + self.statsd.gauge("swh_task_end_ts", ts(), tags={"status": status}) return result def on_failure(self, exc, task_id, args, kwargs, einfo): - self.statsd.increment('swh_task_failure_count') + self.statsd.increment("swh_task_failure_count") def on_success(self, retval, task_id, args, kwargs): - self.statsd.increment('swh_task_success_count') + self.statsd.increment("swh_task_success_count") # this is a swh specific event. 
Used to attach the retval to the # task_run - self.send_event('task-result', result=retval) + self.send_event("task-result", result=retval) @property def log(self): if self._log is None: self._log = get_task_logger(self.name) return self._log def run(self, *args, **kwargs): - self.log.debug('%s: args=%s, kwargs=%s', self.name, args, kwargs) + self.log.debug("%s: args=%s, kwargs=%s", self.name, args, kwargs) ret = super().run(*args, **kwargs) - self.log.debug('%s: OK => %s', self.name, ret) + self.log.debug("%s: OK => %s", self.name, ret) return ret diff --git a/swh/scheduler/tests/__init__.py b/swh/scheduler/tests/__init__.py index 04915f5..7f3771d 100644 --- a/swh/scheduler/tests/__init__.py +++ b/swh/scheduler/tests/__init__.py @@ -1,5 +1,5 @@ from os import path import swh.scheduler -SQL_DIR = path.join(path.dirname(swh.scheduler.__file__), 'sql') +SQL_DIR = path.join(path.dirname(swh.scheduler.__file__), "sql") diff --git a/swh/scheduler/tests/common.py b/swh/scheduler/tests/common.py index 3efd906..258c80f 100644 --- a/swh/scheduler/tests/common.py +++ b/swh/scheduler/tests/common.py @@ -1,100 +1,95 @@ # Copyright (C) 2017-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime TEMPLATES = { - 'git': { - 'type': 'update-git', - 'arguments': { - 'args': [], - 'kwargs': {}, - }, - 'next_run': None, + "git": { + "type": "update-git", + "arguments": {"args": [], "kwargs": {},}, + "next_run": None, + }, + "hg": { + "type": "update-hg", + "arguments": {"args": [], "kwargs": {},}, + "next_run": None, + "policy": "oneshot", }, - 'hg': { - 'type': 'update-hg', - 'arguments': { - 'args': [], - 'kwargs': {}, - }, - 'next_run': None, - 'policy': 'oneshot', - } } TASK_TYPES = { - 'git': { - 'type': 'update-git', - 'description': 'Update a git repository', - 'backend_name': 'swh.loader.git.tasks.UpdateGitRepository', - 'default_interval': datetime.timedelta(days=64), - 'min_interval': datetime.timedelta(hours=12), - 'max_interval': datetime.timedelta(days=64), - 'backoff_factor': 2, - 'max_queue_length': None, - 'num_retries': 7, - 'retry_delay': datetime.timedelta(hours=2), + "git": { + "type": "update-git", + "description": "Update a git repository", + "backend_name": "swh.loader.git.tasks.UpdateGitRepository", + "default_interval": datetime.timedelta(days=64), + "min_interval": datetime.timedelta(hours=12), + "max_interval": datetime.timedelta(days=64), + "backoff_factor": 2, + "max_queue_length": None, + "num_retries": 7, + "retry_delay": datetime.timedelta(hours=2), }, - 'hg': { - 'type': 'update-hg', - 'description': 'Update a mercurial repository', - 'backend_name': 'swh.loader.mercurial.tasks.UpdateHgRepository', - 'default_interval': datetime.timedelta(days=64), - 'min_interval': datetime.timedelta(hours=12), - 'max_interval': datetime.timedelta(days=64), - 'backoff_factor': 2, - 'max_queue_length': None, - 'num_retries': 7, - 'retry_delay': datetime.timedelta(hours=2), + "hg": { + "type": "update-hg", + "description": "Update a mercurial repository", + "backend_name": "swh.loader.mercurial.tasks.UpdateHgRepository", + "default_interval": datetime.timedelta(days=64), + "min_interval": datetime.timedelta(hours=12), + "max_interval": datetime.timedelta(days=64), + "backoff_factor": 2, + "max_queue_length": None, + "num_retries": 7, + "retry_delay": datetime.timedelta(hours=2), }, } -def 
tasks_from_template(template, max_timestamp, num, - num_priority=0, priorities=None): +def tasks_from_template(template, max_timestamp, num, num_priority=0, priorities=None): """Build tasks from template """ + def _task_from_template(template, next_run, priority, *args, **kwargs): ret = copy.deepcopy(template) - ret['next_run'] = next_run + ret["next_run"] = next_run if priority: - ret['priority'] = priority + ret["priority"] = priority if args: - ret['arguments']['args'] = list(args) + ret["arguments"]["args"] = list(args) if kwargs: - ret['arguments']['kwargs'] = kwargs + ret["arguments"]["kwargs"] = kwargs return ret def _pop_priority(priorities): if not priorities: return None for priority, remains in priorities.items(): if remains > 0: priorities[priority] = remains - 1 return priority return None if num_priority and priorities: priorities = { - priority: ratio * num_priority - for priority, ratio in priorities.items() + priority: ratio * num_priority for priority, ratio in priorities.items() } tasks = [] for i in range(num + num_priority): priority = _pop_priority(priorities) - tasks.append(_task_from_template( - template, - max_timestamp - datetime.timedelta(microseconds=i), - priority, - 'argument-%03d' % i, - **{'kwarg%03d' % i: 'bogus-kwarg'} - )) + tasks.append( + _task_from_template( + template, + max_timestamp - datetime.timedelta(microseconds=i), + priority, + "argument-%03d" % i, + **{"kwarg%03d" % i: "bogus-kwarg"} + ) + ) return tasks diff --git a/swh/scheduler/tests/conftest.py b/swh/scheduler/tests/conftest.py index 6534132..b3be2e4 100644 --- a/swh/scheduler/tests/conftest.py +++ b/swh/scheduler/tests/conftest.py @@ -1,109 +1,112 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import pytest import glob from datetime import timedelta import pkg_resources from swh.core.utils import numfile_sortkey as sortkey from swh.scheduler import get_scheduler from swh.scheduler.tests import SQL_DIR # make sure we are not fooled by CELERY_ config environment vars -for var in [x for x in os.environ.keys() if x.startswith('CELERY')]: +for var in [x for x in os.environ.keys() if x.startswith("CELERY")]: os.environ.pop(var) # test_cli tests depends on a en/C locale, so ensure it -os.environ['LC_ALL'] = 'C.UTF-8' +os.environ["LC_ALL"] = "C.UTF-8" -DUMP_FILES = os.path.join(SQL_DIR, '*.sql') +DUMP_FILES = os.path.join(SQL_DIR, "*.sql") # celery tasks for testing purpose; tasks themselves should be # in swh/scheduler/tests/tasks.py -TASK_NAMES = ['ping', 'multiping', 'add', 'error', 'echo'] +TASK_NAMES = ["ping", "multiping", "add", "error", "echo"] -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def celery_enable_logging(): return True -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def celery_includes(): task_modules = [ - 'swh.scheduler.tests.tasks', + "swh.scheduler.tests.tasks", ] - for entrypoint in pkg_resources.iter_entry_points('swh.workers'): - task_modules.extend(entrypoint.load()().get('task_modules', [])) + for entrypoint in pkg_resources.iter_entry_points("swh.workers"): + task_modules.extend(entrypoint.load()().get("task_modules", [])) return task_modules -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def celery_parameters(): return { - 'task_cls': 'swh.scheduler.task:SWHTask', - } + "task_cls": 
"swh.scheduler.task:SWHTask", + } -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def celery_config(): return { - 'accept_content': ['application/x-msgpack', 'application/json'], - 'task_serializer': 'msgpack', - 'result_serializer': 'json', - } + "accept_content": ["application/x-msgpack", "application/json"], + "task_serializer": "msgpack", + "result_serializer": "json", + } # use the celery_session_app fixture to monkeypatch the 'main' # swh.scheduler.celery_backend.config.app Celery application # with the test application -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def swh_app(celery_session_app): from swh.scheduler.celery_backend import config + config.app = celery_session_app yield celery_session_app @pytest.fixture def swh_scheduler_config(request, postgresql): scheduler_config = { - 'db': postgresql.dsn, + "db": postgresql.dsn, } all_dump_files = sorted(glob.glob(DUMP_FILES), key=sortkey) cursor = postgresql.cursor() for fname in all_dump_files: with open(fname) as fobj: cursor.execute(fobj.read()) postgresql.commit() return scheduler_config @pytest.fixture def swh_scheduler(swh_scheduler_config): - scheduler = get_scheduler('local', swh_scheduler_config) + scheduler = get_scheduler("local", swh_scheduler_config) for taskname in TASK_NAMES: - scheduler.create_task_type({ - 'type': 'swh-test-{}'.format(taskname), - 'description': 'The {} testing task'.format(taskname), - 'backend_name': 'swh.scheduler.tests.tasks.{}'.format(taskname), - 'default_interval': timedelta(days=1), - 'min_interval': timedelta(hours=6), - 'max_interval': timedelta(days=12), - }) + scheduler.create_task_type( + { + "type": "swh-test-{}".format(taskname), + "description": "The {} testing task".format(taskname), + "backend_name": "swh.scheduler.tests.tasks.{}".format(taskname), + "default_interval": timedelta(days=1), + "min_interval": timedelta(hours=6), + "max_interval": timedelta(days=12), + } + ) return scheduler # this alias is used to be able to easily instantiate a db-backed Scheduler # eg. for the RPC client/server test suite. 
swh_db_scheduler = swh_scheduler diff --git a/swh/scheduler/tests/es/conftest.py b/swh/scheduler/tests/es/conftest.py index a699e07..8d86522 100644 --- a/swh/scheduler/tests/es/conftest.py +++ b/swh/scheduler/tests/es/conftest.py @@ -1,53 +1,49 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import yaml import pytest from swh.scheduler import get_scheduler @pytest.fixture def swh_sched_config(swh_scheduler_config): return { - 'scheduler': { - 'cls': 'local', - 'args': swh_scheduler_config, - }, - 'elasticsearch': { - 'cls': 'memory', - 'args': { - 'index_name_prefix': 'swh-tasks', - }, + "scheduler": {"cls": "local", "args": swh_scheduler_config,}, + "elasticsearch": { + "cls": "memory", + "args": {"index_name_prefix": "swh-tasks",}, }, } @pytest.fixture def swh_sched_config_file(swh_sched_config, monkeypatch, tmp_path): - conffile = str(tmp_path / 'elastic.yml') - with open(conffile, 'w') as f: + conffile = str(tmp_path / "elastic.yml") + with open(conffile, "w") as f: f.write(yaml.dump(swh_sched_config)) - monkeypatch.setenv('SWH_CONFIG_FILENAME', conffile) + monkeypatch.setenv("SWH_CONFIG_FILENAME", conffile) return conffile @pytest.fixture def swh_sched(swh_sched_config): - return get_scheduler(**swh_sched_config['scheduler']) + return get_scheduler(**swh_sched_config["scheduler"]) @pytest.fixture def swh_elasticsearch_backend(swh_sched_config): from swh.scheduler.backend_es import ElasticSearchBackend + backend = ElasticSearchBackend(**swh_sched_config) backend.initialize() return backend @pytest.fixture def swh_elasticsearch_memory(swh_elasticsearch_backend): return swh_elasticsearch_backend.storage diff --git a/swh/scheduler/tests/es/test_backend_es.py b/swh/scheduler/tests/es/test_backend_es.py index cae6f03..6b332ad 100644 --- a/swh/scheduler/tests/es/test_backend_es.py +++ b/swh/scheduler/tests/es/test_backend_es.py @@ -1,78 +1,80 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import pytest import elasticsearch from swh.scheduler.backend_es import get_elasticsearch from ..common import tasks_from_template, TEMPLATES def test_get_elasticsearch(): - with pytest.raises(ValueError, match='Unknown elasticsearch class'): - get_elasticsearch('unknown') + with pytest.raises(ValueError, match="Unknown elasticsearch class"): + get_elasticsearch("unknown") - es = get_elasticsearch('memory') + es = get_elasticsearch("memory") assert es from swh.scheduler.elasticsearch_memory import MemoryElasticsearch + assert isinstance(es, MemoryElasticsearch) - es = get_elasticsearch('local') + es = get_elasticsearch("local") assert es assert isinstance(es, elasticsearch.Elasticsearch) def test_backend_setup_basic(swh_elasticsearch_backend): """Elastic search instance should allow to create/close/check index """ - index_name = 'swh-tasks-2010-01' + index_name = "swh-tasks-2010-01" try: swh_elasticsearch_backend.storage.indices.get_mapping(index_name) except (elasticsearch.exceptions.NotFoundError, KeyError): pass assert not swh_elasticsearch_backend.storage.indices.exists(index_name) swh_elasticsearch_backend.create(index_name) assert 
swh_elasticsearch_backend.storage.indices.exists(index_name) assert swh_elasticsearch_backend.is_index_opened(index_name) # index exists with a mapping mapping = swh_elasticsearch_backend.storage.indices.get_mapping(index_name) assert mapping != {} def test_backend_setup_index(swh_elasticsearch_backend): """Elastic search instance should allow to bulk index """ - template_git = TEMPLATES['git'] + template_git = TEMPLATES["git"] next_run_date = datetime.datetime.utcnow() - datetime.timedelta(days=1) tasks = tasks_from_template(template_git, next_run_date, 1) index_name = swh_elasticsearch_backend.compute_index_name( - next_run_date.year, next_run_date.month) + next_run_date.year, next_run_date.month + ) assert not swh_elasticsearch_backend.storage.indices.exists(index_name) tasks = list(swh_elasticsearch_backend.streaming_bulk(index_name, tasks)) assert len(tasks) > 0 for output_task in tasks: assert output_task is not None - assert output_task['type'] == template_git['type'] - assert output_task['arguments'] is not None - next_run = output_task['next_run'] + assert output_task["type"] == template_git["type"] + assert output_task["arguments"] is not None + next_run = output_task["next_run"] if isinstance(next_run, str): # real elasticsearch assert next_run == next_run_date.isoformat() else: # memory implem. does not really index assert next_run == next_run_date assert swh_elasticsearch_backend.storage.indices.exists(index_name) assert swh_elasticsearch_backend.is_index_opened(index_name) mapping = swh_elasticsearch_backend.storage.indices.get_mapping(index_name) assert mapping != {} diff --git a/swh/scheduler/tests/es/test_cli_task.py b/swh/scheduler/tests/es/test_cli_task.py index 0f366f3..ff8ba46 100644 --- a/swh/scheduler/tests/es/test_cli_task.py +++ b/swh/scheduler/tests/es/test_cli_task.py @@ -1,109 +1,114 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import arrow import datetime import logging import uuid import random import pytest from click.testing import CliRunner from swh.scheduler.cli import cli from ..common import tasks_from_template, TASK_TYPES, TEMPLATES logger = logging.getLogger(__name__) -@pytest.mark.usefixtures('swh_elasticsearch_backend') +@pytest.mark.usefixtures("swh_elasticsearch_backend") def test_cli_archive_tasks(swh_sched, swh_sched_config_file): scheduler = swh_sched - template_git = TEMPLATES['git'] - template_hg = TEMPLATES['hg'] + template_git = TEMPLATES["git"] + template_hg = TEMPLATES["hg"] # first initialize scheduler's db (is this still needed?) 
for tt in TASK_TYPES.values(): scheduler.create_task_type(tt) next_run_start = arrow.utcnow().datetime - datetime.timedelta(days=1) - recurring = tasks_from_template( - template_git, next_run_start, 100) + recurring = tasks_from_template(template_git, next_run_start, 100) oneshots = tasks_from_template( - template_hg, next_run_start - datetime.timedelta(days=1), 50) + template_hg, next_run_start - datetime.timedelta(days=1), 50 + ) past_time = next_run_start - datetime.timedelta(days=7) all_tasks = recurring + oneshots result = scheduler.create_tasks(all_tasks) assert len(result) == len(all_tasks) # simulate task run backend_tasks = [ { - 'task': task['id'], - 'backend_id': str(uuid.uuid4()), - 'scheduled': next_run_start - datetime.timedelta(minutes=i % 60), - } for i, task in enumerate(result) + "task": task["id"], + "backend_id": str(uuid.uuid4()), + "scheduled": next_run_start - datetime.timedelta(minutes=i % 60), + } + for i, task in enumerate(result) ] scheduler.mass_schedule_task_runs(backend_tasks) # Disable some tasks tasks_to_disable = set() for task in result: - status = random.choice(['disabled', 'completed']) - if status == 'disabled': - tasks_to_disable.add(task['id']) + status = random.choice(["disabled", "completed"]) + if status == "disabled": + tasks_to_disable.add(task["id"]) scheduler.disable_tasks(tasks_to_disable) - git_tasks = scheduler.search_tasks(task_type=template_git['type']) - hg_tasks = scheduler.search_tasks(task_type=template_hg['type']) + git_tasks = scheduler.search_tasks(task_type=template_git["type"]) + hg_tasks = scheduler.search_tasks(task_type=template_hg["type"]) assert len(git_tasks) + len(hg_tasks) == len(all_tasks) # Ensure the task_run are in expected state - task_runs = scheduler.get_task_runs([ - t['id'] for t in git_tasks + hg_tasks - ]) + task_runs = scheduler.get_task_runs([t["id"] for t in git_tasks + hg_tasks]) # Same for the tasks for t in git_tasks + hg_tasks: - if t['id'] in tasks_to_disable: - assert t['status'] == 'disabled' + if t["id"] in tasks_to_disable: + assert t["status"] == "disabled" future_time = next_run_start + datetime.timedelta(days=1) for tr in task_runs: - assert past_time <= tr['scheduled'] - assert tr['scheduled'] < future_time + assert past_time <= tr["scheduled"] + assert tr["scheduled"] < future_time runner = CliRunner() - result = runner.invoke(cli, [ - '--config-file', swh_sched_config_file, - 'task', 'archive', - '--after', past_time.isoformat(), - '--before', future_time.isoformat(), - '--cleanup', - ], obj={ - 'log_level': logging.DEBUG, - }) + result = runner.invoke( + cli, + [ + "--config-file", + swh_sched_config_file, + "task", + "archive", + "--after", + past_time.isoformat(), + "--before", + future_time.isoformat(), + "--cleanup", + ], + obj={"log_level": logging.DEBUG,}, + ) assert result.exit_code == 0, result.output # disabled tasks should no longer be in the scheduler - git_tasks = scheduler.search_tasks(task_type=template_git['type']) - hg_tasks = scheduler.search_tasks(task_type=template_hg['type']) + git_tasks = scheduler.search_tasks(task_type=template_git["type"]) + hg_tasks = scheduler.search_tasks(task_type=template_hg["type"]) remaining_tasks = git_tasks + hg_tasks count_disabled = 0 for task in remaining_tasks: logger.debug(f"task status: {task['status']}") - if task['status'] == 'disabled': + if task["status"] == "disabled": count_disabled += 1 assert count_disabled == 0 assert len(remaining_tasks) == len(all_tasks) - len(tasks_to_disable) diff --git 
a/swh/scheduler/tests/es/test_elasticsearch_memory.py b/swh/scheduler/tests/es/test_elasticsearch_memory.py index beb48f8..ada5ef0 100644 --- a/swh/scheduler/tests/es/test_elasticsearch_memory.py +++ b/swh/scheduler/tests/es/test_elasticsearch_memory.py @@ -1,159 +1,151 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import hashlib import logging import random import pytest from swh.scheduler.elasticsearch_memory import BasicSerializer, BasicTransport from ..common import tasks_from_template, TEMPLATES from typing import Any, Dict logger = logging.getLogger(__name__) def test_serializer(): s = BasicSerializer() assert s - data = {'something': [1, 2, 3], 'cool': {'1': '2'}} + data = {"something": [1, 2, 3], "cool": {"1": "2"}} actual_data = s.dumps(data) assert isinstance(actual_data, str) assert actual_data == str(data) def test_basic_transport(): b = BasicTransport() assert b assert isinstance(b.serializer, BasicSerializer) def test_index_manipulation(swh_elasticsearch_memory): - index_name = 'swh-tasks-xxxx' + index_name = "swh-tasks-xxxx" indices = swh_elasticsearch_memory.index assert not swh_elasticsearch_memory.exists(index_name) assert index_name not in indices # so stat raises with pytest.raises(Exception): swh_elasticsearch_memory.stats(index_name) # we create the index swh_elasticsearch_memory.create(index_name) # now the index exists assert swh_elasticsearch_memory.exists(index_name) assert index_name in indices # it's opened - assert indices[index_name]['status'] == 'opened' + assert indices[index_name]["status"] == "opened" # so stats is happy swh_elasticsearch_memory.stats(index_name) # open the index, nothing changes swh_elasticsearch_memory.open(index_name) - assert indices[index_name]['status'] == 'opened' + assert indices[index_name]["status"] == "opened" # close the index swh_elasticsearch_memory.close(index_name) - assert indices[index_name]['status'] == 'closed' + assert indices[index_name]["status"] == "closed" # reopen the index (fun times) swh_elasticsearch_memory.open(index_name) - assert indices[index_name]['status'] == 'opened' + assert indices[index_name]["status"] == "opened" def test_bulk_and_mget(swh_elasticsearch_memory): # initialize tasks - template_git = TEMPLATES['git'] + template_git = TEMPLATES["git"] next_run_start = datetime.datetime.utcnow() - datetime.timedelta(days=1) tasks = tasks_from_template(template_git, next_run_start, 100) def compute_id(stask): - return hashlib.sha1(stask.encode('utf-8')).hexdigest() + return hashlib.sha1(stask.encode("utf-8")).hexdigest() body = [] ids_to_task = {} for task in tasks: - date = task['next_run'] - index_name = f'swh-tasks-{date.year}-{date.month}' - idx = {'index': {'_index': index_name}} + date = task["next_run"] + index_name = f"swh-tasks-{date.year}-{date.month}" + idx = {"index": {"_index": index_name}} sidx = swh_elasticsearch_memory.transport.serializer.dumps(idx) body.append(sidx) stask = swh_elasticsearch_memory.transport.serializer.dumps(task) body.append(stask) _id = compute_id(stask) ids_to_task[_id] = task - logger.debug(f'_id: {_id}, task: {task}') + logger.debug(f"_id: {_id}, task: {task}") # store # create the index first swh_elasticsearch_memory.create(index_name) # then bulk insert new data - result = swh_elasticsearch_memory.bulk('\n'.join(body)) + result = 
swh_elasticsearch_memory.bulk("\n".join(body)) # no guarantee in the order assert result - actual_items = result['items'] + actual_items = result["items"] assert len(actual_items) == len(ids_to_task) def get_id(data: Dict[str, Any]) -> str: - return data['index']['_id'] + return data["index"]["_id"] actual_items = sorted(actual_items, key=get_id) expected_items = { - 'items': [ - { - 'index': { - 'status': 200, - '_id': _id - } - } for _id in list(ids_to_task) - ] + "items": [{"index": {"status": 200, "_id": _id}} for _id in list(ids_to_task)] } - expected_items = sorted(expected_items['items'], key=get_id) + expected_items = sorted(expected_items["items"], key=get_id) assert actual_items == expected_items # retrieve nb_docs = 10 ids = list(ids_to_task) random_ids = [] # add some inexistent ids for i in range(16): - noisy_id = f'{i}' * 40 + noisy_id = f"{i}" * 40 random_ids.append(noisy_id) random_ids.extend(random.sample(ids, nb_docs)) # add relevant ids for i in range(16, 32): - noisy_id = f'{i}' * 40 + noisy_id = f"{i}" * 40 random_ids.append(noisy_id) - result = swh_elasticsearch_memory.mget( - index=index_name, body={'ids': random_ids}) - assert result['docs'] - assert len(result['docs']) == nb_docs, "no random and inexistent id found" - for doc in result['docs']: - assert doc['found'] + result = swh_elasticsearch_memory.mget(index=index_name, body={"ids": random_ids}) + assert result["docs"] + assert len(result["docs"]) == nb_docs, "no random and inexistent id found" + for doc in result["docs"]: + assert doc["found"] - actual_task = doc['_source'] + actual_task = doc["_source"] _id = compute_id(str(actual_task)) expected_task = ids_to_task[_id] assert actual_task == expected_task diff --git a/swh/scheduler/tests/tasks.py b/swh/scheduler/tests/tasks.py index 2426478..8751e2c 100644 --- a/swh/scheduler/tests/tasks.py +++ b/swh/scheduler/tests/tasks.py @@ -1,42 +1,42 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from celery import group, shared_task -@shared_task(name='swh.scheduler.tests.tasks.ping', bind=True) +@shared_task(name="swh.scheduler.tests.tasks.ping", bind=True) def ping(self, **kw): # check this is a SWHTask - assert hasattr(self, 'log') - assert not hasattr(self, 'run_task') - assert 'SWHTask' in [x.__name__ for x in self.__class__.__mro__] + assert hasattr(self, "log") + assert not hasattr(self, "run_task") + assert "SWHTask" in [x.__name__ for x in self.__class__.__mro__] self.log.debug(self.name) if kw: - return 'OK (kw=%s)' % kw - return 'OK' + return "OK (kw=%s)" % kw + return "OK" -@shared_task(name='swh.scheduler.tests.tasks.multiping', bind=True) +@shared_task(name="swh.scheduler.tests.tasks.multiping", bind=True) def multiping(self, n=10): promise = group(ping.s(i=i) for i in range(n))() - self.log.debug('%s OK (spawned %s subtasks)' % (self.name, n)) + self.log.debug("%s OK (spawned %s subtasks)" % (self.name, n)) promise.save() return promise.id -@shared_task(name='swh.scheduler.tests.tasks.error') +@shared_task(name="swh.scheduler.tests.tasks.error") def not_implemented(): - raise NotImplementedError('Nope') + raise NotImplementedError("Nope") -@shared_task(name='swh.scheduler.tests.tasks.add') +@shared_task(name="swh.scheduler.tests.tasks.add") def add(x, y): return x + y -@shared_task(name='swh.scheduler.tests.tasks.echo') 
+@shared_task(name="swh.scheduler.tests.tasks.echo") def echo(**kw): "Does nothing, just return the given kwargs dict" return kw diff --git a/swh/scheduler/tests/test_api_client.py b/swh/scheduler/tests/test_api_client.py index 92ccbff..73b3373 100644 --- a/swh/scheduler/tests/test_api_client.py +++ b/swh/scheduler/tests/test_api_client.py @@ -1,48 +1,64 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import pytest from flask import url_for import swh.scheduler.api.server as server from swh.scheduler.api.client import RemoteScheduler from swh.scheduler.tests.test_scheduler import TestScheduler # noqa # tests are executed using imported class (TestScheduler) using overloaded # swh_scheduler fixture below # the Flask app used as server in these tests @pytest.fixture def app(swh_db_scheduler): server.scheduler = swh_db_scheduler yield server.app # the RPCClient class used as client used in these tests @pytest.fixture def swh_rpc_client_class(): return RemoteScheduler @pytest.fixture def swh_scheduler(swh_rpc_client, app): yield swh_rpc_client def test_site_map(flask_app_client): - sitemap = flask_app_client.get(url_for('site_map')) - assert sitemap.headers['Content-Type'] == 'application/json' + sitemap = flask_app_client.get(url_for("site_map")) + assert sitemap.headers["Content-Type"] == "application/json" - rules = set(x['rule'] for x in sitemap.json) + rules = set(x["rule"] for x in sitemap.json) # we expect at least these rules - expected_rules = set('/'+rule for rule in ( - 'set_status_tasks', 'create_task_type', - 'get_task_type', 'get_task_types', 'create_tasks', 'disable_tasks', - 'get_tasks', 'search_tasks', 'get_task_runs', 'peek_ready_tasks', - 'grab_ready_tasks', 'schedule_task_run', 'mass_schedule_task_runs', - 'start_task_run', 'end_task_run', 'filter_task_to_archive', - 'delete_archived_tasks', 'get_priority_ratios')) + expected_rules = set( + "/" + rule + for rule in ( + "set_status_tasks", + "create_task_type", + "get_task_type", + "get_task_types", + "create_tasks", + "disable_tasks", + "get_tasks", + "search_tasks", + "get_task_runs", + "peek_ready_tasks", + "grab_ready_tasks", + "schedule_task_run", + "mass_schedule_task_runs", + "start_task_run", + "end_task_run", + "filter_task_to_archive", + "delete_archived_tasks", + "get_priority_ratios", + ) + ) assert rules == expected_rules diff --git a/swh/scheduler/tests/test_celery_tasks.py b/swh/scheduler/tests/test_celery_tasks.py index 78bed28..8edabbd 100644 --- a/swh/scheduler/tests/test_celery_tasks.py +++ b/swh/scheduler/tests/test_celery_tasks.py @@ -1,163 +1,167 @@ from time import sleep from itertools import count from celery.result import GroupResult from celery.result import AsyncResult import pytest from swh.scheduler.utils import create_task_dict from swh.scheduler.celery_backend.runner import run_ready_tasks def test_ping(swh_app, celery_session_worker): - res = swh_app.send_task( - 'swh.scheduler.tests.tasks.ping') + res = swh_app.send_task("swh.scheduler.tests.tasks.ping") assert res res.wait() assert res.successful() - assert res.result == 'OK' + assert res.result == "OK" def test_ping_with_kw(swh_app, celery_session_worker): - res = swh_app.send_task( - 'swh.scheduler.tests.tasks.ping', kwargs={'a': 1}) + res = swh_app.send_task("swh.scheduler.tests.tasks.ping", kwargs={"a": 1}) assert res res.wait() assert 
res.successful() assert res.result == "OK (kw={'a': 1})" def test_multiping(swh_app, celery_session_worker): "Test that a task that spawns subtasks (group) works" - res = swh_app.send_task( - 'swh.scheduler.tests.tasks.multiping', kwargs={'n': 5}) + res = swh_app.send_task("swh.scheduler.tests.tasks.multiping", kwargs={"n": 5}) assert res res.wait() assert res.successful() # retrieve the GroupResult for this task and wait for all the subtasks # to complete promise_id = res.result assert promise_id promise = GroupResult.restore(promise_id, app=swh_app) for i in range(5): if promise.ready(): break sleep(1) results = [x.get() for x in promise.results] assert len(results) == 5 for i in range(5): assert ("OK (kw={'i': %s})" % i) in results @pytest.mark.db def test_scheduler_fixture(swh_app, celery_session_worker, swh_scheduler): "Test that the scheduler fixture works properly" - task_type = swh_scheduler.get_task_type('swh-test-ping') + task_type = swh_scheduler.get_task_type("swh-test-ping") assert task_type - assert task_type['backend_name'] == 'swh.scheduler.tests.tasks.ping' + assert task_type["backend_name"] == "swh.scheduler.tests.tasks.ping" - swh_scheduler.create_tasks([create_task_dict( - 'swh-test-ping', 'oneshot')]) + swh_scheduler.create_tasks([create_task_dict("swh-test-ping", "oneshot")]) backend_tasks = run_ready_tasks(swh_scheduler, swh_app) assert backend_tasks for task in backend_tasks: # Make sure the task completed - AsyncResult(id=task['backend_id']).get() + AsyncResult(id=task["backend_id"]).get() @pytest.mark.db def test_task_return_value(swh_app, celery_session_worker, swh_scheduler): - task_type = swh_scheduler.get_task_type('swh-test-add') + task_type = swh_scheduler.get_task_type("swh-test-add") assert task_type - assert task_type['backend_name'] == 'swh.scheduler.tests.tasks.add' + assert task_type["backend_name"] == "swh.scheduler.tests.tasks.add" - swh_scheduler.create_tasks([create_task_dict( - 'swh-test-add', 'oneshot', 12, 30)]) + swh_scheduler.create_tasks([create_task_dict("swh-test-add", "oneshot", 12, 30)]) backend_tasks = run_ready_tasks(swh_scheduler, swh_app) assert len(backend_tasks) == 1 task = backend_tasks[0] - value = AsyncResult(id=task['backend_id']).get() + value = AsyncResult(id=task["backend_id"]).get() assert value == 42 @pytest.mark.db def test_task_exception(swh_app, celery_session_worker, swh_scheduler): - task_type = swh_scheduler.get_task_type('swh-test-error') + task_type = swh_scheduler.get_task_type("swh-test-error") assert task_type - assert task_type['backend_name'] == 'swh.scheduler.tests.tasks.error' + assert task_type["backend_name"] == "swh.scheduler.tests.tasks.error" - swh_scheduler.create_tasks([create_task_dict( - 'swh-test-error', 'oneshot')]) + swh_scheduler.create_tasks([create_task_dict("swh-test-error", "oneshot")]) backend_tasks = run_ready_tasks(swh_scheduler, swh_app) assert len(backend_tasks) == 1 task = backend_tasks[0] - result = AsyncResult(id=task['backend_id']) + result = AsyncResult(id=task["backend_id"]) with pytest.raises(NotImplementedError): result.get() def test_statsd(swh_app, celery_session_worker, mocker): - m = mocker.patch('swh.scheduler.task.Statsd._send_to_server') - mocker.patch('swh.scheduler.task.ts', side_effect=count()) - mocker.patch('swh.core.statsd.monotonic', side_effect=count()) - res = swh_app.send_task( - 'swh.scheduler.tests.tasks.echo') + m = mocker.patch("swh.scheduler.task.Statsd._send_to_server") + mocker.patch("swh.scheduler.task.ts", side_effect=count()) + 
mocker.patch("swh.core.statsd.monotonic", side_effect=count()) + res = swh_app.send_task("swh.scheduler.tests.tasks.echo") assert res res.wait() assert res.successful() assert res.result == {} m.assert_any_call( - 'swh_task_called_count:1|c|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_called_count:1|c|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) m.assert_any_call( - 'swh_task_start_ts:0|g|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_start_ts:0|g|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) m.assert_any_call( - 'swh_task_end_ts:1|g|' - '#status:uneventful,task:swh.scheduler.tests.tasks.echo,' - 'worker:unknown worker') + "swh_task_end_ts:1|g|" + "#status:uneventful,task:swh.scheduler.tests.tasks.echo," + "worker:unknown worker" + ) m.assert_any_call( - 'swh_task_duration_seconds:1000|ms|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_duration_seconds:1000|ms|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) m.assert_any_call( - 'swh_task_success_count:1|c|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_success_count:1|c|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) def test_statsd_with_status(swh_app, celery_session_worker, mocker): - m = mocker.patch('swh.scheduler.task.Statsd._send_to_server') - mocker.patch('swh.scheduler.task.ts', side_effect=count()) - mocker.patch('swh.core.statsd.monotonic', side_effect=count()) + m = mocker.patch("swh.scheduler.task.Statsd._send_to_server") + mocker.patch("swh.scheduler.task.ts", side_effect=count()) + mocker.patch("swh.core.statsd.monotonic", side_effect=count()) res = swh_app.send_task( - 'swh.scheduler.tests.tasks.echo', kwargs={'status': 'eventful'}) + "swh.scheduler.tests.tasks.echo", kwargs={"status": "eventful"} + ) assert res res.wait() assert res.successful() - assert res.result == {'status': 'eventful'} + assert res.result == {"status": "eventful"} m.assert_any_call( - 'swh_task_called_count:1|c|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_called_count:1|c|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) m.assert_any_call( - 'swh_task_start_ts:0|g|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_start_ts:0|g|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) m.assert_any_call( - 'swh_task_end_ts:1|g|' - '#status:eventful,task:swh.scheduler.tests.tasks.echo,' - 'worker:unknown worker') + "swh_task_end_ts:1|g|" + "#status:eventful,task:swh.scheduler.tests.tasks.echo," + "worker:unknown worker" + ) m.assert_any_call( - 'swh_task_duration_seconds:1000|ms|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_duration_seconds:1000|ms|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) m.assert_any_call( - 'swh_task_success_count:1|c|' - '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + "swh_task_success_count:1|c|" + "#task:swh.scheduler.tests.tasks.echo,worker:unknown worker" + ) diff --git a/swh/scheduler/tests/test_cli.py b/swh/scheduler/tests/test_cli.py index c894e14..3744482 100644 --- a/swh/scheduler/tests/test_cli.py +++ b/swh/scheduler/tests/test_cli.py @@ -1,689 +1,712 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # 
See top-level LICENSE file for more information import datetime import re import tempfile from unittest.mock import patch import logging from click.testing import CliRunner import pytest from swh.storage import get_storage from swh.scheduler.cli import cli from swh.scheduler.utils import create_task_dict -CLI_CONFIG = ''' +CLI_CONFIG = """ scheduler: cls: foo args: {} -''' +""" def invoke(scheduler, catch_exceptions, args): runner = CliRunner() - with patch('swh.scheduler.get_scheduler') as get_scheduler_mock, \ - tempfile.NamedTemporaryFile('a', suffix='.yml') as config_fd: + with patch( + "swh.scheduler.get_scheduler" + ) as get_scheduler_mock, tempfile.NamedTemporaryFile( + "a", suffix=".yml" + ) as config_fd: config_fd.write(CLI_CONFIG) config_fd.seek(0) get_scheduler_mock.return_value = scheduler - args = ['-C' + config_fd.name, ] + args - result = runner.invoke(cli, args, obj={'log_level': logging.WARNING}) + args = ["-C" + config_fd.name,] + args + result = runner.invoke(cli, args, obj={"log_level": logging.WARNING}) if not catch_exceptions and result.exception: print(result.output) raise result.exception return result def test_schedule_tasks(swh_scheduler): csv_data = ( b'swh-test-ping;[["arg1", "arg2"]];{"key": "value"};' - + datetime.datetime.utcnow().isoformat().encode() + b'\n' + + datetime.datetime.utcnow().isoformat().encode() + + b"\n" + b'swh-test-ping;[["arg3", "arg4"]];{"key": "value"};' - + datetime.datetime.utcnow().isoformat().encode() + b'\n') - with tempfile.NamedTemporaryFile(suffix='.csv') as csv_fd: + + datetime.datetime.utcnow().isoformat().encode() + + b"\n" + ) + with tempfile.NamedTemporaryFile(suffix=".csv") as csv_fd: csv_fd.write(csv_data) csv_fd.seek(0) - result = invoke(swh_scheduler, False, [ - 'task', 'schedule', - '-d', ';', - csv_fd.name - ]) - expected = r''' + result = invoke( + swh_scheduler, False, ["task", "schedule", "-d", ";", csv_fd.name] + ) + expected = r""" Created 2 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: recurring Args: \['arg1', 'arg2'\] Keyword args: key: 'value' Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: recurring Args: \['arg3', 'arg4'\] Keyword args: key: 'value' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_schedule_tasks_columns(swh_scheduler): - with tempfile.NamedTemporaryFile(suffix='.csv') as csv_fd: - csv_fd.write( - b'swh-test-ping;oneshot;["arg1", "arg2"];{"key": "value"}\n') + with tempfile.NamedTemporaryFile(suffix=".csv") as csv_fd: + csv_fd.write(b'swh-test-ping;oneshot;["arg1", "arg2"];{"key": "value"}\n') csv_fd.seek(0) - result = invoke(swh_scheduler, False, [ - 'task', 'schedule', - '-c', 'type', '-c', 'policy', '-c', 'args', '-c', 'kwargs', - '-d', ';', - csv_fd.name - ]) - expected = r''' + result = invoke( + swh_scheduler, + False, + [ + "task", + "schedule", + "-c", + "type", + "-c", + "policy", + "-c", + "args", + "-c", + "kwargs", + "-d", + ";", + csv_fd.name, + ], + ) + expected = r""" Created 1 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: 'arg1' 'arg2' Keyword args: key: 'value' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_schedule_task(swh_scheduler): - result = invoke(swh_scheduler, False, [ - 'task', 'add', - 'swh-test-ping', 'arg1', 
'arg2', 'key=value', - ]) - expected = r''' + result = invoke( + swh_scheduler, + False, + ["task", "add", "swh-test-ping", "arg1", "arg2", "key=value",], + ) + expected = r""" Created 1 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: recurring Args: 'arg1' 'arg2' Keyword args: key: 'value' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_pending_tasks_none(swh_scheduler): - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', - ]) + result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",]) - expected = r''' + expected = r""" Found 0 swh-test-ping tasks -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_pending_tasks(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task2['next_run'] += datetime.timedelta(days=1) + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task2["next_run"] += datetime.timedelta(days=1) swh_scheduler.create_tasks([task1, task2]) - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', - ]) + result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",]) - expected = r''' + expected = r""" Found 1 swh-test-ping tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: key: 'value1' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output - swh_scheduler.grab_ready_tasks('swh-test-ping') + swh_scheduler.grab_ready_tasks("swh-test-ping") - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', - ]) + result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",]) - expected = r''' + expected = r""" Found 0 swh-test-ping tasks -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_pending_tasks_filter(swh_scheduler): - task = create_task_dict('swh-test-multiping', 'oneshot', key='value') + task = create_task_dict("swh-test-multiping", "oneshot", key="value") swh_scheduler.create_tasks([task]) - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', - ]) + result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",]) - expected = r''' + expected = r""" Found 0 swh-test-ping tasks -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_pending_tasks_filter_2(swh_scheduler): - swh_scheduler.create_tasks([ - create_task_dict('swh-test-multiping', 'oneshot', key='value'), - create_task_dict('swh-test-ping', 'oneshot', key='value2'), - ]) + swh_scheduler.create_tasks( + [ + create_task_dict("swh-test-multiping", "oneshot", key="value"), + create_task_dict("swh-test-ping", "oneshot", key="value2"), + ] + ) - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', - ]) + result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",]) - 
expected = r''' + expected = r""" Found 1 swh-test-ping tasks Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output # Fails because "task list-pending --limit 3" only returns 2 tasks, because # of how compute_nb_tasks_from works. @pytest.mark.xfail def test_list_pending_tasks_limit(swh_scheduler): - swh_scheduler.create_tasks([ - create_task_dict('swh-test-ping', 'oneshot', key='value%d' % i) - for i in range(10) - ]) + swh_scheduler.create_tasks( + [ + create_task_dict("swh-test-ping", "oneshot", key="value%d" % i) + for i in range(10) + ] + ) - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', '--limit', '3', - ]) + result = invoke( + swh_scheduler, False, ["task", "list-pending", "swh-test-ping", "--limit", "3",] + ) - expected = r''' + expected = r""" Found 2 swh-test-ping tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: key: 'value0' Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: key: 'value1' Task 3 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_pending_tasks_before(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task1['next_run'] += datetime.timedelta(days=3) - task2['next_run'] += datetime.timedelta(days=1) + task1 = create_task_dict("swh-test-ping", "oneshot", key="value") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task1["next_run"] += datetime.timedelta(days=3) + task2["next_run"] += datetime.timedelta(days=1) swh_scheduler.create_tasks([task1, task2]) - result = invoke(swh_scheduler, False, [ - 'task', 'list-pending', 'swh-test-ping', '--before', - (datetime.date.today() + datetime.timedelta(days=2)).isoformat() - ]) - - expected = r''' + result = invoke( + swh_scheduler, + False, + [ + "task", + "list-pending", + "swh-test-ping", + "--before", + (datetime.date.today() + datetime.timedelta(days=2)).isoformat(), + ], + ) + + expected = r""" Found 1 swh-test-ping tasks Task 2 Next run: in a day \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task1['next_run'] += datetime.timedelta(days=3, hours=2) + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task1["next_run"] += datetime.timedelta(days=3, hours=2) swh_scheduler.create_tasks([task1, task2]) - swh_scheduler.grab_ready_tasks('swh-test-ping') + swh_scheduler.grab_ready_tasks("swh-test-ping") - result = invoke(swh_scheduler, False, [ - 'task', 'list', - ]) + result = invoke(swh_scheduler, False, ["task", "list",]) - expected = 
r''' + expected = r""" Found 2 tasks Task 1 Next run: in 3 days \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value1' Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_scheduled Priority:\x20 Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks_id(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3') + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task3 = create_task_dict("swh-test-ping", "oneshot", key="value3") swh_scheduler.create_tasks([task1, task2, task3]) - result = invoke(swh_scheduler, False, [ - 'task', 'list', '--task-id', '2', - ]) + result = invoke(swh_scheduler, False, ["task", "list", "--task-id", "2",]) - expected = r''' + expected = r""" Found 1 tasks Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks_id_2(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3') + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task3 = create_task_dict("swh-test-ping", "oneshot", key="value3") swh_scheduler.create_tasks([task1, task2, task3]) - result = invoke(swh_scheduler, False, [ - 'task', 'list', '--task-id', '2', '--task-id', '3' - ]) + result = invoke( + swh_scheduler, False, ["task", "list", "--task-id", "2", "--task-id", "3"] + ) - expected = r''' + expected = r""" Found 2 tasks Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value2' Task 3 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value3' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks_type(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-multiping', 'oneshot', key='value2') - task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3') + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-multiping", "oneshot", key="value2") + task3 = create_task_dict("swh-test-ping", "oneshot", key="value3") swh_scheduler.create_tasks([task1, task2, task3]) - result = invoke(swh_scheduler, False, [ - 'task', 'list', '--task-type', 'swh-test-ping' - ]) + result = invoke( + swh_scheduler, False, ["task", "list", "--task-type", "swh-test-ping"] + ) - expected = r''' + expected = r""" Found 
2 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value1' Task 3 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value3' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks_limit(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task3 = create_task_dict('swh-test-ping', 'oneshot', key='value3') + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task3 = create_task_dict("swh-test-ping", "oneshot", key="value3") swh_scheduler.create_tasks([task1, task2, task3]) - result = invoke(swh_scheduler, False, [ - 'task', 'list', '--limit', '2', - ]) + result = invoke(swh_scheduler, False, ["task", "list", "--limit", "2",]) - expected = r''' + expected = r""" Found 2 tasks Task 1 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value1' Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks_before(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task1['next_run'] += datetime.timedelta(days=3, hours=2) + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task1["next_run"] += datetime.timedelta(days=3, hours=2) swh_scheduler.create_tasks([task1, task2]) - swh_scheduler.grab_ready_tasks('swh-test-ping') + swh_scheduler.grab_ready_tasks("swh-test-ping") - result = invoke(swh_scheduler, False, [ - 'task', 'list', '--before', - (datetime.date.today() + datetime.timedelta(days=2)).isoformat() - ]) + result = invoke( + swh_scheduler, + False, + [ + "task", + "list", + "--before", + (datetime.date.today() + datetime.timedelta(days=2)).isoformat(), + ], + ) - expected = r''' + expected = r""" Found 1 tasks Task 2 Next run: just now \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_scheduled Priority:\x20 Args: Keyword args: key: 'value2' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def test_list_tasks_after(swh_scheduler): - task1 = create_task_dict('swh-test-ping', 'oneshot', key='value1') - task2 = create_task_dict('swh-test-ping', 'oneshot', key='value2') - task1['next_run'] += datetime.timedelta(days=3, hours=2) + task1 = create_task_dict("swh-test-ping", "oneshot", key="value1") + task2 = create_task_dict("swh-test-ping", "oneshot", key="value2") + task1["next_run"] += datetime.timedelta(days=3, hours=2) swh_scheduler.create_tasks([task1, task2]) - swh_scheduler.grab_ready_tasks('swh-test-ping') + swh_scheduler.grab_ready_tasks("swh-test-ping") - result = 
invoke(swh_scheduler, False, [ - 'task', 'list', '--after', - (datetime.date.today() + datetime.timedelta(days=2)).isoformat() - ]) + result = invoke( + swh_scheduler, + False, + [ + "task", + "list", + "--after", + (datetime.date.today() + datetime.timedelta(days=2)).isoformat(), + ], + ) - expected = r''' + expected = r""" Found 1 tasks Task 1 Next run: in 3 days \(.*\) Interval: 1 day, 0:00:00 Type: swh-test-ping Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: Keyword args: key: 'value1' -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output assert re.fullmatch(expected, result.output, re.MULTILINE), result.output def _fill_storage_with_origins(storage, nb_origins): - origins = [ - { - 'url': 'http://example.com/{}'.format(i), - } - for i in range(nb_origins) - ] + origins = [{"url": "http://example.com/{}".format(i),} for i in range(nb_origins)] storage.origin_add(origins) return origins @pytest.fixture def storage(): """An instance of in-memory storage that gets injected into the CLI functions.""" storage_config = { - 'cls': 'pipeline', - 'steps': [ - {'cls': 'validate'}, - {'cls': 'memory'}, - ] + "cls": "pipeline", + "steps": [{"cls": "validate"}, {"cls": "memory"},], } storage = get_storage(**storage_config) - with patch('swh.storage.get_storage') as get_storage_mock: + with patch("swh.storage.get_storage") as get_storage_mock: get_storage_mock.return_value = storage yield storage -@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3) -def test_task_schedule_origins_dry_run( - swh_scheduler, storage): +@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) +def test_task_schedule_origins_dry_run(swh_scheduler, storage): """Tests the scheduling when origin_batch_size*task_batch_size is a divisor of nb_origins.""" _fill_storage_with_origins(storage, 90) - result = invoke(swh_scheduler, False, [ - 'task', 'schedule_origins', '--dry-run', 'swh-test-ping', - ]) + result = invoke( + swh_scheduler, + False, + ["task", "schedule_origins", "--dry-run", "swh-test-ping",], + ) # Check the output - expected = r''' + expected = r""" Scheduled 3 tasks \(30 origins\). Scheduled 6 tasks \(60 origins\). Scheduled 9 tasks \(90 origins\). Done. 
-'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output - assert re.fullmatch(expected, result.output, re.MULTILINE), \ - repr(result.output) + assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output) # Check scheduled tasks tasks = swh_scheduler.search_tasks() assert len(tasks) == 0 -def _assert_origin_tasks_contraints( - tasks, max_tasks, max_task_size, expected_origins): +def _assert_origin_tasks_contraints(tasks, max_tasks, max_task_size, expected_origins): # check there are not too many tasks assert len(tasks) <= max_tasks # check tasks are not too large - assert all(len(task['arguments']['args'][0]) <= max_task_size - for task in tasks) + assert all(len(task["arguments"]["args"][0]) <= max_task_size for task in tasks) # check the tasks are exhaustive - assert sum([len(task['arguments']['args'][0]) for task in tasks]) == \ - len(expected_origins) - assert \ - set.union(*(set(task['arguments']['args'][0]) for task in tasks)) == \ - {origin['url'] for origin in expected_origins} + assert sum([len(task["arguments"]["args"][0]) for task in tasks]) == len( + expected_origins + ) + assert set.union(*(set(task["arguments"]["args"][0]) for task in tasks)) == { + origin["url"] for origin in expected_origins + } -@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3) +@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) def test_task_schedule_origins(swh_scheduler, storage): """Tests the scheduling when neither origin_batch_size or task_batch_size is a divisor of nb_origins.""" origins = _fill_storage_with_origins(storage, 70) - result = invoke(swh_scheduler, False, [ - 'task', 'schedule_origins', 'swh-test-ping', - '--batch-size', '20', - ]) + result = invoke( + swh_scheduler, + False, + ["task", "schedule_origins", "swh-test-ping", "--batch-size", "20",], + ) # Check the output - expected = r''' + expected = r""" Scheduled 3 tasks \(60 origins\). Scheduled 4 tasks \(70 origins\). Done. -'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output - assert re.fullmatch(expected, result.output, re.MULTILINE), \ - repr(result.output) + assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output) # Check tasks tasks = swh_scheduler.search_tasks() _assert_origin_tasks_contraints(tasks, 4, 20, origins) - assert all(task['arguments']['kwargs'] == {} for task in tasks) + assert all(task["arguments"]["kwargs"] == {} for task in tasks) def test_task_schedule_origins_kwargs(swh_scheduler, storage): """Tests support of extra keyword-arguments.""" origins = _fill_storage_with_origins(storage, 30) - result = invoke(swh_scheduler, False, [ - 'task', 'schedule_origins', 'swh-test-ping', - '--batch-size', '20', - 'key1="value1"', 'key2="value2"', - ]) + result = invoke( + swh_scheduler, + False, + [ + "task", + "schedule_origins", + "swh-test-ping", + "--batch-size", + "20", + 'key1="value1"', + 'key2="value2"', + ], + ) # Check the output - expected = r''' + expected = r""" Scheduled 2 tasks \(30 origins\). Done. 
-'''.lstrip() +""".lstrip() assert result.exit_code == 0, result.output - assert re.fullmatch(expected, result.output, re.MULTILINE), \ - repr(result.output) + assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output) # Check tasks tasks = swh_scheduler.search_tasks() _assert_origin_tasks_contraints(tasks, 2, 20, origins) - assert all(task['arguments']['kwargs'] == - {'key1': 'value1', 'key2': 'value2'} - for task in tasks) + assert all( + task["arguments"]["kwargs"] == {"key1": "value1", "key2": "value2"} + for task in tasks + ) diff --git a/swh/scheduler/tests/test_cli_task_type.py b/swh/scheduler/tests/test_cli_task_type.py index 0a762b5..c158760 100644 --- a/swh/scheduler/tests/test_cli_task_type.py +++ b/swh/scheduler/tests/test_cli_task_type.py @@ -1,120 +1,128 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import pytest import traceback import yaml import pkg_resources from click.testing import CliRunner from swh.scheduler import get_scheduler from swh.scheduler.cli import cli FAKE_MODULE_ENTRY_POINTS = { - 'lister.gnu=swh.lister.gnu:register', - 'lister.pypi=swh.lister.pypi:register', + "lister.gnu=swh.lister.gnu:register", + "lister.pypi=swh.lister.pypi:register", } @pytest.fixture def mock_pkg_resources(monkeypatch): """Monkey patch swh.scheduler's mock_pkg_resources.iter_entry_point call """ + def fake_iter_entry_points(*args, **kwargs): """Substitute fake function to return a fixed set of entrypoints """ from pkg_resources import EntryPoint, Distribution + d = Distribution() - return [EntryPoint.parse(entry, dist=d) - for entry in FAKE_MODULE_ENTRY_POINTS] + return [EntryPoint.parse(entry, dist=d) for entry in FAKE_MODULE_ENTRY_POINTS] original_method = pkg_resources.iter_entry_points - monkeypatch.setattr( - pkg_resources, "iter_entry_points", fake_iter_entry_points) + monkeypatch.setattr(pkg_resources, "iter_entry_points", fake_iter_entry_points) yield # reset monkeypatch: is that needed? 
- monkeypatch.setattr( - pkg_resources, "iter_entry_points", original_method) + monkeypatch.setattr(pkg_resources, "iter_entry_points", original_method) @pytest.fixture def local_sched_config(swh_scheduler_config): """Expose the local scheduler configuration """ - return { - 'scheduler': { - 'cls': 'local', - 'args': swh_scheduler_config - } - } + return {"scheduler": {"cls": "local", "args": swh_scheduler_config}} @pytest.fixture def local_sched_configfile(local_sched_config, tmp_path): """Write in temporary location the local scheduler configuration """ - configfile = tmp_path / 'config.yml' + configfile = tmp_path / "config.yml" configfile.write_text(yaml.dump(local_sched_config)) return configfile.as_posix() def test_register_ttypes_all( - mock_pkg_resources, local_sched_config, local_sched_configfile): + mock_pkg_resources, local_sched_config, local_sched_configfile +): """Registering all task types""" for command in [ - ['--config-file', local_sched_configfile, 'task-type', 'register'], - ['--config-file', local_sched_configfile, 'task-type', 'register', - '-p', 'all'], - ['--config-file', local_sched_configfile, 'task-type', 'register', - '-p', 'lister.gnu', - '-p', 'lister.pypi'], + ["--config-file", local_sched_configfile, "task-type", "register"], + ["--config-file", local_sched_configfile, "task-type", "register", "-p", "all"], + [ + "--config-file", + local_sched_configfile, + "task-type", + "register", + "-p", + "lister.gnu", + "-p", + "lister.pypi", + ], ]: result = CliRunner().invoke(cli, command) - assert result.exit_code == 0, traceback.print_exception( - *result.exc_info) + assert result.exit_code == 0, traceback.print_exception(*result.exc_info) - scheduler = get_scheduler(**local_sched_config['scheduler']) + scheduler = get_scheduler(**local_sched_config["scheduler"]) all_tasks = [ - 'list-gnu-full', - 'list-pypi', + "list-gnu-full", + "list-pypi", ] for task in all_tasks: task_type_desc = scheduler.get_task_type(task) assert task_type_desc - assert task_type_desc['type'] == task - assert task_type_desc['backoff_factor'] == 1 + assert task_type_desc["type"] == task + assert task_type_desc["backoff_factor"] == 1 def test_register_ttypes_filter( - mock_pkg_resources, local_sched_config, local_sched_configfile): + mock_pkg_resources, local_sched_config, local_sched_configfile +): """Filtering on one worker should only register its associated task type """ - result = CliRunner().invoke(cli, [ - '--config-file', local_sched_configfile, - 'task-type', 'register', '--plugins', 'lister.gnu' - ]) + result = CliRunner().invoke( + cli, + [ + "--config-file", + local_sched_configfile, + "task-type", + "register", + "--plugins", + "lister.gnu", + ], + ) assert result.exit_code == 0, traceback.print_exception(*result.exc_info) - scheduler = get_scheduler(**local_sched_config['scheduler']) + scheduler = get_scheduler(**local_sched_config["scheduler"]) all_tasks = [ - 'list-gnu-full', + "list-gnu-full", ] for task in all_tasks: task_type_desc = scheduler.get_task_type(task) assert task_type_desc - assert task_type_desc['type'] == task - assert task_type_desc['backoff_factor'] == 1 + assert task_type_desc["type"] == task + assert task_type_desc["backoff_factor"] == 1 diff --git a/swh/scheduler/tests/test_common.py b/swh/scheduler/tests/test_common.py index e27b8da..b2bc8d4 100644 --- a/swh/scheduler/tests/test_common.py +++ b/swh/scheduler/tests/test_common.py @@ -1,62 +1,65 @@ # Copyright (C) 2017-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory 
of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from .common import tasks_from_template, TEMPLATES def test_tasks_from_template_no_priority(): nb_tasks = 3 - template = TEMPLATES['git'] + template = TEMPLATES["git"] next_run = datetime.datetime.utcnow() tasks = tasks_from_template(template, next_run, nb_tasks) assert len(tasks) == nb_tasks for i, t in enumerate(tasks): - assert t['type'] == template['type'] - assert t['arguments'] is not None - assert t.get('policy') is None # not defined in template - assert len(t['arguments']['args']) == 1 - assert len(t['arguments']['kwargs'].keys()) == 1 - assert t['next_run'] == next_run - datetime.timedelta(microseconds=i) - assert t.get('priority') is None + assert t["type"] == template["type"] + assert t["arguments"] is not None + assert t.get("policy") is None # not defined in template + assert len(t["arguments"]["args"]) == 1 + assert len(t["arguments"]["kwargs"].keys()) == 1 + assert t["next_run"] == next_run - datetime.timedelta(microseconds=i) + assert t.get("priority") is None def test_tasks_from_template_priority(): nb_tasks_no_priority = 3 nb_tasks_priority = 10 - template = TEMPLATES['hg'] + template = TEMPLATES["hg"] priorities = { - 'high': 0.5, - 'normal': 0.3, - 'low': 0.2, + "high": 0.5, + "normal": 0.3, + "low": 0.2, } next_run = datetime.datetime.utcnow() tasks = tasks_from_template( - template, next_run, - nb_tasks_no_priority, num_priority=nb_tasks_priority, - priorities=priorities) + template, + next_run, + nb_tasks_no_priority, + num_priority=nb_tasks_priority, + priorities=priorities, + ) assert len(tasks) == nb_tasks_no_priority + nb_tasks_priority repartition_priority = {k: 0 for k in priorities.keys()} for i, t in enumerate(tasks): - assert t['type'] == template['type'] - assert t['arguments'] is not None - assert t['policy'] == template['policy'] - assert len(t['arguments']['args']) == 1 - assert len(t['arguments']['kwargs'].keys()) == 1 - assert t['next_run'] == next_run - datetime.timedelta(microseconds=i) - priority = t.get('priority') + assert t["type"] == template["type"] + assert t["arguments"] is not None + assert t["policy"] == template["policy"] + assert len(t["arguments"]["args"]) == 1 + assert len(t["arguments"]["kwargs"].keys()) == 1 + assert t["next_run"] == next_run - datetime.timedelta(microseconds=i) + priority = t.get("priority") if priority: assert priority in priorities repartition_priority[priority] += 1 assert repartition_priority == { k: v * nb_tasks_priority for k, v in priorities.items() } diff --git a/swh/scheduler/tests/test_scheduler.py b/swh/scheduler/tests/test_scheduler.py index 55637bd..f666079 100644 --- a/swh/scheduler/tests/test_scheduler.py +++ b/swh/scheduler/tests/test_scheduler.py @@ -1,566 +1,587 @@ # Copyright (C) 2017-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import random import uuid from collections import defaultdict from typing import Any, Dict from arrow import utcnow import pytest from .common import tasks_from_template, TEMPLATES, TASK_TYPES def subdict(d, keys=None, excl=()): if keys is None: keys = [k for k in d.keys()] return {k: d[k] for k in keys if k not in excl} @pytest.mark.db class TestScheduler: def test_get_priority_ratios(self, swh_scheduler): 
assert swh_scheduler.get_priority_ratios() == { - 'high': 0.5, - 'normal': 0.3, - 'low': 0.2, - } + "high": 0.5, + "normal": 0.3, + "low": 0.2, + } def test_add_task_type(self, swh_scheduler): - tt = TASK_TYPES['git'] + tt = TASK_TYPES["git"] swh_scheduler.create_task_type(tt) - assert tt == swh_scheduler.get_task_type(tt['type']) - tt2 = TASK_TYPES['hg'] + assert tt == swh_scheduler.get_task_type(tt["type"]) + tt2 = TASK_TYPES["hg"] swh_scheduler.create_task_type(tt2) - assert tt == swh_scheduler.get_task_type(tt['type']) - assert tt2 == swh_scheduler.get_task_type(tt2['type']) + assert tt == swh_scheduler.get_task_type(tt["type"]) + assert tt2 == swh_scheduler.get_task_type(tt2["type"]) def test_create_task_type_idempotence(self, swh_scheduler): - tt = TASK_TYPES['git'] + tt = TASK_TYPES["git"] swh_scheduler.create_task_type(tt) swh_scheduler.create_task_type(tt) - assert tt == swh_scheduler.get_task_type(tt['type']) + assert tt == swh_scheduler.get_task_type(tt["type"]) def test_get_task_types(self, swh_scheduler): - tt, tt2 = TASK_TYPES['git'], TASK_TYPES['hg'] + tt, tt2 = TASK_TYPES["git"], TASK_TYPES["hg"] swh_scheduler.create_task_type(tt) swh_scheduler.create_task_type(tt2) actual_task_types = swh_scheduler.get_task_types() assert tt in actual_task_types assert tt2 in actual_task_types def test_create_tasks(self, swh_scheduler): priority_ratio = self._priority_ratio(swh_scheduler) self._create_task_types(swh_scheduler) num_tasks_priority = 100 - tasks_1 = tasks_from_template( - TEMPLATES['git'], utcnow(), 100) + tasks_1 = tasks_from_template(TEMPLATES["git"], utcnow(), 100) tasks_2 = tasks_from_template( - TEMPLATES['hg'], utcnow(), 100, - num_tasks_priority, priorities=priority_ratio) + TEMPLATES["hg"], + utcnow(), + 100, + num_tasks_priority, + priorities=priority_ratio, + ) tasks = tasks_1 + tasks_2 # tasks are returned only once with their ids ret1 = swh_scheduler.create_tasks(tasks + tasks_1 + tasks_2) - set_ret1 = set([t['id'] for t in ret1]) + set_ret1 = set([t["id"] for t in ret1]) # creating the same set result in the same ids ret = swh_scheduler.create_tasks(tasks) - set_ret = set([t['id'] for t in ret]) + set_ret = set([t["id"] for t in ret]) # Idempotence results assert set_ret == set_ret1 assert len(ret) == len(ret1) ids = set() actual_priorities = defaultdict(int) for task, orig_task in zip(ret, tasks): task = copy.deepcopy(task) - task_type = TASK_TYPES[orig_task['type'].split('-')[-1]] - assert task['id'] not in ids - assert task['status'] == 'next_run_not_scheduled' - assert task['current_interval'] == task_type['default_interval'] - assert task['policy'] == orig_task.get('policy', 'recurring') - priority = task.get('priority') + task_type = TASK_TYPES[orig_task["type"].split("-")[-1]] + assert task["id"] not in ids + assert task["status"] == "next_run_not_scheduled" + assert task["current_interval"] == task_type["default_interval"] + assert task["policy"] == orig_task.get("policy", "recurring") + priority = task.get("priority") if priority: actual_priorities[priority] += 1 - assert task['retries_left'] == (task_type['num_retries'] or 0) - ids.add(task['id']) - del task['id'] - del task['status'] - del task['current_interval'] - del task['retries_left'] - if 'policy' not in orig_task: - del task['policy'] - if 'priority' not in orig_task: - del task['priority'] + assert task["retries_left"] == (task_type["num_retries"] or 0) + ids.add(task["id"]) + del task["id"] + del task["status"] + del task["current_interval"] + del task["retries_left"] + if "policy" not in 
orig_task: + del task["policy"] + if "priority" not in orig_task: + del task["priority"] assert task == orig_task assert dict(actual_priorities) == { priority: int(ratio * num_tasks_priority) for priority, ratio in priority_ratio.items() } def test_peek_ready_tasks_no_priority(self, swh_scheduler): self._create_task_types(swh_scheduler) t = utcnow() - task_type = TEMPLATES['git']['type'] - tasks = tasks_from_template(TEMPLATES['git'], t, 100) + task_type = TEMPLATES["git"]["type"] + tasks = tasks_from_template(TEMPLATES["git"], t, 100) random.shuffle(tasks) swh_scheduler.create_tasks(tasks) ready_tasks = swh_scheduler.peek_ready_tasks(task_type) assert len(ready_tasks) == len(tasks) for i in range(len(ready_tasks) - 1): - assert ready_tasks[i]['next_run'] <= ready_tasks[i+1]['next_run'] + assert ready_tasks[i]["next_run"] <= ready_tasks[i + 1]["next_run"] # Only get the first few ready tasks - limit = random.randrange(5, 5 + len(tasks)//2) - ready_tasks_limited = swh_scheduler.peek_ready_tasks( - task_type, num_tasks=limit) + limit = random.randrange(5, 5 + len(tasks) // 2) + ready_tasks_limited = swh_scheduler.peek_ready_tasks(task_type, num_tasks=limit) assert len(ready_tasks_limited) == limit assert ready_tasks_limited == ready_tasks[:limit] # Limit by timestamp - max_ts = tasks[limit-1]['next_run'] + max_ts = tasks[limit - 1]["next_run"] ready_tasks_timestamped = swh_scheduler.peek_ready_tasks( - task_type, timestamp=max_ts) + task_type, timestamp=max_ts + ) for ready_task in ready_tasks_timestamped: - assert ready_task['next_run'] <= max_ts + assert ready_task["next_run"] <= max_ts # Make sure we get proper behavior for the first ready tasks - assert ready_tasks[:len(ready_tasks_timestamped)] \ - == ready_tasks_timestamped + assert ready_tasks[: len(ready_tasks_timestamped)] == ready_tasks_timestamped # Limit by both ready_tasks_both = swh_scheduler.peek_ready_tasks( - task_type, timestamp=max_ts, num_tasks=limit//3) - assert len(ready_tasks_both) <= limit//3 + task_type, timestamp=max_ts, num_tasks=limit // 3 + ) + assert len(ready_tasks_both) <= limit // 3 for ready_task in ready_tasks_both: - assert ready_task['next_run'] <= max_ts - assert ready_task in ready_tasks[:limit//3] + assert ready_task["next_run"] <= max_ts + assert ready_task in ready_tasks[: limit // 3] def _priority_ratio(self, swh_scheduler): return swh_scheduler.get_priority_ratios() def test_peek_ready_tasks_mixed_priorities(self, swh_scheduler): priority_ratio = self._priority_ratio(swh_scheduler) self._create_task_types(swh_scheduler) t = utcnow() - task_type = TEMPLATES['git']['type'] + task_type = TEMPLATES["git"]["type"] num_tasks_priority = 100 num_tasks_no_priority = 100 # Create tasks with and without priorities tasks = tasks_from_template( - TEMPLATES['git'], t, + TEMPLATES["git"], + t, num=num_tasks_no_priority, num_priority=num_tasks_priority, - priorities=priority_ratio) + priorities=priority_ratio, + ) random.shuffle(tasks) swh_scheduler.create_tasks(tasks) # take all available tasks - ready_tasks = swh_scheduler.peek_ready_tasks( - task_type) + ready_tasks = swh_scheduler.peek_ready_tasks(task_type) assert len(ready_tasks) == len(tasks) - assert num_tasks_priority + num_tasks_no_priority \ - == len(ready_tasks) + assert num_tasks_priority + num_tasks_no_priority == len(ready_tasks) count_tasks_per_priority = defaultdict(int) for task in ready_tasks: - priority = task.get('priority') + priority = task.get("priority") if priority: count_tasks_per_priority[priority] += 1 assert 
dict(count_tasks_per_priority) == { priority: int(ratio * num_tasks_priority) for priority, ratio in priority_ratio.items() } # Only get some ready tasks - num_tasks = random.randrange(5, 5 + num_tasks_no_priority//2) - num_tasks_priority = random.randrange(5, num_tasks_priority//2) + num_tasks = random.randrange(5, 5 + num_tasks_no_priority // 2) + num_tasks_priority = random.randrange(5, num_tasks_priority // 2) ready_tasks_limited = swh_scheduler.peek_ready_tasks( - task_type, num_tasks=num_tasks, - num_tasks_priority=num_tasks_priority) + task_type, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority + ) count_tasks_per_priority = defaultdict(int) for task in ready_tasks_limited: - priority = task.get('priority') + priority = task.get("priority") count_tasks_per_priority[priority] += 1 import math + for priority, ratio in priority_ratio.items(): expected_count = math.ceil(ratio * num_tasks_priority) actual_prio = count_tasks_per_priority[priority] - assert (actual_prio == expected_count or - actual_prio == expected_count + 1) + assert actual_prio == expected_count or actual_prio == expected_count + 1 assert count_tasks_per_priority[None] == num_tasks def test_grab_ready_tasks(self, swh_scheduler): priority_ratio = self._priority_ratio(swh_scheduler) self._create_task_types(swh_scheduler) t = utcnow() - task_type = TEMPLATES['git']['type'] + task_type = TEMPLATES["git"]["type"] num_tasks_priority = 100 num_tasks_no_priority = 100 # Create tasks with and without priorities tasks = tasks_from_template( - TEMPLATES['git'], t, + TEMPLATES["git"], + t, num=num_tasks_no_priority, num_priority=num_tasks_priority, - priorities=priority_ratio) + priorities=priority_ratio, + ) random.shuffle(tasks) swh_scheduler.create_tasks(tasks) first_ready_tasks = swh_scheduler.peek_ready_tasks( - task_type, num_tasks=10, num_tasks_priority=10) + task_type, num_tasks=10, num_tasks_priority=10 + ) grabbed_tasks = swh_scheduler.grab_ready_tasks( - task_type, num_tasks=10, num_tasks_priority=10) + task_type, num_tasks=10, num_tasks_priority=10 + ) for peeked, grabbed in zip(first_ready_tasks, grabbed_tasks): - assert peeked['status'] == 'next_run_not_scheduled' - del peeked['status'] - assert grabbed['status'] == 'next_run_scheduled' - del grabbed['status'] + assert peeked["status"] == "next_run_not_scheduled" + del peeked["status"] + assert grabbed["status"] == "next_run_scheduled" + del grabbed["status"] assert peeked == grabbed - assert peeked['priority'] == grabbed['priority'] + assert peeked["priority"] == grabbed["priority"] def test_get_tasks(self, swh_scheduler): self._create_task_types(swh_scheduler) t = utcnow() - tasks = tasks_from_template(TEMPLATES['git'], t, 100) + tasks = tasks_from_template(TEMPLATES["git"], t, 100) tasks = swh_scheduler.create_tasks(tasks) random.shuffle(tasks) while len(tasks) > 1: length = random.randrange(1, len(tasks)) - cur_tasks = sorted(tasks[:length], key=lambda x: x['id']) + cur_tasks = sorted(tasks[:length], key=lambda x: x["id"]) tasks[:length] = [] - ret = swh_scheduler.get_tasks(task['id'] for task in cur_tasks) + ret = swh_scheduler.get_tasks(task["id"] for task in cur_tasks) # result is not guaranteed to be sorted - ret.sort(key=lambda x: x['id']) + ret.sort(key=lambda x: x["id"]) assert ret == cur_tasks def test_search_tasks(self, swh_scheduler): def make_real_dicts(l): """RealDictRow is not a real dict.""" return [dict(d.items()) for d in l] + self._create_task_types(swh_scheduler) t = utcnow() - tasks = tasks_from_template(TEMPLATES['git'], t, 100) + 
tasks = tasks_from_template(TEMPLATES["git"], t, 100) tasks = swh_scheduler.create_tasks(tasks) - assert make_real_dicts(swh_scheduler.search_tasks()) \ - == make_real_dicts(tasks) + assert make_real_dicts(swh_scheduler.search_tasks()) == make_real_dicts(tasks) def assert_filtered_task_ok( - self, task: Dict[str, Any], - after: datetime.datetime, - before: datetime.datetime) -> None: + self, task: Dict[str, Any], after: datetime.datetime, before: datetime.datetime + ) -> None: """Ensure filtered tasks have the right expected properties (within the range, recurring disabled, etc..) """ - started = task['started'] - date = started if started is not None else task['scheduled'] + started = task["started"] + date = started if started is not None else task["scheduled"] assert after <= date and date <= before - if task['task_policy'] == 'oneshot': - assert task['task_status'] in ['completed', 'disabled'] - if task['task_policy'] == 'recurring': - assert task['task_status'] in ['disabled'] + if task["task_policy"] == "oneshot": + assert task["task_status"] in ["completed", "disabled"] + if task["task_policy"] == "recurring": + assert task["task_status"] in ["disabled"] def test_filter_task_to_archive(self, swh_scheduler): """Filtering only list disabled recurring or completed oneshot tasks """ self._create_task_types(swh_scheduler) _time = utcnow() - recurring = tasks_from_template(TEMPLATES['git'], _time, 12) - oneshots = tasks_from_template(TEMPLATES['hg'], _time, 12) + recurring = tasks_from_template(TEMPLATES["git"], _time, 12) + oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12) total_tasks = len(recurring) + len(oneshots) # simulate scheduling tasks pending_tasks = swh_scheduler.create_tasks(recurring + oneshots) - backend_tasks = [{ - 'task': task['id'], - 'backend_id': str(uuid.uuid4()), - 'scheduled': utcnow(), - } for task in pending_tasks] + backend_tasks = [ + { + "task": task["id"], + "backend_id": str(uuid.uuid4()), + "scheduled": utcnow(), + } + for task in pending_tasks + ] swh_scheduler.mass_schedule_task_runs(backend_tasks) # we simulate the task are being done _tasks = [] for task in backend_tasks: - t = swh_scheduler.end_task_run( - task['backend_id'], status='eventful') + t = swh_scheduler.end_task_run(task["backend_id"], status="eventful") _tasks.append(t) # Randomly update task's status per policy - status_per_policy = {'recurring': 0, 'oneshot': 0} + status_per_policy = {"recurring": 0, "oneshot": 0} status_choice = { # policy: [tuple (1-for-filtering, 'associated-status')] - 'recurring': [(1, 'disabled'), - (0, 'completed'), - (0, 'next_run_not_scheduled')], - 'oneshot': [(0, 'next_run_not_scheduled'), - (1, 'disabled'), - (1, 'completed')] + "recurring": [ + (1, "disabled"), + (0, "completed"), + (0, "next_run_not_scheduled"), + ], + "oneshot": [ + (0, "next_run_not_scheduled"), + (1, "disabled"), + (1, "completed"), + ], } tasks_to_update = defaultdict(list) _task_ids = defaultdict(list) # randomize 'disabling' recurring task or 'complete' oneshot task for task in pending_tasks: - policy = task['policy'] - _task_ids[policy].append(task['id']) + policy = task["policy"] + _task_ids[policy].append(task["id"]) status = random.choice(status_choice[policy]) if status[0] != 1: continue # elected for filtering status_per_policy[policy] += status[0] - tasks_to_update[policy].append(task['id']) + tasks_to_update[policy].append(task["id"]) - swh_scheduler.disable_tasks(tasks_to_update['recurring']) + swh_scheduler.disable_tasks(tasks_to_update["recurring"]) # hack: change 
the status to something else than completed/disabled swh_scheduler.set_status_tasks( - _task_ids['oneshot'], status='next_run_not_scheduled') + _task_ids["oneshot"], status="next_run_not_scheduled" + ) # complete the tasks to update - swh_scheduler.set_status_tasks( - tasks_to_update['oneshot'], status='completed') + swh_scheduler.set_status_tasks(tasks_to_update["oneshot"], status="completed") - total_tasks_filtered = (status_per_policy['recurring'] + - status_per_policy['oneshot']) + total_tasks_filtered = ( + status_per_policy["recurring"] + status_per_policy["oneshot"] + ) # no pagination scenario # retrieve tasks to archive after = _time.shift(days=-1) - after_ts = after.format('YYYY-MM-DD') + after_ts = after.format("YYYY-MM-DD") before = utcnow().shift(days=1) - before_ts = before.format('YYYY-MM-DD') + before_ts = before.format("YYYY-MM-DD") tasks_result = swh_scheduler.filter_task_to_archive( - after_ts=after_ts, before_ts=before_ts, limit=total_tasks) + after_ts=after_ts, before_ts=before_ts, limit=total_tasks + ) - tasks_to_archive = tasks_result['tasks'] + tasks_to_archive = tasks_result["tasks"] assert len(tasks_to_archive) == total_tasks_filtered - assert tasks_result.get('next_page_token') is None + assert tasks_result.get("next_page_token") is None - actual_filtered_per_status = {'recurring': 0, 'oneshot': 0} + actual_filtered_per_status = {"recurring": 0, "oneshot": 0} for task in tasks_to_archive: self.assert_filtered_task_ok(task, after, before) - actual_filtered_per_status[task['task_policy']] += 1 + actual_filtered_per_status[task["task_policy"]] += 1 assert actual_filtered_per_status == status_per_policy # pagination scenario nb_tasks = 3 tasks_result = swh_scheduler.filter_task_to_archive( - after_ts=after_ts, before_ts=before_ts, limit=nb_tasks) + after_ts=after_ts, before_ts=before_ts, limit=nb_tasks + ) - tasks_to_archive2 = tasks_result['tasks'] + tasks_to_archive2 = tasks_result["tasks"] assert len(tasks_to_archive2) == nb_tasks - next_page_token = tasks_result['next_page_token'] + next_page_token = tasks_result["next_page_token"] assert next_page_token is not None all_tasks = tasks_to_archive2 while next_page_token is not None: # Retrieve paginated results tasks_result = swh_scheduler.filter_task_to_archive( - after_ts=after_ts, before_ts=before_ts, limit=nb_tasks, - page_token=next_page_token) - tasks_to_archive2 = tasks_result['tasks'] + after_ts=after_ts, + before_ts=before_ts, + limit=nb_tasks, + page_token=next_page_token, + ) + tasks_to_archive2 = tasks_result["tasks"] assert len(tasks_to_archive2) <= nb_tasks all_tasks.extend(tasks_to_archive2) - next_page_token = tasks_result.get('next_page_token') + next_page_token = tasks_result.get("next_page_token") - actual_filtered_per_status = {'recurring': 0, 'oneshot': 0} + actual_filtered_per_status = {"recurring": 0, "oneshot": 0} for task in all_tasks: self.assert_filtered_task_ok(task, after, before) - actual_filtered_per_status[task['task_policy']] += 1 + actual_filtered_per_status[task["task_policy"]] += 1 assert actual_filtered_per_status == status_per_policy def test_delete_archived_tasks(self, swh_scheduler): self._create_task_types(swh_scheduler) _time = utcnow() - recurring = tasks_from_template( - TEMPLATES['git'], _time, 12) - oneshots = tasks_from_template( - TEMPLATES['hg'], _time, 12) + recurring = tasks_from_template(TEMPLATES["git"], _time, 12) + oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12) total_tasks = len(recurring) + len(oneshots) pending_tasks = 
-        backend_tasks = [{
-            'task': task['id'],
-            'backend_id': str(uuid.uuid4()),
-            'scheduled': utcnow(),
-        } for task in pending_tasks]
+        backend_tasks = [
+            {
+                "task": task["id"],
+                "backend_id": str(uuid.uuid4()),
+                "scheduled": utcnow(),
+            }
+            for task in pending_tasks
+        ]
         swh_scheduler.mass_schedule_task_runs(backend_tasks)

         _tasks = []
         percent = random.randint(0, 100)  # random election removal boundary
         for task in backend_tasks:
-            t = swh_scheduler.end_task_run(
-                task['backend_id'], status='eventful')
+            t = swh_scheduler.end_task_run(task["backend_id"], status="eventful")
             c = random.randint(0, 100)
             if c <= percent:
-                _tasks.append({'task_id': t['task'], 'task_run_id': t['id']})
+                _tasks.append({"task_id": t["task"], "task_run_id": t["id"]})

         swh_scheduler.delete_archived_tasks(_tasks)

-        all_tasks = [task['id'] for task in swh_scheduler.search_tasks()]
+        all_tasks = [task["id"] for task in swh_scheduler.search_tasks()]
         tasks_count = len(all_tasks)
         tasks_run_count = len(swh_scheduler.get_task_runs(all_tasks))

         assert tasks_count == total_tasks - len(_tasks)
         assert tasks_run_count == total_tasks - len(_tasks)

     def test_get_task_runs_no_task(self, swh_scheduler):
-        '''No task exist in the scheduler's db, get_task_runs() should always return an
+        """No task exist in the scheduler's db, get_task_runs() should always return an
         empty list.

-        '''
+        """
         assert not swh_scheduler.get_task_runs(task_ids=())
         assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3))
-        assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3),
-                                               limit=10)
+        assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3), limit=10)

     def test_get_task_runs_no_task_executed(self, swh_scheduler):
-        '''No task has been executed yet, get_task_runs() should always return an empty
+        """No task has been executed yet, get_task_runs() should always return an empty
         list.

-        '''
+        """
         self._create_task_types(swh_scheduler)
         _time = utcnow()
-        recurring = tasks_from_template(
-            TEMPLATES['git'], _time, 12)
-        oneshots = tasks_from_template(
-            TEMPLATES['hg'], _time, 12)
+        recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
+        oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
         swh_scheduler.create_tasks(recurring + oneshots)

         assert not swh_scheduler.get_task_runs(task_ids=())
         assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3))
         assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3), limit=10)

     def test_get_task_runs_with_scheduled(self, swh_scheduler):
-        '''Some tasks have been scheduled but not executed yet, get_task_runs() should
+        """Some tasks have been scheduled but not executed yet, get_task_runs() should
         not return an empty list. limit should behave as expected.

-        '''
+        """
         self._create_task_types(swh_scheduler)
         _time = utcnow()
-        recurring = tasks_from_template(
-            TEMPLATES['git'], _time, 12)
-        oneshots = tasks_from_template(
-            TEMPLATES['hg'], _time, 12)
+        recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
+        oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
         total_tasks = len(recurring) + len(oneshots)
         pending_tasks = swh_scheduler.create_tasks(recurring + oneshots)
-        backend_tasks = [{
-            'task': task['id'],
-            'backend_id': str(uuid.uuid4()),
-            'scheduled': utcnow(),
-        } for task in pending_tasks]
+        backend_tasks = [
+            {
+                "task": task["id"],
+                "backend_id": str(uuid.uuid4()),
+                "scheduled": utcnow(),
+            }
+            for task in pending_tasks
+        ]
         swh_scheduler.mass_schedule_task_runs(backend_tasks)

-        assert not swh_scheduler.get_task_runs(
-            task_ids=[total_tasks + 1])
+        assert not swh_scheduler.get_task_runs(task_ids=[total_tasks + 1])

         btask = backend_tasks[0]
-        runs = swh_scheduler.get_task_runs(
-            task_ids=[btask['task']])
+        runs = swh_scheduler.get_task_runs(task_ids=[btask["task"]])
         assert len(runs) == 1
         run = runs[0]

-        assert subdict(run, excl=('id',)) == {
-            'task': btask['task'],
-            'backend_id': btask['backend_id'],
-            'scheduled': btask['scheduled'],
-            'started': None,
-            'ended': None,
-            'metadata': None,
-            'status': 'scheduled',
+        assert subdict(run, excl=("id",)) == {
+            "task": btask["task"],
+            "backend_id": btask["backend_id"],
+            "scheduled": btask["scheduled"],
+            "started": None,
+            "ended": None,
+            "metadata": None,
+            "status": "scheduled",
         }

         runs = swh_scheduler.get_task_runs(
-            task_ids=[bt['task'] for bt in backend_tasks], limit=2)
+            task_ids=[bt["task"] for bt in backend_tasks], limit=2
+        )
         assert len(runs) == 2

         runs = swh_scheduler.get_task_runs(
-            task_ids=[bt['task'] for bt in backend_tasks])
+            task_ids=[bt["task"] for bt in backend_tasks]
+        )
         assert len(runs) == total_tasks
-        keys = ('task', 'backend_id', 'scheduled')
-        assert sorted([subdict(x, keys) for x in runs],
-                      key=lambda x: x['task']) == backend_tasks
+        keys = ("task", "backend_id", "scheduled")
+        assert (
+            sorted([subdict(x, keys) for x in runs], key=lambda x: x["task"])
+            == backend_tasks
+        )

     def test_get_task_runs_with_executed(self, swh_scheduler):
-        '''Some tasks have been executed, get_task_runs() should
+        """Some tasks have been executed, get_task_runs() should
         not return an empty list. limit should behave as expected.

-        '''
+        """
         self._create_task_types(swh_scheduler)
         _time = utcnow()
-        recurring = tasks_from_template(
-            TEMPLATES['git'], _time, 12)
-        oneshots = tasks_from_template(
-            TEMPLATES['hg'], _time, 12)
+        recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
+        oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
         pending_tasks = swh_scheduler.create_tasks(recurring + oneshots)
-        backend_tasks = [{
-            'task': task['id'],
-            'backend_id': str(uuid.uuid4()),
-            'scheduled': utcnow(),
-        } for task in pending_tasks]
+        backend_tasks = [
+            {
+                "task": task["id"],
+                "backend_id": str(uuid.uuid4()),
+                "scheduled": utcnow(),
+            }
+            for task in pending_tasks
+        ]
         swh_scheduler.mass_schedule_task_runs(backend_tasks)

         btask = backend_tasks[0]
         ts = utcnow()
-        swh_scheduler.start_task_run(btask['backend_id'],
-                                     metadata={'something': 'stupid'},
-                                     timestamp=ts)
-        runs = swh_scheduler.get_task_runs(task_ids=[btask['task']])
+        swh_scheduler.start_task_run(
+            btask["backend_id"], metadata={"something": "stupid"}, timestamp=ts
+        )
+        runs = swh_scheduler.get_task_runs(task_ids=[btask["task"]])
         assert len(runs) == 1
-        assert subdict(runs[0], excl=('id')) == {
-            'task': btask['task'],
-            'backend_id': btask['backend_id'],
-            'scheduled': btask['scheduled'],
-            'started': ts,
-            'ended': None,
-            'metadata': {'something': 'stupid'},
-            'status': 'started',
-            }
+        assert subdict(runs[0], excl=("id")) == {
+            "task": btask["task"],
+            "backend_id": btask["backend_id"],
+            "scheduled": btask["scheduled"],
+            "started": ts,
+            "ended": None,
+            "metadata": {"something": "stupid"},
+            "status": "started",
+        }

         ts2 = utcnow()
-        swh_scheduler.end_task_run(btask['backend_id'],
-                                   metadata={'other': 'stuff'},
-                                   timestamp=ts2,
-                                   status='eventful')
-        runs = swh_scheduler.get_task_runs(task_ids=[btask['task']])
+        swh_scheduler.end_task_run(
+            btask["backend_id"],
+            metadata={"other": "stuff"},
+            timestamp=ts2,
+            status="eventful",
+        )
+        runs = swh_scheduler.get_task_runs(task_ids=[btask["task"]])
         assert len(runs) == 1
-        assert subdict(runs[0], excl=('id')) == {
-            'task': btask['task'],
-            'backend_id': btask['backend_id'],
-            'scheduled': btask['scheduled'],
-            'started': ts,
-            'ended': ts2,
-            'metadata': {'something': 'stupid', 'other': 'stuff'},
-            'status': 'eventful',
-            }
+        assert subdict(runs[0], excl=("id")) == {
+            "task": btask["task"],
+            "backend_id": btask["backend_id"],
+            "scheduled": btask["scheduled"],
+            "started": ts,
+            "ended": ts2,
+            "metadata": {"something": "stupid", "other": "stuff"},
+            "status": "eventful",
+        }

     def _create_task_types(self, scheduler):
         for tt in TASK_TYPES.values():
             scheduler.create_task_type(tt)
diff --git a/swh/scheduler/tests/test_server.py b/swh/scheduler/tests/test_server.py
index ae4e660..e41c80c 100644
--- a/swh/scheduler/tests/test_server.py
+++ b/swh/scheduler/tests/test_server.py
@@ -1,133 +1,110 @@
 # Copyright (C) 2019 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import copy
 import pytest
 import yaml

 from swh.scheduler.api.server import load_and_check_config


-def prepare_config_file(tmpdir, content, name='config.yml'):
+def prepare_config_file(tmpdir, content, name="config.yml"):
     """Prepare configuration file in `$tmpdir/name` with content `content`.

     Args:
         tmpdir (LocalPath): root directory
         content (str/dict): Content of the file either as string or as a dict.
                             If a dict, converts the dict into a yaml string.
         name (str): configuration filename

     Returns
         path (str) of the configuration file prepared.

     """
     config_path = tmpdir / name
     if isinstance(content, dict):  # convert if needed
         content = yaml.dump(content)
-    config_path.write_text(content, encoding='utf-8')
+    config_path.write_text(content, encoding="utf-8")
     # pytest on python3.5 does not support LocalPath manipulation, so
     # convert path to string
     return str(config_path)


 def test_load_and_check_config_no_configuration():
     """Inexistent configuration files raises"""
     with pytest.raises(EnvironmentError) as e:
         load_and_check_config(None)

-    assert e.value.args[0] == 'Configuration file must be defined'
+    assert e.value.args[0] == "Configuration file must be defined"

-    config_path = '/some/inexistent/config.yml'
+    config_path = "/some/inexistent/config.yml"
     with pytest.raises(FileNotFoundError) as e:
         load_and_check_config(config_path)

-    assert e.value.args[0] == 'Configuration file %s does not exist' % (
-        config_path, )
+    assert e.value.args[0] == "Configuration file %s does not exist" % (config_path,)


 def test_load_and_check_config_wrong_configuration(tmpdir):
     """Wrong configuration raises"""
-    config_path = prepare_config_file(tmpdir, 'something: useless')
+    config_path = prepare_config_file(tmpdir, "something: useless")
     with pytest.raises(KeyError) as e:
         load_and_check_config(config_path)

-    assert e.value.args[0] == 'Missing \'%scheduler\' configuration'
+    assert e.value.args[0] == "Missing '%scheduler' configuration"


 def test_load_and_check_config_remote_config_local_type_raise(tmpdir):
     """'local' configuration without 'local' storage raises"""
-    config = {
-        'scheduler': {
-            'cls': 'remote',
-            'args': {}
-        }
-    }
+    config = {"scheduler": {"cls": "remote", "args": {}}}
     config_path = prepare_config_file(tmpdir, config)
     with pytest.raises(ValueError) as e:
-        load_and_check_config(config_path, type='local')
+        load_and_check_config(config_path, type="local")

     assert (
-        e.value.args[0] ==
-        "The scheduler backend can only be started with a 'local'"
+        e.value.args[0] == "The scheduler backend can only be started with a 'local'"
         " configuration"
     )


 def test_load_and_check_config_local_incomplete_configuration(tmpdir):
     """Incomplete 'local' configuration should raise"""
     config = {
-        'scheduler': {
-            'cls': 'local',
-            'args': {
-                'db': 'database',
-                'something': 'needed-for-test',
-            }
+        "scheduler": {
+            "cls": "local",
+            "args": {"db": "database", "something": "needed-for-test",},
         }
     }

-    for key in ['db', 'args']:
+    for key in ["db", "args"]:
         c = copy.deepcopy(config)
-        if key == 'db':
-            source = c['scheduler']['args']
+        if key == "db":
+            source = c["scheduler"]["args"]
         else:
-            source = c['scheduler']
+            source = c["scheduler"]
         source.pop(key)
         config_path = prepare_config_file(tmpdir, c)
         with pytest.raises(KeyError) as e:
             load_and_check_config(config_path)

         assert (
-            e.value.args[0] ==
-            "Invalid configuration; missing '%s' config entry" % key
+            e.value.args[0] == "Invalid configuration; missing '%s' config entry" % key
         )


 def test_load_and_check_config_local_config_fine(tmpdir):
     """Local configuration is fine"""
-    config = {
-        'scheduler': {
-            'cls': 'local',
-            'args': {
-                'db': 'db',
-            }
-        }
-    }
+    config = {"scheduler": {"cls": "local", "args": {"db": "db",}}}
     config_path = prepare_config_file(tmpdir, config)
-    cfg = load_and_check_config(config_path, type='local')
+    cfg = load_and_check_config(config_path, type="local")

     assert cfg == config


 def test_load_and_check_config_remote_config_fine(tmpdir):
     """'Remote configuration is fine"""
-    config = {
-        'scheduler': {
-            'cls': 'remote',
-            'args': {}
-        }
-    }
+    config = {"scheduler": {"cls": "remote", "args": {}}}
     config_path = prepare_config_file(tmpdir, config)
-    cfg = load_and_check_config(config_path, type='any')
+    cfg = load_and_check_config(config_path, type="any")

     assert cfg == config
diff --git a/swh/scheduler/tests/test_utils.py b/swh/scheduler/tests/test_utils.py
index 2e34570..655f644 100644
--- a/swh/scheduler/tests/test_utils.py
+++ b/swh/scheduler/tests/test_utils.py
@@ -1,79 +1,79 @@
 # Copyright (C) 2017-2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import unittest

 from datetime import timezone
 from unittest.mock import patch

 from swh.scheduler import utils


 class UtilsTest(unittest.TestCase):
-
-    @patch('swh.scheduler.utils.datetime')
+    @patch("swh.scheduler.utils.datetime")
     def test_create_oneshot_task_dict_simple(self, mock_datetime):
-        mock_datetime.now.return_value = 'some-date'
+        mock_datetime.now.return_value = "some-date"

-        actual_task = utils.create_oneshot_task_dict('some-task-type')
+        actual_task = utils.create_oneshot_task_dict("some-task-type")

         expected_task = {
-            'policy': 'oneshot',
-            'type': 'some-task-type',
-            'next_run': 'some-date',
-            'arguments': {
-                'args': [],
-                'kwargs': {},
-            },
+            "policy": "oneshot",
+            "type": "some-task-type",
+            "next_run": "some-date",
+            "arguments": {"args": [], "kwargs": {},},
         }

         self.assertEqual(actual_task, expected_task)
         mock_datetime.now.assert_called_once_with(tz=timezone.utc)

-    @patch('swh.scheduler.utils.datetime')
+    @patch("swh.scheduler.utils.datetime")
     def test_create_oneshot_task_dict_other_call(self, mock_datetime):
-        mock_datetime.now.return_value = 'some-other-date'
+        mock_datetime.now.return_value = "some-other-date"

         actual_task = utils.create_oneshot_task_dict(
-            'some-task-type', 'arg0', 'arg1',
-            priority='high', other_stuff='normal'
+            "some-task-type", "arg0", "arg1", priority="high", other_stuff="normal"
         )

         expected_task = {
-            'policy': 'oneshot',
-            'type': 'some-task-type',
-            'next_run': 'some-other-date',
-            'arguments': {
-                'args': ('arg0', 'arg1'),
-                'kwargs': {'other_stuff': 'normal'},
+            "policy": "oneshot",
+            "type": "some-task-type",
+            "next_run": "some-other-date",
+            "arguments": {
+                "args": ("arg0", "arg1"),
+                "kwargs": {"other_stuff": "normal"},
             },
-            'priority': 'high',
+            "priority": "high",
         }

         self.assertEqual(actual_task, expected_task)
         mock_datetime.now.assert_called_once_with(tz=timezone.utc)

-    @patch('swh.scheduler.utils.datetime')
+    @patch("swh.scheduler.utils.datetime")
     def test_create_task_dict(self, mock_datetime):
-        mock_datetime.now.return_value = 'date'
+        mock_datetime.now.return_value = "date"

         actual_task = utils.create_task_dict(
-            'task-type', 'recurring', 'arg0', 'arg1',
-            priority='low', other_stuff='normal', retries_left=3
+            "task-type",
+            "recurring",
+            "arg0",
+            "arg1",
+            priority="low",
+            other_stuff="normal",
+            retries_left=3,
         )

         expected_task = {
-            'policy': 'recurring',
-            'type': 'task-type',
-            'next_run': 'date',
-            'arguments': {
-                'args': ('arg0', 'arg1'),
-                'kwargs': {'other_stuff': 'normal'},
+            "policy": "recurring",
+            "type": "task-type",
+            "next_run": "date",
+            "arguments": {
+                "args": ("arg0", "arg1"),
+                "kwargs": {"other_stuff": "normal"},
             },
-            'priority': 'low',
-            'retries_left': 3,
+            "priority": "low",
+            "retries_left": 3,
         }

         self.assertEqual(actual_task, expected_task)
         mock_datetime.now.assert_called_once_with(tz=timezone.utc)
diff --git a/swh/scheduler/utils.py b/swh/scheduler/utils.py
index 6f61cef..9f6718a 100644
--- a/swh/scheduler/utils.py
+++ b/swh/scheduler/utils.py
@@ -1,75 +1,76 @@
 # Copyright (C) 2017-2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from datetime import datetime, timezone


 def get_task(task_name):
     """Retrieve task object in our application instance by its fully
     qualified python name.

     Args:
         task_name (str): task's name (e.g swh.loader.git.tasks.LoadDiskGitRepository)

     Returns:
         Instance of task

     """
     from swh.scheduler.celery_backend.config import app
+
     for module in app.conf.CELERY_IMPORTS:
         __import__(module)
     return app.tasks[task_name]


 def create_task_dict(type, policy, *args, **kwargs):
     """Create a task with type and policy, scheduled for as soon as
        possible.

     Args:
         type (str): Type of oneshot task as per swh-scheduler's db
                     table task_type's column (Ex: load-git, check-deposit)
         policy (str): oneshot or recurring policy

     Returns:
         Expected dictionary for the one-shot task scheduling api
         (swh.scheduler.backend.create_tasks)

     """
     task_extra = {}
-    for extra_key in ['priority', 'retries_left']:
+    for extra_key in ["priority", "retries_left"]:
         if extra_key in kwargs:
             extra_val = kwargs.pop(extra_key)
             task_extra[extra_key] = extra_val

     task = {
-        'policy': policy,
-        'type': type,
-        'next_run': datetime.now(tz=timezone.utc),
-        'arguments': {
-            'args': args if args else [],
-            'kwargs': kwargs if kwargs else {},
+        "policy": policy,
+        "type": type,
+        "next_run": datetime.now(tz=timezone.utc),
+        "arguments": {
+            "args": args if args else [],
+            "kwargs": kwargs if kwargs else {},
         },
     }
     task.update(task_extra)

     return task


 def create_oneshot_task_dict(type, *args, **kwargs):
     """Create a oneshot task scheduled for as soon as possible.

     Args:
         type (str): Type of oneshot task as per swh-scheduler's db
                     table task_type's column (Ex: load-git, check-deposit)

     Returns:
         Expected dictionary for the one-shot task scheduling api
         (swh.scheduler.backend.create_tasks)

     """
-    return create_task_dict(type, 'oneshot', *args, **kwargs)
+    return create_task_dict(type, "oneshot", *args, **kwargs)
diff --git a/tox.ini b/tox.ini
index 272b1ff..6c0c1f7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,37 +1,44 @@
 [tox]
-envlist=flake8,mypy,py3
+envlist=black,flake8,mypy,py3

 [testenv]
 extras =
   testing
 deps =
   pytest-cov
   pifpaf
   dev: ipdb
 setenv =
   LC_ALL=C.UTF-8
   LC_CTYPE=C.UTF-8
   LANG=C.UTF-8
 commands =
   pifpaf run postgresql -- \
     pytest --doctest-modules \
            !slow: --hypothesis-profile=fast \
            slow: --hypothesis-profile=slow \
            --cov={envsitepackagesdir}/swh/scheduler \
            {envsitepackagesdir}/swh/scheduler \
            --cov-branch {posargs}

+[testenv:black]
+skip_install = true
+deps =
+  black
+commands =
+  {envpython} -m black --check swh
+
 [testenv:flake8]
 skip_install = true
 deps =
   flake8
 commands =
   {envpython} -m flake8

 [testenv:mypy]
 extras =
   testing
 deps =
   mypy
 commands =
   mypy swh