diff --git a/PKG-INFO b/PKG-INFO index 7b703f4..7626a5b 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,28 +1,28 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.56 +Version: 0.0.57 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN -Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate +Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package). Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/requirements-swh.txt b/requirements-swh.txt index 2ad8f8c..6d30cae 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,2 +1,2 @@ -swh.core[db,http] >= 0.0.60 +swh.core[db,http] >= 0.0.61 swh.storage >= 0.0.129 diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 7b703f4..7626a5b 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,28 +1,28 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.56 +Version: 0.0.57 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN -Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate +Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package). 
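Both kinds of activities boil down to task dicts handed to the scheduler backend. A minimal sketch (hypothetical values: the 'url' keyword argument and the database DSN are illustrative assumptions, not part of this package's documented usage):

    import datetime

    from swh.scheduler import get_scheduler

    # 'cls' and 'args' mirror the 'scheduler' configuration section
    scheduler = get_scheduler(cls='local',
                              args={'db': 'dbname=softwareheritage-scheduler'})
    scheduler.create_tasks([{
        'type': 'load-git',           # a registered task type
        'arguments': {'args': [],
                      'kwargs': {'url': 'https://example.org/repo.git'}},
        'policy': 'recurring',        # or 'oneshot' for one-off activities
        'next_run': datetime.datetime.now(tz=datetime.timezone.utc),
    }])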
Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.scheduler.egg-info/requires.txt b/swh.scheduler.egg-info/requires.txt index 5fbe3a5..9048ea4 100644 --- a/swh.scheduler.egg-info/requires.txt +++ b/swh.scheduler.egg-info/requires.txt @@ -1,16 +1,16 @@ arrow celery>=4 Click elasticsearch>5.4 flask psycopg2 pyyaml vcversioner -swh.core[db,http]>=0.0.60 +swh.core[db,http]>=0.0.61 swh.storage>=0.0.129 [testing] pytest<4 pytest-postgresql celery>=4 hypothesis>=3.11.0 diff --git a/swh/scheduler/cli/__init__.py b/swh/scheduler/cli/__init__.py index 78a90c6..0a25ba6 100644 --- a/swh/scheduler/cli/__init__.py +++ b/swh/scheduler/cli/__init__.py @@ -1,76 +1,81 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import click import logging +import click -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) +from swh.core.cli import CONTEXT_SETTINGS, AliasedGroup -@click.group(context_settings=CONTEXT_SETTINGS) +@click.group(name='scheduler', context_settings=CONTEXT_SETTINGS, + cls=AliasedGroup) @click.option('--config-file', '-C', default=None, type=click.Path(exists=True, dir_okay=False,), help="Configuration file.") @click.option('--database', '-d', default=None, help="Scheduling database DSN (implies cls is 'local')") @click.option('--url', '-u', default=None, help="Scheduler's URL (implies cls is 'remote')") @click.option('--no-stdout', is_flag=True, default=False, help="Do NOT output logs on the console") @click.pass_context def cli(ctx, config_file, database, url, no_stdout): - """Scheduler CLI interface. + """Software Heritage Scheduler tools. - Default to use the the local scheduler instance (plugged to the + Use a local scheduler instance by default (plugged to the main scheduler db). - """ from swh.core import config from swh.scheduler.celery_backend.config import setup_log_handler from swh.scheduler import get_scheduler, DEFAULT_CONFIG + ctx.ensure_object(dict) + log_level = ctx.obj.get('log_level', logging.INFO) + setup_log_handler( - loglevel=ctx.obj['log_level'], colorize=False, + loglevel=log_level, colorize=False, format='[%(levelname)s] %(name)s -- %(message)s', log_console=not no_stdout) - ctx.ensure_object(dict) - logger = logging.getLogger(__name__) scheduler = None conf = config.read(config_file, DEFAULT_CONFIG) if 'scheduler' not in conf: raise ValueError("missing 'scheduler' configuration") if database: conf['scheduler']['cls'] = 'local' conf['scheduler']['args']['db'] = database elif url: conf['scheduler']['cls'] = 'remote' conf['scheduler']['args'] = {'url': url} sched_conf = conf['scheduler'] try: logger.debug('Instantiating scheduler with %s' % ( sched_conf)) scheduler = get_scheduler(**sched_conf) except ValueError: # it's up to the subcommand to decide whether not having a proper # scheduler instance is a problem. pass ctx.obj['scheduler'] = scheduler ctx.obj['config'] = conf - from . import admin, task, task_type # noqa def main(): + import click.core + click.core.DEPRECATED_HELP_NOTICE = ''' + +DEPRECATED!
Please use the command 'swh scheduler'.''' + cli.deprecated = True return cli(auto_envvar_prefix='SWH_SCHEDULER') if __name__ == '__main__': main() diff --git a/swh/scheduler/cli/admin.py b/swh/scheduler/cli/admin.py index ba8b133..113ac64 100644 --- a/swh/scheduler/cli/admin.py +++ b/swh/scheduler/cli/admin.py @@ -1,119 +1,133 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import time import click from . import cli -@cli.command('runner') +@cli.command('start-runner') @click.option('--period', '-p', default=0, help=('Period (in s) at which pending tasks are checked and ' 'executed. Set to 0 (default) for a one-shot run.')) @click.pass_context def runner(ctx, period): """Starts a swh-scheduler runner service. This process is responsible for checking for ready-to-run tasks and scheduling them.""" from swh.scheduler.celery_backend.runner import run_ready_tasks from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() logger = logging.getLogger(__name__ + '.runner') scheduler = ctx.obj['scheduler'] logger.debug('Scheduler %s' % scheduler) try: while True: logger.debug('Run ready tasks') try: ntasks = len(run_ready_tasks(scheduler, app)) if ntasks: logger.info('Scheduled %s tasks', ntasks) except Exception: logger.exception('Unexpected error in run_ready_tasks()') if not period: break time.sleep(period) except KeyboardInterrupt: ctx.exit(0) -@cli.command('listener') +@cli.command('start-listener') @click.pass_context def listener(ctx): """Starts a swh-scheduler listener service. This service is responsible for listening for task lifecycle events and handling their workflow status in the database.""" scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() from swh.scheduler.celery_backend.listener import event_monitor event_monitor(app, backend=scheduler) -@cli.command('api-server') +@cli.command('rpc-serve') @click.option('--host', default='0.0.0.0', help="Host to run the scheduler server api") @click.option('--port', default=5008, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=None, help=("Indicates if the server should run in debug mode. " "Defaults to True if log-level is DEBUG, False otherwise.") ) @click.pass_context -def api_server(ctx, host, port, debug): +def rpc_server(ctx, host, port, debug): """Starts a swh-scheduler API HTTP server. """ if ctx.obj['config']['scheduler']['cls'] == 'remote': click.echo("The API server can only be started with a 'local' " "configuration", err=True) ctx.exit(1) from swh.scheduler.api import server server.app.config.update(ctx.obj['config']) if debug is None: debug = ctx.obj['log_level'] <= logging.DEBUG server.app.run(host, port=port, debug=bool(debug)) -@cli.command('updater') +@cli.command('start-updater') @click.option('--verbose/--no-verbose', '-v', default=False, help='Verbose mode') @click.pass_context def updater(ctx, verbose): - """Insert tasks in the scheduler from the scheduler-updater's events + """Starts a scheduler-updater service.
+ + Inserts tasks in the scheduler from the scheduler-updater's events read from + the db cache (filled e.g. by the ghtorrent consumer service). """ from swh.scheduler.updater.writer import UpdaterWriter UpdaterWriter(**ctx.obj['config']).run() -@cli.command('ghtorrent') +@cli.command('start-ghtorrent') @click.option('--verbose/--no-verbose', '-v', default=False, help='Verbose mode') @click.pass_context def ghtorrent(ctx, verbose): - """Consume events from ghtorrent and write them to cache. + """Starts a ghtorrent consumer service. + + Consumes events from ghtorrent and writes them to a cache. """ from swh.scheduler.updater.ghtorrent import GHTorrentConsumer from swh.scheduler.updater.backend import SchedulerUpdaterBackend ght_config = ctx.obj['config'].get('ghtorrent', {}) back_config = ctx.obj['config'].get('scheduler_updater', {}) backend = SchedulerUpdaterBackend(**back_config) GHTorrentConsumer(backend, **ght_config).run() + + +# for bw compat +cli.add_alias(ghtorrent, 'ghtorrent') +cli.add_alias(listener, 'listener') +cli.add_alias(runner, 'runner') +cli.add_alias(updater, 'updater') +cli.add_alias(rpc_server, 'serve') +cli.add_alias(rpc_server, 'api-server') diff --git a/swh/scheduler/cli/task.py b/swh/scheduler/cli/task.py index 88f5de4..ddc8604 100644 --- a/swh/scheduler/cli/task.py +++ b/swh/scheduler/cli/task.py @@ -1,564 +1,564 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import json import itertools import locale import logging import arrow import csv import click from . import cli locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def format_dict(d): ret = {} for k, v in d.items(): if isinstance(v, (arrow.Arrow, datetime.date, datetime.datetime)): v = arrow.get(v).format() elif isinstance(v, dict): v = format_dict(v) ret[k] = v return ret def pretty_print_list(list, indent=0): """Pretty-print a list""" return ''.join('%s%r\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent=0): """Pretty-print a dict""" return ''.join('%s%s: %r\n' % (' ' * indent, click.style(key, bold=True), value) for key, value in sorted(dict.items())) def pretty_print_run(run, indent=4): fmt = ('{indent}{backend_id} [{status}]\n' '{indent} scheduled: {scheduled} [{started}:{ended}]') return fmt.format(indent=' '*indent, **format_dict(run)) def pretty_print_task(task, full=False): """Pretty-print a task If 'full' is True, also print the status and priority fields. >>> task = { ... 'id': 1234, ... 'arguments': { ... 'args': ['foo', 'bar', True], ... 'kwargs': {'key': 'value', 'key2': 42}, ... }, ... 'current_interval': datetime.timedelta(hours=1), ... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818), ... 'policy': 'oneshot', ... 'priority': None, ... 'status': 'next_run_not_scheduled', ... 'type': 'test_task', ... } >>> print(click.unstyle(pretty_print_task(task))) Task 1234 Next run: ...
(2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 >>> print(click.unstyle(pretty_print_task(task, full=True))) Task 1234 Next run: ... (2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 """ next_run = arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'] or '', '\n', click.style(' Policy: ', bold=True), task['policy'] or '', '\n', ] if full: lines += [ click.style(' Status: ', bold=True), task['status'] or '', '\n', click.style(' Priority: ', bold=True), task['priority'] or '', '\n', ] lines += [ click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], type=click.Choice([ 'type', 'args', 'kwargs', 'policy', 'next_run']), help='columns present in the CSV file') @click.option('--delimiter', '-d', default=',') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). 
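For instance, with '--columns type --columns kwargs --columns policy' and a ';' delimiter as in the sample below, one (hypothetical) input row could be:

        load-git;{"url": "https://example.org/repo.git"};oneshot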
Usage sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ tasks = [] now = arrow.utcnow() scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = scheduler.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('add') @click.argument('type', nargs=1, required=True) @click.argument('options', nargs=-1) @click.option('--policy', '-p', default='recurring', type=click.Choice(['recurring', 'oneshot'])) @click.option('--priority', '-P', default=None, type=click.Choice(['low', 'normal', 'high'])) @click.option('--next-run', '-n', default=None) @click.pass_context def schedule_task(ctx, type, options, policy, priority, next_run): """Schedule one task from arguments. The first argument is the name of the task type, further ones are positional and keyword argument(s) of the task, in YAML format. Keyword args are of the form key=value. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ - task add swh-lister-pypi + task add list-pypi swh-scheduler --database 'service=swh-scheduler' \ - task add swh-lister-debian --policy=oneshot distribution=stretch + task add list-debian-distribution --policy=oneshot distribution=stretch Note: if the priority is not given, the task won't have the priority set, which is considered the lowest priority level. """ from .utils import parse_options scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') now = arrow.utcnow() (args, kw) = parse_options(options) task = {'type': type, 'policy': policy, 'priority': priority, 'arguments': { 'args': args, 'kwargs': kw, }, 'next_run': DATETIME.convert(next_run or now, None, None), } created = scheduler.create_tasks([task]) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('schedule_origins') @click.argument('type', nargs=1, required=True) @click.argument('options', nargs=-1) @click.option('--batch-size', '-b', 'origin_batch_size', default=10, show_default=True, type=int, help="Number of origins per task") @click.option('--min-id', default=0, show_default=True, type=int, help="Only schedule tasks for origins whose ID is greater") @click.option('--max-id', default=None, type=int, help="Only schedule tasks for origins whose ID is lower") @click.option('--storage-url', '-g', help="URL of the (graph) storage API") @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='List only what would be scheduled.') @click.pass_context def schedule_origin_metadata_index( ctx, type, options, storage_url, origin_batch_size, min_id, max_id, dry_run): """Schedules tasks for origins that are already known. The first argument is the name of the task type, further ones are keyword argument(s) of the task in the form key=value, where value is in YAML format.
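Since values are YAML-parsed, a (hypothetical) pair like chunk_size=42 yields the integer 42 in the task's kwargs, while name=main yields the string 'main'.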
Usage sample: swh-scheduler --database 'service=swh-scheduler' \ - task schedule_origins indexer_origin_metadata + task schedule_origins index-origin-metadata """ from swh.storage import get_storage from swh.storage.algos.origin import iter_origins from .utils import parse_options, schedule_origin_batches scheduler = ctx.obj['scheduler'] storage = get_storage('remote', {'url': storage_url}) if dry_run: scheduler = None (args, kw) = parse_options(options) if args: raise click.ClickException('Only keyword arguments are allowed.') origins = iter_origins(storage, origin_from=min_id, origin_to=max_id) origin_ids = (origin['id'] for origin in origins) schedule_origin_batches( scheduler, type, origin_ids, origin_batch_size, kw) @task.command('list-pending') @click.argument('task-types', required=True, nargs=-1) @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context def list_pending_tasks(ctx, task_types, limit, before): """List the tasks that are going to be run. You can override the number of tasks to fetch. """ from swh.scheduler import compute_nb_tasks_from scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') num_tasks, num_tasks_priority = compute_nb_tasks_from(limit) output = [] for task_type in task_types: pending = scheduler.peek_ready_tasks( task_type, timestamp=before, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) output.append('Found %d %s tasks\n' % ( len(pending), task_type)) for task in pending: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('list') @click.option('--task-id', '-i', default=None, multiple=True, metavar='ID', help='List only tasks whose id is ID.') @click.option('--task-type', '-t', default=None, multiple=True, metavar='TYPE', help='List only tasks of type TYPE') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch.') @click.option('--status', '-s', multiple=True, metavar='STATUS', default=None, help='List tasks whose status is STATUS.') @click.option('--policy', '-p', default=None, type=click.Choice(['recurring', 'oneshot']), help='List tasks whose policy is POLICY.') @click.option('--priority', '-P', default=None, multiple=True, type=click.Choice(['all', 'low', 'normal', 'high']), help='List tasks whose priority is PRIORITY.') @click.option('--before', '-b', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run before the given date.') @click.option('--after', '-a', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run after the given date.') @click.option('--list-runs', '-r', is_flag=True, default=False, help='Also list past executions of each task.') @click.pass_context def list_tasks(ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs): """List tasks.
""" scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if not task_type: task_type = [x['type'] for x in scheduler.get_task_types()] # if task_id is not given, default value for status is # 'next_run_not_scheduled' # if task_id is given, default status is 'all' if task_id is None and status is None: status = ['next_run_not_scheduled'] if status and 'all' in status: status = None if priority and 'all' in priority: priority = None output = [] tasks = scheduler.search_tasks( task_id=task_id, task_type=task_type, status=status, priority=priority, policy=policy, before=before, after=after, limit=limit) if list_runs: runs = {t['id']: [] for t in tasks} for r in scheduler.get_task_runs([task['id'] for task in tasks]): runs[r['task']].append(r) else: runs = {} output.append('Found %d tasks\n' % ( len(tasks))) for task in tasks: output.append(pretty_print_task(task, full=True)) if runs.get(task['id']): output.append(click.style(' Executions:', bold=True)) for run in runs[task['id']]: output.append(pretty_print_run(run, indent=4)) click.echo('\n'.join(output)) @task.command('respawn') @click.argument('task-ids', required=True, nargs=-1) @click.option('--next-run', '-n', required=False, type=DATETIME, metavar='DATETIME', default=None, help='Re spawn the selected tasks at this date') @click.pass_context def respawn_tasks(ctx, task_ids, next_run): """Respawn tasks. Respawn tasks given by their ids (see the 'task list' command to find task ids) at the given date (immediately by default). Eg. swh-scheduler task respawn 1 3 12 """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if next_run is None: next_run = arrow.utcnow() output = [] scheduler.set_status_tasks( task_ids, status='next_run_not_scheduled', next_run=next_run) output.append('Respawn tasks %s\n' % (task_ids,)) click.echo('\n'.join(output)) @task.command('archive') @click.option('--before', '-b', default=None, help='''Task whose ended date is anterior will be archived. Default to current month's first day.''') @click.option('--after', '-a', default=None, help='''Task whose ended date is after the specified date will be archived. Default to prior month's first day.''') @click.option('--batch-index', default=1000, type=click.INT, help='Batch size of tasks to read from db to archive') @click.option('--bulk-index', default=200, type=click.INT, help='Batch size of tasks to bulk index') @click.option('--batch-clean', default=1000, type=click.INT, help='Batch size of task to clean after archival') @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='Default to list only what would be archived.') @click.option('--verbose', is_flag=True, default=False, help='Verbose mode') @click.option('--cleanup/--no-cleanup', is_flag=True, default=True, help='Clean up archived tasks (default)') @click.option('--start-from', type=click.INT, default=-1, help='(Optional) default task id to start from. Default is -1.') @click.pass_context def archive_tasks(ctx, before, after, batch_index, bulk_index, batch_clean, dry_run, verbose, cleanup, start_from): """Archive task/task_run whose (task_type is 'oneshot' and task_status is 'completed') or (task_type is 'recurring' and task_status is 'disabled'). With --dry-run flag set (default), only list those. 
""" from swh.core.utils import grouper - from .backend_es import SWHElasticSearchClient + from swh.scheduler.backend_es import SWHElasticSearchClient scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') es_client = SWHElasticSearchClient() logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) log = logging.getLogger('swh.scheduler.cli.archive') logging.getLogger('urllib3').setLevel(logging.WARN) logging.getLogger('elasticsearch').setLevel(logging.WARN) if dry_run: log.info('**DRY-RUN** (only reading db)') if not cleanup: log.info('**NO CLEANUP**') now = arrow.utcnow() # Default to archive tasks from a rolling month starting the week # prior to the current one if not before: before = now.shift(weeks=-1).format('YYYY-MM-DD') if not after: after = now.shift(weeks=-1).shift(months=-1).format('YYYY-MM-DD') log.debug('index: %s; cleanup: %s; period: [%s ; %s]' % ( not dry_run, not dry_run and cleanup, after, before)) def group_by_index_name(data, es_client=es_client): """Given a data record, determine the index's name through its ending date. This varies greatly depending on the task_run's status. """ date = data.get('started') if not date: date = data['scheduled'] return es_client.compute_index_name(date.year, date.month) def index_data(before, last_id, batch_index): tasks_in = scheduler.filter_task_to_archive( after, before, last_id=last_id, limit=batch_index) for index_name, tasks_group in itertools.groupby( tasks_in, key=group_by_index_name): log.debug('Index tasks to %s' % index_name) if dry_run: for task in tasks_group: yield task continue yield from es_client.streaming_bulk( index_name, tasks_group, source=['task_id', 'task_run_id'], chunk_size=bulk_index, log=log) gen = index_data(before, last_id=start_from, batch_index=batch_index) if cleanup: for task_ids in grouper(gen, n=batch_clean): task_ids = list(task_ids) log.info('Clean up %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) if dry_run: # no clean up continue ctx.obj['scheduler'].delete_archived_tasks(task_ids) else: for task_ids in grouper(gen, n=batch_index): task_ids = list(task_ids) log.info('Indexed %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) diff --git a/swh/scheduler/sql/30-swh-schema.sql b/swh/scheduler/sql/30-swh-schema.sql index eec3c6b..642ce4f 100644 --- a/swh/scheduler/sql/30-swh-schema.sql +++ b/swh/scheduler/sql/30-swh-schema.sql @@ -1,102 +1,113 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; +comment on column dbversion.version is 'SQL schema version'; +comment on column dbversion.release is 'Version deployment timestamp'; +comment on column dbversion.description is 'Version description'; insert into dbversion (version, release, description) - values (12, now(), 'Work In Progress'); + values (13, now(), 'Work In Progress'); create table task_type ( type text primary key, description text not null, backend_name text not null, default_interval interval, min_interval interval, max_interval interval, backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on 
column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; create type task_priority as enum('high', 'normal', 'low'); comment on type task_priority is 'Priority of the given task'; create table priority_ratio( id task_priority primary key, ratio float not null ); comment on table priority_ratio is 'Oneshot task''s reading ratio per priority'; comment on column priority_ratio.id is 'Task priority id'; comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority'; insert into priority_ratio (id, ratio) values ('high', 0.5); insert into priority_ratio (id, ratio) values ('normal', 0.3); insert into priority_ratio (id, ratio) values ('low', 0.2); create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, current_interval interval, status task_status not null, policy task_policy not null default 'recurring', retries_left bigint not null default 0, priority task_priority references priority_ratio(id), check (policy <> 'recurring' or current_interval is not null) ); comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. 
' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; comment on column task.priority is 'Priority of the given task'; +comment on column task.id is 'Task identifier'; +comment on column task.type is 'References task_type table'; +comment on column task.status is 'Task status (''next_run_not_scheduled'', ''next_run_scheduled'', ''completed'', ''disabled'')'; create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. ' 'For instance, the worker that took on the job, ' 'or the logs for the run.'; +comment on column task_run.id is 'Task run identifier'; +comment on column task_run.task is 'References task table'; +comment on column task_run.scheduled is 'Scheduled run time for task'; +comment on column task_run.started is 'Task starting time'; +comment on column task_run.ended is 'Task ending time'; diff --git a/swh/scheduler/sql/50-swh-data.sql b/swh/scheduler/sql/50-swh-data.sql index 9ee3b08..512a844 100644 --- a/swh/scheduler/sql/50-swh-data.sql +++ b/swh/scheduler/sql/50-swh-data.sql @@ -1,450 +1,390 @@ insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-svn-from-archive', 'Loading svn repositories from svn dump', 'swh.loader.svn.tasks.MountAndLoadSvnRepository', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-svn', 'Create dump of a remote svn repository, mount it and load it', 'swh.loader.svn.tasks.DumpMountAndLoadSvnRepository', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, num_retries, max_queue_length) values ( - 'load-deposit-from-archive', + 'load-deposit', 'Loading deposit archive into swh through swh-loader-tar', 'swh.deposit.loader.tasks.LoadDepositArchiveTsk', '1 day', '1 day', '1 day', 1, 3, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, num_retries, max_queue_length) values ( - 'check-deposit-archive', + 'check-deposit', 'Pre-checking deposit step before loading into swh archive', 'swh.deposit.loader.tasks.ChecksDepositTsk', '1 day', '1 day', '1 day', 1, 3, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values (
'cook-vault-bundle', 'Cook a Vault bundle', 'swh.vault.cooking_tasks.SWHCookingTask', '1 day', '1 day', '1 day', 1, 10000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-hg', 'Loading mercurial repository swh-loader-mercurial', 'swh.loader.mercurial.tasks.LoadMercurial', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-hg-from-archive', 'Loading archive mercurial repository swh-loader-mercurial', 'swh.loader.mercurial.tasks.LoadArchiveMercurial', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-git', 'Update an origin of type git', 'swh.loader.git.tasks.UpdateGitRepository', '64 days', '12:00:00', '64 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-bitbucket-incremental', 'Incrementally list BitBucket', 'swh.lister.bitbucket.tasks.IncrementalBitBucketLister', '1 day', '1 day', '1 day', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-bitbucket-full', 'Full update of Bitbucket repos list', 'swh.lister.bitbucket.tasks.FullBitBucketRelister', '90 days', '90 days', '90 days', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-github-incremental', 'Incrementally list GitHub', 'swh.lister.github.tasks.IncrementalGitHubLister', '1 day', '1 day', '1 day', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-github-full', 'Full update of GitHub repos list', 'swh.lister.github.tasks.FullGitHubRelister', '90 days', '90 days', '90 days', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-debian-distribution', 'List a Debian distribution', 'swh.lister.debian.tasks.DebianListerTask', '1 day', '1 day', '1 day', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length, num_retries, retry_delay) values ( 'load-debian-package', 'Load a Debian package', 'swh.loader.debian.tasks.LoadDebianPackage', NULL, NULL, NULL, NULL, 5000, 5, '1 hour'); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-gitlab-incremental', 'Incrementally list a Gitlab instance', 'swh.lister.gitlab.tasks.IncrementalGitLabLister', '1 day', '1 day', '1 day', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-gitlab-full', 'Full update of a Gitlab instance''s repos list', 'swh.lister.gitlab.tasks.FullGitLabRelister', '90 days', '90 days', '90 days', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-pypi', 'Full pypi lister', 'swh.lister.pypi.tasks.PyPIListerTask', '1 days', '1 days', '1 days', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, 
backoff_factor, max_queue_length) values ( 'load-pypi', 'Load Pypi origin', 'swh.loader.pypi.tasks.LoadPyPI', '64 days', '12:00:00', '64 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-mimetype', 'Mimetype indexer task', 'swh.indexer.tasks.ContentMimetype', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-mimetype-for-range', 'Mimetype Range indexer task', 'swh.indexer.tasks.ContentRangeMimetype', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-fossology-license', 'Fossology license indexer task', 'swh.indexer.tasks.ContentFossologyLicense', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-fossology-license-for-range', 'Fossology license range indexer task', 'swh.indexer.tasks.ContentRangeFossologyLicense', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-origin-head', 'Origin Head indexer task', 'swh.indexer.tasks.OriginHead', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-revision-metadata', 'Revision Metadata indexer task', 'swh.indexer.tasks.RevisionMetadata', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-origin-metadata', 'Origin Metadata indexer task', 'swh.indexer.tasks.OriginMetadata', '1 day', '12:00:00', '1 days', 2, 20000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor) values ( 'list-npm-full', 'Full npm lister', 'swh.lister.npm.tasks.NpmListerTask', '1 week', '1 week', '1 week', 1); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-npm', 'Load npm origin', 'swh.loader.npm.tasks.LoadNpm', '64 days', '12:00:00', '64 days', 2, 5000); ---- For backward compatibility with previous task names ---- TODO: remove this once all swh components have been migrated to use the ---- new task names - -create or replace function swh_add_backward_compatible_task_name( - old_task_name text, new_task_name text) - returns void - language sql -as $$ - insert into task_type ( - type, description, backend_name, - default_interval, min_interval, - max_interval, backoff_factor, - max_queue_length, num_retries, - retry_delay) - (select old_task_name, description, - backend_name, default_interval, - min_interval, max_interval, backoff_factor, - max_queue_length, num_retries, retry_delay - from task_type where type = new_task_name); -$$; - -select swh_add_backward_compatible_task_name('swh-loader-mount-dump-and-load-svn-repository', - 'load-svn-from-archive'); - -select swh_add_backward_compatible_task_name('origin-update-svn', - 'load-svn'); - -select 
swh_add_backward_compatible_task_name('swh-deposit-archive-loading', - 'load-deposit-from-archive'); - -select swh_add_backward_compatible_task_name('swh-deposit-archive-checks', - 'check-deposit-archive'); - -select swh_add_backward_compatible_task_name('swh-vault-cooking', - 'cook-vault-bundle'); - -select swh_add_backward_compatible_task_name('origin-update-hg', - 'load-hg'); - -select swh_add_backward_compatible_task_name('origin-load-archive-hg', - 'load-hg-from-archive'); - -select swh_add_backward_compatible_task_name('origin-update-git', - 'load-git'); - -select swh_add_backward_compatible_task_name('swh-lister-bitbucket-incremental', - 'list-bitbucket-incremental'); - -select swh_add_backward_compatible_task_name('swh-lister-bitbucket-full', - 'list-bitbucket-full'); - -select swh_add_backward_compatible_task_name('swh-lister-github-incremental', - 'list-github-incremental'); - -select swh_add_backward_compatible_task_name('swh-lister-github-full', - 'list-github-full'); - -select swh_add_backward_compatible_task_name('swh-lister-debian', - 'list-debian-distribution'); - -select swh_add_backward_compatible_task_name('load-deb-package', - 'load-debian-package'); - -select swh_add_backward_compatible_task_name('swh-lister-gitlab-incremental', - 'list-gitlab-incremental'); - -select swh_add_backward_compatible_task_name('swh-lister-gitlab-full', - 'list-gitlab-full'); - -select swh_add_backward_compatible_task_name('swh-lister-pypi', - 'list-pypi'); - -select swh_add_backward_compatible_task_name('origin-update-pypi', - 'load-pypi'); - -select swh_add_backward_compatible_task_name('indexer_mimetype', - 'index-mimetype'); - -select swh_add_backward_compatible_task_name('indexer_range_mimetype', - 'index-mimetype-for-range'); - -select swh_add_backward_compatible_task_name('indexer_fossology_license', - 'index-fossology-license'); - -select swh_add_backward_compatible_task_name('indexer_range_fossology_license', - 'index-fossology-license-for-range'); - -select swh_add_backward_compatible_task_name('indexer_origin_head', - 'index-origin-head'); - -select swh_add_backward_compatible_task_name('indexer_revision_metadata', - 'index-revision-metadata'); +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor) +values ( + 'list-gnu-full', + 'Full gnu lister', + 'swh.lister.gnu.tasks.GNUListerTask', + '90 days', + '90 days', + '90 days', 1); -select swh_add_backward_compatible_task_name('indexer_origin_metadata', - 'index-origin-metadata'); +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor) +values ( + 'list-phabricator-full', + 'Full Phabricator instance lister', + 'swh.lister.phabricator.tasks.FullPhabricatorLister', + '90 days', + '90 days', + '90 days', 1); -drop function swh_add_backward_compatible_task_name(text, text); +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor) +values ( + 'list-phabricator-incremental', + 'Incremental Phabricator instance lister', + 'swh.lister.phabricator.tasks.IncrementalPhabricatorLister', + '1 week', + '1 week', + '1 week', 1); diff --git a/swh/scheduler/tests/test_api_client.py b/swh/scheduler/tests/test_api_client.py index 5b52588..e3c194b 100644 --- a/swh/scheduler/tests/test_api_client.py +++ b/swh/scheduler/tests/test_api_client.py @@ -1,53 +1,54 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the 
top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest import requests -from swh.core.tests.server_testing import ServerTestFixture +from swh.core.api.tests.server_testing import ServerTestFixture + from swh.scheduler import get_scheduler from swh.scheduler.api.server import app from swh.scheduler.tests.test_scheduler import CommonSchedulerTest class RemoteSchedulerTest(CommonSchedulerTest, ServerTestFixture, unittest.TestCase): """Test the remote scheduler API. This class doesn't define any tests as we want identical functionality between local and remote scheduler. All the tests are therefore defined in CommonSchedulerTest. """ def setUp(self): self.config = { 'scheduler': { 'cls': 'local', 'args': { 'db': 'dbname=%s' % self.TEST_DB_NAME, } } } self.app = app # this will set up the local scheduler... super().setUp() # accessible through a remote scheduler on the # given port self.backend = get_scheduler('remote', {'url': self.url()}) def test_site_map(self): sitemap = requests.get(self.url() + 'site-map') assert sitemap.headers['Content-Type'] == 'application/json' sitemap = sitemap.json() rules = set(x['rule'] for x in sitemap) # we expect at least these rules expected_rules = set('/'+rule for rule in ( 'set_status_tasks', 'create_task_type', 'get_task_type', 'get_task_types', 'create_tasks', 'disable_tasks', 'get_tasks', 'search_tasks', 'peek_ready_tasks', 'grab_ready_tasks', 'schedule_task_run', 'mass_schedule_task_runs', 'start_task_run', 'end_task_run', 'filter_task_to_archive', 'delete_archived_tasks')) assert rules.issuperset(expected_rules), expected_rules - rules diff --git a/swh/scheduler/tests/test_scheduler.py b/swh/scheduler/tests/test_scheduler.py index c553782..712bf5d 100644 --- a/swh/scheduler/tests/test_scheduler.py +++ b/swh/scheduler/tests/test_scheduler.py @@ -1,637 +1,637 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import os import random import unittest import uuid from collections import defaultdict import psycopg2 from arrow import utcnow import pytest -from swh.core.tests.db_testing import SingleDbTestFixture +from swh.core.db.tests.db_testing import SingleDbTestFixture from swh.scheduler import get_scheduler from .
import SQL_DIR TASK_TYPES = { 'git': { 'type': 'update-git', 'description': 'Update a git repository', 'backend_name': 'swh.loader.git.tasks.UpdateGitRepository', 'default_interval': datetime.timedelta(days=64), 'min_interval': datetime.timedelta(hours=12), 'max_interval': datetime.timedelta(days=64), 'backoff_factor': 2, 'max_queue_length': None, 'num_retries': 7, 'retry_delay': datetime.timedelta(hours=2), }, 'hg': { 'type': 'update-hg', 'description': 'Update a mercurial repository', 'backend_name': 'swh.loader.mercurial.tasks.UpdateHgRepository', 'default_interval': datetime.timedelta(days=64), 'min_interval': datetime.timedelta(hours=12), 'max_interval': datetime.timedelta(days=64), 'backoff_factor': 2, 'max_queue_length': None, 'num_retries': 7, 'retry_delay': datetime.timedelta(hours=2), }, } TEMPLATES = { 'git': { 'type': 'update-git', 'arguments': { 'args': [], 'kwargs': {}, }, 'next_run': None, }, 'hg': { 'type': 'update-hg', 'arguments': { 'args': [], 'kwargs': {}, }, 'next_run': None, 'policy': 'oneshot', } } def subdict(d, keys=None, excl=()): if keys is None: keys = [k for k in d.keys()] return {k: d[k] for k in keys if k not in excl} @pytest.mark.db class CommonSchedulerTest(SingleDbTestFixture): TEST_DB_NAME = 'softwareheritage-scheduler-test' TEST_DB_DUMP = os.path.join(SQL_DIR, '*.sql') def tearDown(self): self.empty_tables() super().tearDown() def empty_tables(self, whitelist=["priority_ratio"]): query = """SELECT table_name FROM information_schema.tables WHERE table_schema = %%s and table_name not in (%s) """ % ','.join(map(lambda t: "'%s'" % t, whitelist)) self.cursor.execute(query, ('public', )) tables = set(table for (table,) in self.cursor.fetchall()) for table in tables: self.cursor.execute('truncate table %s cascade' % table) self.conn.commit() def test_add_task_type(self): tt = TASK_TYPES['git'] self.backend.create_task_type(tt) self.assertEqual(tt, self.backend.get_task_type(tt['type'])) with self.assertRaisesRegex(psycopg2.IntegrityError, r'\(type\)=\(%s\)' % tt['type']): self.backend.create_task_type(tt) tt2 = TASK_TYPES['hg'] self.backend.create_task_type(tt2) self.assertEqual(tt, self.backend.get_task_type(tt['type'])) self.assertEqual(tt2, self.backend.get_task_type(tt2['type'])) def test_get_task_types(self): tt, tt2 = TASK_TYPES['git'], TASK_TYPES['hg'] self.backend.create_task_type(tt) self.backend.create_task_type(tt2) self.assertCountEqual([tt2, tt], self.backend.get_task_types()) @staticmethod def _task_from_template(template, next_run, priority, *args, **kwargs): ret = copy.deepcopy(template) ret['next_run'] = next_run if priority: ret['priority'] = priority if args: ret['arguments']['args'] = list(args) if kwargs: ret['arguments']['kwargs'] = kwargs return ret def _pop_priority(self, priorities): if not priorities: return None for priority, remains in priorities.items(): if remains > 0: priorities[priority] = remains - 1 return priority return None def _tasks_from_template(self, template, max_timestamp, num, num_priority=0, priorities=None): if num_priority and priorities: priorities = { priority: ratio * num_priority for priority, ratio in priorities.items() } tasks = [] for i in range(num + num_priority): priority = self._pop_priority(priorities) tasks.append(self._task_from_template( template, max_timestamp - datetime.timedelta(microseconds=i), priority, 'argument-%03d' % i, **{'kwarg%03d' % i: 'bogus-kwarg'} )) return tasks def _create_task_types(self): for tt in TASK_TYPES.values(): self.backend.create_task_type(tt) def test_create_tasks(self): 
priority_ratio = self._priority_ratio() self._create_task_types() num_tasks_priority = 100 tasks_1 = self._tasks_from_template( TEMPLATES['git'], utcnow(), 100) tasks_2 = self._tasks_from_template( TEMPLATES['hg'], utcnow(), 100, num_tasks_priority, priorities=priority_ratio) tasks = tasks_1 + tasks_2 # tasks are returned only once with their ids ret1 = self.backend.create_tasks(tasks + tasks_1 + tasks_2) set_ret1 = set([t['id'] for t in ret1]) # creating the same set results in the same ids ret = self.backend.create_tasks(tasks) set_ret = set([t['id'] for t in ret]) # Idempotence results self.assertEqual(set_ret, set_ret1) self.assertEqual(len(ret), len(ret1)) ids = set() actual_priorities = defaultdict(int) for task, orig_task in zip(ret, tasks): task = copy.deepcopy(task) task_type = TASK_TYPES[orig_task['type'].split('-')[-1]] self.assertNotIn(task['id'], ids) self.assertEqual(task['status'], 'next_run_not_scheduled') self.assertEqual(task['current_interval'], task_type['default_interval']) self.assertEqual(task['policy'], orig_task.get('policy', 'recurring')) priority = task.get('priority') if priority: actual_priorities[priority] += 1 self.assertEqual(task['retries_left'], task_type['num_retries'] or 0) ids.add(task['id']) del task['id'] del task['status'] del task['current_interval'] del task['retries_left'] if 'policy' not in orig_task: del task['policy'] if 'priority' not in orig_task: del task['priority'] self.assertEqual(task, orig_task) self.assertEqual(dict(actual_priorities), { priority: int(ratio * num_tasks_priority) for priority, ratio in priority_ratio.items() }) def test_peek_ready_tasks_no_priority(self): self._create_task_types() t = utcnow() task_type = TEMPLATES['git']['type'] tasks = self._tasks_from_template(TEMPLATES['git'], t, 100) random.shuffle(tasks) self.backend.create_tasks(tasks) ready_tasks = self.backend.peek_ready_tasks(task_type) self.assertEqual(len(ready_tasks), len(tasks)) for i in range(len(ready_tasks) - 1): self.assertLessEqual(ready_tasks[i]['next_run'], ready_tasks[i+1]['next_run']) # Only get the first few ready tasks limit = random.randrange(5, 5 + len(tasks)//2) ready_tasks_limited = self.backend.peek_ready_tasks( task_type, num_tasks=limit) self.assertEqual(len(ready_tasks_limited), limit) self.assertCountEqual(ready_tasks_limited, ready_tasks[:limit]) # Limit by timestamp max_ts = tasks[limit-1]['next_run'] ready_tasks_timestamped = self.backend.peek_ready_tasks( task_type, timestamp=max_ts) for ready_task in ready_tasks_timestamped: self.assertLessEqual(ready_task['next_run'], max_ts) # Make sure we get proper behavior for the first ready tasks self.assertCountEqual( ready_tasks[:len(ready_tasks_timestamped)], ready_tasks_timestamped, ) # Limit by both ready_tasks_both = self.backend.peek_ready_tasks( task_type, timestamp=max_ts, num_tasks=limit//3) self.assertLessEqual(len(ready_tasks_both), limit//3) for ready_task in ready_tasks_both: self.assertLessEqual(ready_task['next_run'], max_ts) self.assertIn(ready_task, ready_tasks[:limit//3]) def _priority_ratio(self): self.cursor.execute('select id, ratio from priority_ratio') priority_ratio = {} for row in self.cursor.fetchall(): priority_ratio[row[0]] = row[1] return priority_ratio def test_peek_ready_tasks_mixed_priorities(self): priority_ratio = self._priority_ratio() self._create_task_types() t = utcnow() task_type = TEMPLATES['git']['type'] num_tasks_priority = 100 num_tasks_no_priority = 100 # Create tasks with and without priorities tasks = self._tasks_from_template( TEMPLATES['git'],
t, num=num_tasks_no_priority, num_priority=num_tasks_priority, priorities=priority_ratio) random.shuffle(tasks) self.backend.create_tasks(tasks) # take all available tasks ready_tasks = self.backend.peek_ready_tasks( task_type) self.assertEqual(len(ready_tasks), len(tasks)) self.assertEqual(num_tasks_priority + num_tasks_no_priority, len(ready_tasks)) count_tasks_per_priority = defaultdict(int) for task in ready_tasks: priority = task.get('priority') if priority: count_tasks_per_priority[priority] += 1 self.assertEqual(dict(count_tasks_per_priority), { priority: int(ratio * num_tasks_priority) for priority, ratio in priority_ratio.items() }) # Only get some ready tasks num_tasks = random.randrange(5, 5 + num_tasks_no_priority//2) num_tasks_priority = random.randrange(5, num_tasks_priority//2) ready_tasks_limited = self.backend.peek_ready_tasks( task_type, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) count_tasks_per_priority = defaultdict(int) for task in ready_tasks_limited: priority = task.get('priority') count_tasks_per_priority[priority] += 1 import math for priority, ratio in priority_ratio.items(): expected_count = math.ceil(ratio * num_tasks_priority) actual_prio = count_tasks_per_priority[priority] self.assertTrue( actual_prio == expected_count or actual_prio == expected_count + 1) self.assertEqual(count_tasks_per_priority[None], num_tasks) def test_grab_ready_tasks(self): priority_ratio = self._priority_ratio() self._create_task_types() t = utcnow() task_type = TEMPLATES['git']['type'] num_tasks_priority = 100 num_tasks_no_priority = 100 # Create tasks with and without priorities tasks = self._tasks_from_template( TEMPLATES['git'], t, num=num_tasks_no_priority, num_priority=num_tasks_priority, priorities=priority_ratio) random.shuffle(tasks) self.backend.create_tasks(tasks) first_ready_tasks = self.backend.peek_ready_tasks( task_type, num_tasks=10, num_tasks_priority=10) grabbed_tasks = self.backend.grab_ready_tasks( task_type, num_tasks=10, num_tasks_priority=10) for peeked, grabbed in zip(first_ready_tasks, grabbed_tasks): self.assertEqual(peeked['status'], 'next_run_not_scheduled') del peeked['status'] self.assertEqual(grabbed['status'], 'next_run_scheduled') del grabbed['status'] self.assertEqual(peeked, grabbed) self.assertEqual(peeked['priority'], grabbed['priority']) def test_get_tasks(self): self._create_task_types() t = utcnow() tasks = self._tasks_from_template(TEMPLATES['git'], t, 100) tasks = self.backend.create_tasks(tasks) random.shuffle(tasks) while len(tasks) > 1: length = random.randrange(1, len(tasks)) cur_tasks = tasks[:length] tasks[:length] = [] ret = self.backend.get_tasks(task['id'] for task in cur_tasks) self.assertCountEqual(ret, cur_tasks) def test_search_tasks(self): def make_real_dicts(l): """RealDictRow is not a real dict.""" return [dict(d.items()) for d in l] self._create_task_types() t = utcnow() tasks = self._tasks_from_template(TEMPLATES['git'], t, 100) tasks = self.backend.create_tasks(tasks) self.assertCountEqual( make_real_dicts(self.backend.search_tasks()), make_real_dicts(tasks)) def test_filter_task_to_archive(self): """Filtering should only list disabled recurring or completed oneshot tasks """ self._create_task_types() _time = utcnow() recurring = self._tasks_from_template(TEMPLATES['git'], _time, 12) oneshots = self._tasks_from_template(TEMPLATES['hg'], _time, 12) total_tasks = len(recurring) + len(oneshots) # simulate scheduling tasks pending_tasks = self.backend.create_tasks(recurring + oneshots) backend_tasks = [{ 'task':
    def test_filter_task_to_archive(self):
        """Filtering should only list disabled recurring tasks or completed
        oneshot tasks.

        """
        self._create_task_types()
        _time = utcnow()
        recurring = self._tasks_from_template(TEMPLATES['git'], _time, 12)
        oneshots = self._tasks_from_template(TEMPLATES['hg'], _time, 12)
        total_tasks = len(recurring) + len(oneshots)

        # simulate scheduling tasks
        pending_tasks = self.backend.create_tasks(recurring + oneshots)
        backend_tasks = [{
            'task': task['id'],
            'backend_id': str(uuid.uuid4()),
            'scheduled': utcnow(),
        } for task in pending_tasks]
        self.backend.mass_schedule_task_runs(backend_tasks)

        # simulate the tasks being done
        _tasks = []
        for task in backend_tasks:
            t = self.backend.end_task_run(
                task['backend_id'], status='eventful')
            _tasks.append(t)

        # Randomly update each task's status per policy
        status_per_policy = {'recurring': 0, 'oneshot': 0}
        status_choice = {
            # policy: [tuple (1-for-filtering, 'associated-status')]
            'recurring': [(1, 'disabled'),
                          (0, 'completed'),
                          (0, 'next_run_not_scheduled')],
            'oneshot': [(0, 'next_run_not_scheduled'),
                        (1, 'disabled'),
                        (1, 'completed')],
        }

        tasks_to_update = defaultdict(list)
        _task_ids = defaultdict(list)
        # randomly 'disable' recurring tasks or 'complete' oneshot tasks
        for task in pending_tasks:
            policy = task['policy']
            _task_ids[policy].append(task['id'])
            status = random.choice(status_choice[policy])
            if status[0] != 1:
                continue
            # elected for filtering
            status_per_policy[policy] += status[0]
            tasks_to_update[policy].append(task['id'])

        self.backend.disable_tasks(tasks_to_update['recurring'])
        # hack: change the status to something other than completed/disabled
        self.backend.set_status_tasks(
            _task_ids['oneshot'], status='next_run_not_scheduled')
        # complete the tasks to update
        self.backend.set_status_tasks(
            tasks_to_update['oneshot'], status='completed')

        total_tasks_filtered = (status_per_policy['recurring'] +
                                status_per_policy['oneshot'])

        # retrieve tasks to archive
        after = _time.shift(days=-1).format('YYYY-MM-DD')
        before = utcnow().shift(days=1).format('YYYY-MM-DD')
        tasks_to_archive = list(self.backend.filter_task_to_archive(
            after_ts=after, before_ts=before, limit=total_tasks))

        self.assertEqual(len(tasks_to_archive), total_tasks_filtered)

        actual_filtered_per_status = {'recurring': 0, 'oneshot': 0}
        for task in tasks_to_archive:
            actual_filtered_per_status[task['task_policy']] += 1
        self.assertEqual(actual_filtered_per_status, status_per_policy)

    def test_delete_archived_tasks(self):
        self._create_task_types()
        _time = utcnow()
        recurring = self._tasks_from_template(
            TEMPLATES['git'], _time, 12)
        oneshots = self._tasks_from_template(
            TEMPLATES['hg'], _time, 12)
        total_tasks = len(recurring) + len(oneshots)
        pending_tasks = self.backend.create_tasks(recurring + oneshots)
        backend_tasks = [{
            'task': task['id'],
            'backend_id': str(uuid.uuid4()),
            'scheduled': utcnow(),
        } for task in pending_tasks]
        self.backend.mass_schedule_task_runs(backend_tasks)

        _tasks = []
        percent = random.randint(0, 100)  # random election removal boundary
        for task in backend_tasks:
            t = self.backend.end_task_run(
                task['backend_id'], status='eventful')
            c = random.randint(0, 100)
            if c <= percent:
                _tasks.append({'task_id': t['task'], 'task_run_id': t['id']})

        self.backend.delete_archived_tasks(_tasks)

        self.cursor.execute('select count(*) from task')
        tasks_count = self.cursor.fetchone()

        self.cursor.execute('select count(*) from task_run')
        tasks_run_count = self.cursor.fetchone()

        self.assertEqual(tasks_count[0], total_tasks - len(_tasks))
        self.assertEqual(tasks_run_count[0], total_tasks - len(_tasks))
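    # Taken together, the two tests above pin down the archival contract:
    # filter_task_to_archive() selects only disabled recurring tasks and
    # completed oneshot tasks whose runs fall within [after_ts, before_ts],
    # and delete_archived_tasks() then drops the matching task/task_run
    # rows. A sketch of an archiver loop built on that contract (the row
    # field names below are illustrative, not guaranteed by the API):
    #
    #     rows = list(backend.filter_task_to_archive(
    #         after_ts='2019-01-01', before_ts='2019-02-01', limit=1000))
    #     # ... copy `rows` to long-term storage here ...
    #     backend.delete_archived_tasks([
    #         {'task_id': r['task_id'], 'task_run_id': r['task_run_id']}
    #         for r in rows])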
    def test_get_task_runs_no_task(self):
        '''No task exists in the scheduler's db: get_task_runs() should
        always return an empty list.

        '''
        self.assertFalse(self.backend.get_task_runs(task_ids=()))
        self.assertFalse(self.backend.get_task_runs(task_ids=(1, 2, 3)))
        self.assertFalse(self.backend.get_task_runs(task_ids=(1, 2, 3),
                                                    limit=10))

    def test_get_task_runs_no_task_executed(self):
        '''No task has been executed yet: get_task_runs() should always
        return an empty list.

        '''
        self._create_task_types()
        _time = utcnow()
        recurring = self._tasks_from_template(
            TEMPLATES['git'], _time, 12)
        oneshots = self._tasks_from_template(
            TEMPLATES['hg'], _time, 12)
        self.backend.create_tasks(recurring + oneshots)

        self.assertFalse(self.backend.get_task_runs(
            task_ids=()))
        self.assertFalse(self.backend.get_task_runs(
            task_ids=(1, 2, 3)))
        self.assertFalse(self.backend.get_task_runs(
            task_ids=(1, 2, 3), limit=10))

    def test_get_task_runs_with_scheduled(self):
        '''Some tasks have been scheduled but not executed yet:
        get_task_runs() should not return an empty list, and limit should
        behave as expected.

        '''
        self._create_task_types()
        _time = utcnow()
        recurring = self._tasks_from_template(
            TEMPLATES['git'], _time, 12)
        oneshots = self._tasks_from_template(
            TEMPLATES['hg'], _time, 12)
        total_tasks = len(recurring) + len(oneshots)
        pending_tasks = self.backend.create_tasks(recurring + oneshots)
        backend_tasks = [{
            'task': task['id'],
            'backend_id': str(uuid.uuid4()),
            'scheduled': utcnow(),
        } for task in pending_tasks]
        self.backend.mass_schedule_task_runs(backend_tasks)

        self.assertFalse(self.backend.get_task_runs(
            task_ids=[total_tasks + 1]))

        btask = backend_tasks[0]
        runs = self.backend.get_task_runs(
            task_ids=[btask['task']])
        self.assertEqual(len(runs), 1)
        run = runs[0]

        self.assertEqual(subdict(run, excl=('id',)),
                         {'task': btask['task'],
                          'backend_id': btask['backend_id'],
                          'scheduled': btask['scheduled'],
                          'started': None,
                          'ended': None,
                          'metadata': None,
                          'status': 'scheduled',
                          })

        runs = self.backend.get_task_runs(
            task_ids=[bt['task'] for bt in backend_tasks], limit=2)
        self.assertEqual(len(runs), 2)

        runs = self.backend.get_task_runs(
            task_ids=[bt['task'] for bt in backend_tasks])
        self.assertEqual(len(runs), total_tasks)

        keys = ('task', 'backend_id', 'scheduled')
        self.assertEqual(sorted([subdict(x, keys) for x in runs],
                                key=lambda x: x['task']),
                         backend_tasks)
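    # The run record lifecycle asserted here and below:
    # mass_schedule_task_runs() creates runs with status 'scheduled',
    # start_task_run() moves them to 'started', and end_task_run() records
    # the final status; metadata passed at each step is merged into the
    # run's 'metadata' field.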
    def test_get_task_runs_with_executed(self):
        '''Some tasks have been executed: get_task_runs() should not return
        an empty list, and limit should behave as expected.

        '''
        self._create_task_types()
        _time = utcnow()
        recurring = self._tasks_from_template(
            TEMPLATES['git'], _time, 12)
        oneshots = self._tasks_from_template(
            TEMPLATES['hg'], _time, 12)
        pending_tasks = self.backend.create_tasks(recurring + oneshots)
        backend_tasks = [{
            'task': task['id'],
            'backend_id': str(uuid.uuid4()),
            'scheduled': utcnow(),
        } for task in pending_tasks]
        self.backend.mass_schedule_task_runs(backend_tasks)

        btask = backend_tasks[0]
        ts = utcnow()
        self.backend.start_task_run(btask['backend_id'],
                                    metadata={'something': 'stupid'},
                                    timestamp=ts)
        runs = self.backend.get_task_runs(task_ids=[btask['task']])
        self.assertEqual(len(runs), 1)
        self.assertEqual(subdict(runs[0], excl=('id')), {
            'task': btask['task'],
            'backend_id': btask['backend_id'],
            'scheduled': btask['scheduled'],
            'started': ts,
            'ended': None,
            'metadata': {'something': 'stupid'},
            'status': 'started',
        })

        ts2 = utcnow()
        self.backend.end_task_run(btask['backend_id'],
                                  metadata={'other': 'stuff'},
                                  timestamp=ts2,
                                  status='eventful')
        runs = self.backend.get_task_runs(task_ids=[btask['task']])
        self.assertEqual(len(runs), 1)
        self.assertEqual(subdict(runs[0], excl=('id')), {
            'task': btask['task'],
            'backend_id': btask['backend_id'],
            'scheduled': btask['scheduled'],
            'started': ts,
            'ended': ts2,
            'metadata': {'something': 'stupid', 'other': 'stuff'},
            'status': 'eventful',
        })


class LocalSchedulerTest(CommonSchedulerTest, unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.config = {'db': 'dbname=' + self.TEST_DB_NAME}
        self.backend = get_scheduler('local', self.config)
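The tests above pin down the backend's task lifecycle: create_tasks() is
idempotent on identical task dicts, peek_ready_tasks() is a read-only view
(optionally bounded by num_tasks and timestamp), and grab_ready_tasks()
additionally flips the returned tasks to 'next_run_scheduled'. A minimal
sketch of that flow against a local backend, assuming an existing 'load-git'
task type; the DSN and repository URL are placeholders:

    from swh.scheduler import get_scheduler
    from swh.scheduler.utils import create_oneshot_task_dict

    scheduler = get_scheduler('local', {'db': 'dbname=swh-scheduler-dev'})

    # identical task dicts are created once and yield the same task id
    task = create_oneshot_task_dict('load-git', 'https://example.org/repo.git')
    created = scheduler.create_tasks([task])

    ready = scheduler.peek_ready_tasks('load-git', num_tasks=10)    # read-only
    grabbed = scheduler.grab_ready_tasks('load-git', num_tasks=10)  # schedules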
diff --git a/swh/scheduler/tests/updater/test_writer.py b/swh/scheduler/tests/updater/test_writer.py
index 77dee54..2e084e9 100644
--- a/swh/scheduler/tests/updater/test_writer.py
+++ b/swh/scheduler/tests/updater/test_writer.py
@@ -1,158 +1,153 @@
# Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import os
import unittest
from glob import glob

import pytest

from swh.core.utils import numfile_sortkey as sortkey
-from swh.core.tests.db_testing import DbTestFixture
+from swh.core.db.tests.db_testing import DbTestFixture
from swh.scheduler.tests import SQL_DIR
from swh.scheduler.updater.events import LISTENED_EVENTS, SWHEvent
from swh.scheduler.updater.writer import UpdaterWriter

from . import UpdaterTestUtil


@pytest.mark.db
class CommonSchedulerTest(DbTestFixture):
    TEST_SCHED_DB = 'softwareheritage-scheduler-test'
    TEST_SCHED_DUMP = os.path.join(SQL_DIR, '*.sql')

    TEST_SCHED_UPDATER_DB = 'softwareheritage-scheduler-updater-test'
    TEST_SCHED_UPDATER_DUMP = os.path.join(SQL_DIR, 'updater', '*.sql')

    @classmethod
    def setUpClass(cls):
        cls.add_db(cls.TEST_SCHED_DB,
                   [(sqlfn, 'psql') for sqlfn in
                    sorted(glob(cls.TEST_SCHED_DUMP), key=sortkey)])
        cls.add_db(cls.TEST_SCHED_UPDATER_DB,
                   [(sqlfn, 'psql') for sqlfn in
                    sorted(glob(cls.TEST_SCHED_UPDATER_DUMP), key=sortkey)])
        super().setUpClass()

    def tearDown(self):
        self.reset_db_tables(self.TEST_SCHED_UPDATER_DB)
        self.reset_db_tables(self.TEST_SCHED_DB,
                             excluded=['task_type', 'priority_ratio'])
        super().tearDown()


class UpdaterWriterTest(UpdaterTestUtil, CommonSchedulerTest,
                        unittest.TestCase):
    def setUp(self):
        super().setUp()

        config = {
            'scheduler': {
                'cls': 'local',
                'args': {
                    'db': 'dbname=softwareheritage-scheduler-test',
                },
            },
            'scheduler_updater': {
                'cls': 'local',
                'args': {
                    'db': 'dbname=softwareheritage-scheduler-updater-test',
                    'cache_read_limit': 5,
                },
            },
            'updater_writer': {
                'pause': 0.1,
                'verbose': False,
            },
        }
        self.writer = UpdaterWriter(**config)
        self.scheduler_backend = self.writer.scheduler_backend
        self.scheduler_updater_backend = self.writer.scheduler_updater_backend

    def test_run_ko(self):
        """Only git tasks are supported for now; other types are dismissed.

        """
        ready_events = [
            SWHEvent(
                self._make_simple_event(event_type, 'origin-%s' % i, 'svn'))
            for i, event_type in enumerate(LISTENED_EVENTS)
        ]

        expected_length = len(ready_events)
        self.scheduler_updater_backend.cache_put(ready_events)
        data = list(self.scheduler_updater_backend.cache_read())
        self.assertEqual(len(data), expected_length)

-        r = self.scheduler_backend.peek_ready_tasks(
-            'origin-update-git')
+        r = self.scheduler_backend.peek_ready_tasks('load-git')

        # the first read on an empty scheduling db returns nothing
        self.assertEqual(len(r), 0)

        # Read from cache to scheduler db
        self.writer.run()

-        r = self.scheduler_backend.peek_ready_tasks(
-            'origin-update-git')
+        r = self.scheduler_backend.peek_ready_tasks('load-git')

        # later reads are still empty since svn origins are not supported
        self.assertEqual(len(r), 0)

    def test_run_ok(self):
        """Only git origins are supported for now.

        """
        ready_events = [
            SWHEvent(
                self._make_simple_event(event_type, 'origin-%s' % i, 'git'))
            for i, event_type in enumerate(LISTENED_EVENTS)
        ]

        expected_length = len(ready_events)
        self.scheduler_updater_backend.cache_put(ready_events)
        data = list(self.scheduler_updater_backend.cache_read())
        self.assertEqual(len(data), expected_length)

-        r = self.scheduler_backend.peek_ready_tasks(
-            'origin-update-git')
+        r = self.scheduler_backend.peek_ready_tasks('load-git')

        # the first read on an empty scheduling db returns nothing
        self.assertEqual(len(r), 0)

        # Read from cache to scheduler db
        self.writer.run()

        # now we should have scheduling tasks ready
-        r = self.scheduler_backend.peek_ready_tasks(
-            'origin-update-git')
+        r = self.scheduler_backend.peek_ready_tasks('load-git')
        self.assertEqual(len(r), expected_length)

        # Check the tasks have been scheduled
        for t in r:
-            self.assertEqual(t['type'], 'origin-update-git')
+            self.assertEqual(t['type'], 'load-git')
            self.assertEqual(t['priority'], 'normal')
            self.assertEqual(t['policy'], 'oneshot')
            self.assertEqual(t['status'], 'next_run_not_scheduled')

        # the writer has nothing to do now
        self.writer.run()

        # so no more data in cache
        data = list(self.scheduler_updater_backend.cache_read())
        self.assertEqual(len(data), 0)

        # provided no runner has run, the number of scheduled tasks is
        # unchanged
-        r = self.scheduler_backend.peek_ready_tasks(
-            'origin-update-git')
+        r = self.scheduler_backend.peek_ready_tasks('load-git')
        self.assertEqual(len(r), expected_length)
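UpdaterWriterTest.setUp() above doubles as a reference for the configuration
shape UpdaterWriter expects: a 'scheduler' block, a 'scheduler_updater' block
(local-only for now), and an optional 'updater_writer' block. A minimal
sketch reusing the test's database names (a real deployment would point at
its own databases):

    from swh.scheduler.updater.writer import UpdaterWriter

    config = {
        'scheduler': {
            'cls': 'local',
            'args': {'db': 'dbname=softwareheritage-scheduler-test'},
        },
        'scheduler_updater': {
            'cls': 'local',
            'args': {
                'db': 'dbname=softwareheritage-scheduler-updater-test',
                'cache_read_limit': 5,
            },
        },
        'updater_writer': {'pause': 0.1, 'verbose': False},
    }

    writer = UpdaterWriter(**config)
    # drains the updater cache into the scheduler db, then returns
    writer.run()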
diff --git a/swh/scheduler/updater/writer.py b/swh/scheduler/updater/writer.py
index 99bab3a..cec13b5 100644
--- a/swh/scheduler/updater/writer.py
+++ b/swh/scheduler/updater/writer.py
@@ -1,96 +1,96 @@
# Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import click
import logging
import time

from arrow import utcnow

from swh.core import utils
from swh.scheduler import get_scheduler
from swh.scheduler.utils import create_oneshot_task_dict
from swh.scheduler.updater.backend import SchedulerUpdaterBackend


class UpdaterWriter:
    """Updater writer in charge of updating the scheduler db with the
    latest prioritized oneshot tasks.

    In effect, this:

    - reads the events from the scheduler updater's db
    - converts those events into priority oneshot tasks
    - dumps them into the scheduler db

    """
    def __init__(self, **config):
        self.config = config
        if self.config['scheduler_updater']['cls'] != 'local':
            raise ValueError(
                'The scheduler_updater can only be a cls=local for now')
        self.scheduler_updater_backend = SchedulerUpdaterBackend(
            **self.config['scheduler_updater']['args'])
        self.scheduler_backend = get_scheduler(**self.config['scheduler'])
        self.pause = self.config.get('updater_writer', {}).get('pause', 10)
        self.log = logging.getLogger(
            'swh.scheduler.updater.writer.UpdaterWriter')

    def convert_to_oneshot_task(self, event):
        """Given an event, convert it into a oneshot task with priority.

        Args:
            event (dict): The event to convert into a task

        """
        if event['origin_type'] == 'git':
            return create_oneshot_task_dict(
-                'origin-update-git',
+                'load-git',
                event['url'],
                priority='normal')
        self.log.warning('Type %s is not supported for now, only git' % (
            event['origin_type'], ))
        return None

    def write_event_to_scheduler(self, events):
        """Write events to the scheduler and yield their urls when done"""
        # convert the events into oneshot tasks
        oneshot_tasks = filter(lambda e: e is not None,
                               map(self.convert_to_oneshot_task, events))
        # write the tasks to the scheduler
        self.scheduler_backend.create_tasks(list(oneshot_tasks))
        for e in events:
            yield e['url']

    def run(self):
        """First retrieve events from the cache (including origin_type and
        cnt), then convert them into oneshot tasks with priority, write
        those to the scheduler db, and finally remove them from the
        cache.

        """
        while True:
            timestamp = utcnow()
            events = list(
                self.scheduler_updater_backend.cache_read(timestamp))
            if not events:
                break
            for urls in utils.grouper(self.write_event_to_scheduler(events),
                                      n=100):
                self.scheduler_updater_backend.cache_remove(urls)
            time.sleep(self.pause)


@click.command()
@click.option('--verbose/--no-verbose', '-v', default=False,
              help='Verbose mode')
@click.pass_context
def main(ctx, verbose):
    click.echo("Deprecated! Use 'swh-scheduler updater' instead.",
               err=True)
    ctx.exit(1)


if __name__ == '__main__':
    main()
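convert_to_oneshot_task() above delegates to create_oneshot_task_dict(),
whose implementation is diffed just below. For a git event, the resulting
task dict looks roughly like this (the URL is a placeholder; 'next_run' is
filled with the current UTC time):

    from swh.scheduler.utils import create_oneshot_task_dict

    task = create_oneshot_task_dict(
        'load-git', 'https://example.org/repo.git', priority='normal')
    # task == {
    #     'policy': 'oneshot',
    #     'type': 'load-git',
    #     'next_run': datetime.datetime(..., tzinfo=datetime.timezone.utc),
    #     'arguments': {
    #         'args': ('https://example.org/repo.git',),
    #         'kwargs': {},
    #     },
    #     'priority': 'normal',
    # }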
diff --git a/swh/scheduler/utils.py b/swh/scheduler/utils.py
index 07c4c7b..6f61cef 100644
--- a/swh/scheduler/utils.py
+++ b/swh/scheduler/utils.py
@@ -1,75 +1,75 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from datetime import datetime, timezone


def get_task(task_name):
    """Retrieve the task object in our application instance by its fully
    qualified python name.

    Args:
        task_name (str): the task's name
            (e.g. swh.loader.git.tasks.LoadDiskGitRepository)

    Returns:
        Instance of the task

    """
    from swh.scheduler.celery_backend.config import app
    for module in app.conf.CELERY_IMPORTS:
        __import__(module)
    return app.tasks[task_name]


def create_task_dict(type, policy, *args, **kwargs):
    """Create a task with the given type and policy, scheduled for as soon
    as possible.

    Args:
        type (str): Type of oneshot task as per swh-scheduler's db
-            table task_type's column (Ex: origin-update-git,
-            swh-deposit-archive-checks)
+            table task_type's column (Ex: load-git,
+            check-deposit)
        policy (str): oneshot or recurring policy

    Returns:
        Expected dictionary for the one-shot task scheduling api
        (swh.scheduler.backend.create_tasks)

    """
    task_extra = {}
    for extra_key in ['priority', 'retries_left']:
        if extra_key in kwargs:
            extra_val = kwargs.pop(extra_key)
            task_extra[extra_key] = extra_val

    task = {
        'policy': policy,
        'type': type,
        'next_run': datetime.now(tz=timezone.utc),
        'arguments': {
            'args': args if args else [],
            'kwargs': kwargs if kwargs else {},
        },
    }
    task.update(task_extra)
    return task


def create_oneshot_task_dict(type, *args, **kwargs):
    """Create a oneshot task scheduled for as soon as possible.

    Args:
        type (str): Type of oneshot task as per swh-scheduler's db
-            table task_type's column (Ex: origin-update-git,
-            swh-deposit-archive-checks)
+            table task_type's column (Ex: load-git,
+            check-deposit)

    Returns:
        Expected dictionary for the one-shot task scheduling api
        (swh.scheduler.backend.create_tasks)

    """
    return create_task_dict(type, 'oneshot', *args, **kwargs)

diff --git a/version.txt b/version.txt
index 7007932..96f7eb4 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-v0.0.56-0-gac1b8db
\ No newline at end of file
+v0.0.57-0-g09e7245
\ No newline at end of file