diff --git a/PKG-INFO b/PKG-INFO index 8dd20c8..a090d2b 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.scheduler -Version: 0.0.16 +Version: 0.0.17 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/debian/control b/debian/control index 83ffa94..fb265f2 100644 --- a/debian/control +++ b/debian/control @@ -1,22 +1,22 @@ Source: swh-scheduler Maintainer: Software Heritage developers Section: python Priority: optional Build-Depends: debhelper (>= 9), - dh-python, + dh-python (>= 2), python3-all, python3-arrow, python3-celery, python3-click, python3-nose, python3-psycopg2, python3-setuptools, python3-swh.core (>= 0.0.34), python3-vcversioner Standards-Version: 3.9.6 Homepage: https://forge.softwareheritage.org/diffusion/DSCH/ Package: python3-swh.scheduler Architecture: all Depends: python3-swh.core (>= 0.0.34), ${misc:Depends}, ${python3:Depends} Description: Software Heritage Scheduler diff --git a/debian/rules b/debian/rules index 4f952c9..4fe0cf9 100755 --- a/debian/rules +++ b/debian/rules @@ -1,12 +1,11 @@ #!/usr/bin/make -f -export PYBUILD_NAME=swh-scheduler +export PYBUILD_NAME=swh.scheduler +export PYBUILD_TEST_ARGS=--with-doctest -sv -a !db,!fs %: dh $@ --with python3 --buildsystem=pybuild - -override_dh_auto_test: - PYBUILD_SYSTEM=custom \ - PYBUILD_TEST_ARGS="python{version} -m nose -sv swh.scheduler -a '!db'" \ - dh_auto_test +override_dh_install: + dh_install + rm -v $(CURDIR)/debian/python3-*/usr/lib/python*/dist-packages/swh/__init__.py diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000..58a761e --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,3 @@ +_build/ +apidoc/ +*-stamp diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..c30c50a --- /dev/null +++ b/docs/Makefile @@ -0,0 +1 @@ +include ../../swh-docs/Makefile.sphinx diff --git a/docs/_static/.placeholder b/docs/_static/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/docs/_templates/.placeholder b/docs/_templates/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..190deb7 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1 @@ +from swh.docs.sphinx.conf import * # NoQA diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..8b64117 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,15 @@ +Software Heritage - Development Documentation +============================================= + +.. 
toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/setup.py b/setup.py index 2f383a4..72825ac 100644 --- a/setup.py +++ b/setup.py @@ -1,34 +1,32 @@ -from setuptools import setup +from setuptools import setup, find_packages def parse_requirements(): requirements = [] for reqf in ('requirements.txt', 'requirements-swh.txt'): with open(reqf) as f: for line in f.readlines(): line = line.strip() if not line or line.startswith('#'): continue requirements.append(line) return requirements setup( name='swh.scheduler', description='Software Heritage Scheduler', author='Software Heritage developers', author_email='swh-devel@inria.fr', url='https://forge.softwareheritage.org/diffusion/DSCH/', - packages=[ - 'swh.scheduler', 'swh.scheduler.celery_backend', 'swh.scheduler.tests' - ], - scripts=['bin/swh-worker-control'], # scripts to package + packages=find_packages(), + scripts=['bin/swh-worker-control'], install_requires=parse_requirements(), entry_points=''' [console_scripts] swh-scheduler=swh.scheduler.cli:cli ''', setup_requires=['vcversioner'], vcversioner={}, include_package_data=True, ) diff --git a/sql/swh-scheduler-schema.sql b/sql/swh-scheduler-schema.sql index 6b1d403..89c85f3 100644 --- a/sql/swh-scheduler-schema.sql +++ b/sql/swh-scheduler-schema.sql @@ -1,300 +1,301 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; insert into dbversion (version, release, description) - values (5, now(), 'Work In Progress'); + values (6, now(), 'Work In Progress'); create table task_type ( type text primary key, description text not null, backend_name text not null, - default_interval interval not null, - min_interval interval not null, - max_interval interval not null, - backoff_factor float not null, + default_interval interval, + min_interval interval, + max_interval interval, + backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, - current_interval interval not null, + 
current_interval interval, status task_status not null, policy task_policy not null default 'recurring', - retries_left bigint not null default 0 + retries_left bigint not null default 0, + check (policy <> 'recurring' or current_interval is not null) ); comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. ' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; create index on task(type); create index on task(next_run); create index task_args on task using btree ((arguments -> 'args')); create index task_kwargs on task using gin ((arguments -> 'kwargs')); -create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'lost'); +create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. 
' 'For instance, the worker that took on the job, ' 'or the logs for the run.'; create index on task_run(task); create index on task_run(backend_id); create or replace function swh_scheduler_mktemp_task () returns void language sql as $$ create temporary table tmp_task ( like task excluding indexes ) on commit drop; alter table tmp_task drop column id, drop column current_interval, drop column status, alter column policy drop not null, alter column retries_left drop not null; $$; comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation'; create or replace function swh_scheduler_create_tasks_from_temp () returns setof task language plpgsql as $$ begin return query insert into task (type, arguments, next_run, status, current_interval, policy, retries_left) select type, arguments, next_run, 'next_run_not_scheduled', (select default_interval from task_type tt where tt.type = tmp_task.type), coalesce(policy, 'recurring'), coalesce(retries_left, (select num_retries from task_type tt where tt.type = tmp_task.type), 0) from tmp_task returning task.*; end; $$; comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table'; create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql stable as $$ select * from task where next_run <= ts and type = task_type and status = 'next_run_not_scheduled' order by next_run limit num_tasks; $$; create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql as $$ update task set status='next_run_scheduled' from ( select id from task where next_run <= ts and type = task_type and status='next_run_not_scheduled' order by next_run limit num_tasks for update skip locked ) next_tasks where task.id = next_tasks.id returning task.*; $$; create or replace function swh_scheduler_schedule_task_run (task_id bigint, backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ insert into task_run (task, backend_id, metadata, scheduled, status) values (task_id, backend_id, metadata, ts, 'scheduled') returning *; $$; create or replace function swh_scheduler_mktemp_task_run () returns void language sql as $$ create temporary table tmp_task_run ( like task_run excluding indexes ) on commit drop; alter table tmp_task_run drop column id, drop column status; $$; comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling'; create or replace function swh_scheduler_schedule_task_run_from_temp () returns void language plpgsql as $$ begin insert into task_run (task, backend_id, metadata, scheduled, status) select task, backend_id, metadata, scheduled, 'scheduled' from tmp_task_run; return; end; $$; create or replace function swh_scheduler_start_task_run (backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set started = ts, status = 'started', metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata where task_run.backend_id = swh_scheduler_start_task_run.backend_id returning *; $$; create or replace function swh_scheduler_end_task_run (backend_id text, status task_run_status, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run 
language sql as $$ update task_run set ended = ts, status = swh_scheduler_end_task_run.status, metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata where task_run.backend_id = swh_scheduler_end_task_run.backend_id returning *; $$; create or replace function swh_scheduler_update_task_on_task_end () returns trigger language plpgsql as $$ declare cur_task task%rowtype; cur_task_type task_type%rowtype; adjustment_factor float; new_interval interval; begin select * from task where id = new.task into cur_task; select * from task_type where type = cur_task.type into cur_task_type; case when new.status = 'permfailed' then update task set status = 'disabled' where id = cur_task.id; when new.status in ('eventful', 'uneventful') then case when cur_task.policy = 'oneshot' then update task set status = 'completed' where id = cur_task.id; when cur_task.policy = 'recurring' then if new.status = 'uneventful' then adjustment_factor := 1/cur_task_type.backoff_factor; else adjustment_factor := 1/cur_task_type.backoff_factor; end if; new_interval := greatest( cur_task_type.min_interval, least( cur_task_type.max_interval, adjustment_factor * cur_task.current_interval)); update task set status = 'next_run_not_scheduled', next_run = now() + new_interval, - interval = new_interval, - retries_left = cur_task_type.max_retries + current_interval = new_interval, + retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; else -- new.status in 'failed', 'lost' - if coalesce(cur_task.retries_left, 0) > 0 then + if cur_task.retries_left > 0 then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task_type.retry_delay, retries_left = cur_task.retries_left - 1 where id = cur_task.id; else -- no retries left case when cur_task.policy = 'oneshot' then update task set status = 'disabled' where id = cur_task.id; when cur_task.policy = 'recurring' then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task.current_interval, - retries_left = cur_task_type.max_retries + retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; end if; -- retries end case; return null; end; $$; create trigger update_task_on_task_end after update of status on task_run for each row when (new.status NOT IN ('scheduled', 'started')) execute procedure swh_scheduler_update_task_on_task_end (); diff --git a/sql/updates/05.sql b/sql/updates/05.sql index ee40ebb..b55a533 100644 --- a/sql/updates/05.sql +++ b/sql/updates/05.sql @@ -1,143 +1,144 @@ -- SWH Scheduler Schema upgrade -- from_version: 04 -- to_version: 05 -- description: Add reccurrence logic for temporary failures and one-shot tasks +alter type task_status add value if not exists 'completed' before 'disabled'; +alter type task_run_status add value if not exists 'permfailed' after 'failed'; + begin; insert into dbversion (version, release, description) values (5, now(), 'Work In Progress'); alter table task_type add column num_retries bigint; alter table task_type add column retry_delay interval; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; -alter type task_status add value if not exists 'completed' before 'disabled'; - create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; alter table task add column policy task_policy not null default 'recurring'; alter table 
task add column retries_left bigint not null default 0; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; create or replace function swh_scheduler_mktemp_task () returns void language sql as $$ create temporary table tmp_task ( like task excluding indexes ) on commit drop; alter table tmp_task drop column id, drop column current_interval, drop column status, alter column policy drop not null, alter column retries_left drop not null; $$; comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation'; create or replace function swh_scheduler_create_tasks_from_temp () returns setof task language plpgsql as $$ begin return query insert into task (type, arguments, next_run, status, current_interval, policy, retries_left) select type, arguments, next_run, 'next_run_not_scheduled', (select default_interval from task_type tt where tt.type = tmp_task.type), coalesce(policy, 'recurring'), coalesce(retries_left, (select num_retries from task_type tt where tt.type = tmp_task.type), 0) from tmp_task returning task.*; end; $$; comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table'; -drop trigger update_interval_on_task_end; -drop function swh_scheduler_compute_new_task_interval (task_type, current_interval, end_status) cascade; +drop trigger update_interval_on_task_end on task_run; +drop function swh_scheduler_compute_new_task_interval (text, interval, task_run_status) cascade; drop function swh_scheduler_update_task_interval () cascade; create or replace function swh_scheduler_update_task_on_task_end () returns trigger language plpgsql as $$ declare cur_task task%rowtype; cur_task_type task_type%rowtype; adjustment_factor float; new_interval interval; begin select * from task where id = new.task into cur_task; select * from task_type where type = cur_task.type into cur_task_type; case when new.status = 'permfailed' then update task set status = 'disabled' where id = cur_task.id; when new.status in ('eventful', 'uneventful') then case when cur_task.policy = 'oneshot' then update task set status = 'completed' where id = cur_task.id; when cur_task.policy = 'recurring' then if new.status = 'uneventful' then adjustment_factor := 1/cur_task_type.backoff_factor; else adjustment_factor := 1/cur_task_type.backoff_factor; end if; new_interval := greatest( cur_task_type.min_interval, least( cur_task_type.max_interval, adjustment_factor * cur_task.current_interval)); update task set status = 'next_run_not_scheduled', next_run = now() + new_interval, - interval = new_interval, - retries_left = cur_task_type.max_retries + current_interval = new_interval, + retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; else -- new.status in 'failed', 'lost' - if coalesce(cur_task.retries_left, 0) > 0 then + if cur_task.retries_left > 0 then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task_type.retry_delay, retries_left = cur_task.retries_left - 1 where id = cur_task.id; else -- no retries left case when cur_task.policy = 'oneshot' then update task set status = 'disabled' where id = cur_task.id; when cur_task.policy = 'recurring' then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task.current_interval, - retries_left = cur_task_type.max_retries + retries_left = coalesce(cur_task_type.num_retries, 
0) where id = cur_task.id; end case; end if; -- retries end case; return null; end; $$; create trigger update_task_on_task_end after update of status on task_run for each row when (new.status NOT IN ('scheduled', 'started')) execute procedure swh_scheduler_update_task_on_task_end (); commit; diff --git a/sql/updates/06.sql b/sql/updates/06.sql new file mode 100644 index 0000000..8aa1d9f --- /dev/null +++ b/sql/updates/06.sql @@ -0,0 +1,23 @@ +-- SWH Scheduler Schema upgrade +-- from_version: 05 +-- to_version: 06 +-- description: relax constraints on intervals for one-shot tasks + +begin; + +insert into dbversion (version, release, description) + values (6, now(), 'Work In Progress'); + + +alter table task_type + alter column default_interval drop not null, + alter column min_interval drop not null, + alter column max_interval drop not null, + alter column backoff_factor drop not null; + +alter table task + alter column current_interval drop not null, + add constraint task_check check (policy <> 'recurring' or current_interval is not null); + +commit; + diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 8dd20c8..a090d2b 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.scheduler -Version: 0.0.16 +Version: 0.0.17 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index c69088f..4359080 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,41 +1,51 @@ .gitignore AUTHORS LICENSE LICENSE.Celery MANIFEST.in Makefile requirements-swh.txt requirements.txt setup.py version.txt bin/swh-worker-control debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format +docs/.gitignore +docs/Makefile +docs/conf.py +docs/index.rst +docs/_static/.placeholder +docs/_templates/.placeholder sql/.gitignore sql/Makefile sql/swh-scheduler-schema.sql sql/swh-scheduler-testdata.sql sql/updates/02.sql sql/updates/03.sql sql/updates/04.sql sql/updates/05.sql +sql/updates/06.sql +swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt +swh/scheduler/__init__.py swh/scheduler/backend.py swh/scheduler/cli.py swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/listener.py swh/scheduler/celery_backend/runner.py +swh/scheduler/tests/__init__.py swh/scheduler/tests/test_scheduler.py swh/scheduler/tests/test_task.py \ No newline at end of file diff --git a/swh/__init__.py b/swh/__init__.py new file mode 100644 index 0000000..69e3be5 --- /dev/null +++ b/swh/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/swh/scheduler/__init__.py b/swh/scheduler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swh/scheduler/backend.py b/swh/scheduler/backend.py index df13e58..7b6914c 100644 --- a/swh/scheduler/backend.py +++ b/swh/scheduler/backend.py @@ -1,413 +1,435 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS 
file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime from functools import wraps import json import tempfile from arrow import Arrow, utcnow import psycopg2 import psycopg2.extras from psycopg2.extensions import AsIs from swh.core.config import SWHConfig def adapt_arrow(arrow): return AsIs("'%s'::timestamptz" % arrow.isoformat()) psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json) psycopg2.extensions.register_adapter(Arrow, adapt_arrow) def autocommit(fn): @wraps(fn) def wrapped(self, *args, **kwargs): autocommit = False if 'cursor' not in kwargs or not kwargs['cursor']: autocommit = True kwargs['cursor'] = self.cursor() try: ret = fn(self, *args, **kwargs) except: if autocommit: self.rollback() raise if autocommit: self.commit() return ret return wrapped class SchedulerBackend(SWHConfig): """ Backend for the Software Heritage scheduling database. """ CONFIG_BASE_FILENAME = 'scheduler.ini' DEFAULT_CONFIG = { 'scheduling_db': ('str', 'dbname=swh-scheduler'), } def __init__(self, **override_config): self.config = self.parse_config_file(global_config=False) self.config.update(override_config) self.db = None self.reconnect() def reconnect(self): if not self.db or self.db.closed: self.db = psycopg2.connect( dsn=self.config['scheduling_db'], cursor_factory=psycopg2.extras.RealDictCursor, ) def cursor(self): """Return a fresh cursor on the database, with auto-reconnection in case of failure""" cur = None # Get a fresh cursor and reconnect at most three times tries = 0 while True: tries += 1 try: cur = self.db.cursor() cur.execute('select 1') break except psycopg2.OperationalError: if tries < 3: self.reconnect() else: raise return cur def commit(self): """Commit a transaction""" self.db.commit() def rollback(self): """Rollback a transaction""" self.db.rollback() def copy_to(self, items, tblname, columns, cursor=None, item_cb=None): def escape(data): if data is None: return '' if isinstance(data, bytes): return '\\x%s' % binascii.hexlify(data).decode('ascii') elif isinstance(data, str): return '"%s"' % data.replace('"', '""') elif isinstance(data, (datetime.datetime, Arrow)): # We escape twice to make sure the string generated by # isoformat gets escaped return escape(data.isoformat()) elif isinstance(data, dict): return escape(json.dumps(data)) elif isinstance(data, list): return escape("{%s}" % ','.join(escape(d) for d in data)) elif isinstance(data, psycopg2.extras.Range): # We escape twice here too, so that we make sure # everything gets passed to copy properly return escape( '%s%s,%s%s' % ( '[' if data.lower_inc else '(', '-infinity' if data.lower_inf else escape(data.lower), 'infinity' if data.upper_inf else escape(data.upper), ']' if data.upper_inc else ')', ) ) else: # We don't escape here to make sure we pass literals properly return str(data) with tempfile.TemporaryFile('w+') as f: for d in items: if item_cb is not None: item_cb(d) line = [escape(d.get(k)) for k in columns] f.write(','.join(line)) f.write('\n') f.seek(0) cursor.copy_expert('COPY %s (%s) FROM STDIN CSV' % ( tblname, ', '.join(columns)), f) task_type_keys = [ 'type', 'description', 'backend_name', 'default_interval', 'min_interval', 'max_interval', 'backoff_factor', 'max_queue_length', 'num_retries', 'retry_delay', ] def _format_query(self, query, keys): """Format a query with the given keys""" query_keys = ', '.join(keys) placeholders = ', '.join(['%s'] * 
len(keys)) return query.format(keys=query_keys, placeholders=placeholders) def _format_multiquery(self, query, keys, values): """Format a query with placeholders generated for multiple values""" query_keys = ', '.join(keys) placeholders = '), ('.join( [', '.join(['%s'] * len(keys))] * len(values) ) ret_values = sum([[value[key] for key in keys] for value in values], []) return ( query.format(keys=query_keys, placeholders=placeholders), ret_values, ) @autocommit def create_task_type(self, task_type, cursor=None): """Create a new task type ready for scheduling. - A task type is a dictionary with the following keys: - type (str): an identifier for the task type - description (str): a human-readable description of what the task - does - backend_name (str): the name of the task in the job-scheduling - backend - default_interval (datetime.timedelta): the default interval - between two task runs - min_interval (datetime.timedelta): the minimum interval between - two task runs - max_interval (datetime.timedelta): the maximum interval between - two task runs - backoff_factor (float): the factor by which the interval changes - at each run - max_queue_length (int): the maximum length of the task queue for - this task type + Args: + task_type (dict): a dictionary with the following keys: + + - type (str): an identifier for the task type + - description (str): a human-readable description of what the + task does + - backend_name (str): the name of the task in the + job-scheduling backend + - default_interval (datetime.timedelta): the default interval + between two task runs + - min_interval (datetime.timedelta): the minimum interval + between two task runs + - max_interval (datetime.timedelta): the maximum interval + between two task runs + - backoff_factor (float): the factor by which the interval + changes at each run + - max_queue_length (int): the maximum length of the task queue + for this task type + """ query = self._format_query( """insert into task_type ({keys}) values ({placeholders})""", self.task_type_keys, ) cursor.execute(query, [task_type[key] for key in self.task_type_keys]) @autocommit def get_task_type(self, task_type_name, cursor=None): """Retrieve the task type with id task_type_name""" query = self._format_query( "select {keys} from task_type where type=%s", self.task_type_keys, ) cursor.execute(query, (task_type_name,)) ret = cursor.fetchone() return ret @autocommit def get_task_types(self, cursor=None): query = self._format_query( "select {keys} from task_type", self.task_type_keys, ) cursor.execute(query) ret = cursor.fetchall() return ret task_create_keys = [ 'type', 'arguments', 'next_run', 'policy', 'retries_left', ] task_keys = task_create_keys + ['id', 'current_interval', 'status'] @autocommit def create_tasks(self, tasks, cursor=None): """Create new tasks. - A task is a dictionary with the following keys: - type (str): the task type - arguments (dict): the arguments for the task runner - args (list of str): arguments - kwargs (dict str -> str): keyword arguments - next_run (datetime.datetime): the next scheduled run for the task - This returns a list of created task ids. + Args: + tasks (list): each task is a dictionary with the following keys: + + - type (str): the task type + - arguments (dict): the arguments for the task runner, keys: + + - args (list of str): arguments + - kwargs (dict str -> str): keyword arguments + + - next_run (datetime.datetime): the next scheduled run for the + task + + Returns: + a list of created task ids. 
+ """ cursor.execute('select swh_scheduler_mktemp_task()') self.copy_to(tasks, 'tmp_task', self.task_create_keys, cursor) query = self._format_query( 'select {keys} from swh_scheduler_create_tasks_from_temp()', self.task_keys, ) cursor.execute(query) return cursor.fetchall() @autocommit def disable_tasks(self, task_ids, cursor=None): """Disable the tasks whose ids are listed.""" query = "UPDATE task SET status = 'disabled' WHERE id IN %s" cursor.execute(query, (tuple(task_ids),)) return None @autocommit def get_tasks(self, task_ids, cursor=None): """Retrieve the info of tasks whose ids are listed.""" query = self._format_query('select {keys} from task where id in %s', self.task_keys) cursor.execute(query, (tuple(task_ids),)) return cursor.fetchall() @autocommit def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None, cursor=None): """Fetch the list of ready tasks Args: + task_type (str): filtering task per their type timestamp (datetime.datetime): peek tasks that need to be executed before that timestamp num_tasks (int): only peek at num_tasks tasks Returns: a list of tasks """ if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_peek_ready_tasks(%s, %s, %s)', (task_type, timestamp, num_tasks) ) return cursor.fetchall() @autocommit def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None, cursor=None): """Fetch the list of ready tasks, and mark them as scheduled Args: + task_type (str): filtering task per their type timestamp (datetime.datetime): grab tasks that need to be executed before that timestamp num_tasks (int): only grab num_tasks tasks Returns: a list of tasks """ if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_grab_ready_tasks(%s, %s, %s)', (task_type, timestamp, num_tasks) ) return cursor.fetchall() task_run_create_keys = ['task', 'backend_id', 'scheduled', 'metadata'] @autocommit def schedule_task_run(self, task_id, backend_id, metadata=None, timestamp=None, cursor=None): """Mark a given task as scheduled, adding a task_run entry in the database. Args: task_id (int): the identifier for the task being scheduled backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred + Returns: a fresh task_run entry + """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)', (task_id, backend_id, metadata, timestamp) ) return cursor.fetchone() @autocommit def mass_schedule_task_runs(self, task_runs, cursor=None): """Schedule a bunch of task runs. 
Args: - task_runs: a list of dicts with keys: - task (int): the identifier for the task being scheduled - backend_id (str): the identifier of the job in the backend - metadata (dict): metadata to add to the task_run entry - scheduled (datetime.datetime): the instant the event occurred + task_runs (list): a list of dicts with keys: + + - task (int): the identifier for the task being scheduled + - backend_id (str): the identifier of the job in the backend + - metadata (dict): metadata to add to the task_run entry + - scheduled (datetime.datetime): the instant the event occurred + Returns: None """ cursor.execute('select swh_scheduler_mktemp_task_run()') self.copy_to(task_runs, 'tmp_task_run', self.task_run_create_keys, cursor) cursor.execute('select swh_scheduler_schedule_task_run_from_temp()') @autocommit def start_task_run(self, backend_id, metadata=None, timestamp=None, cursor=None): """Mark a given task as started, updating the corresponding task_run entry in the database. Args: backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred + Returns: the updated task_run entry + """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_start_task_run(%s, %s, %s)', (backend_id, metadata, timestamp) ) return cursor.fetchone() @autocommit def end_task_run(self, backend_id, status, metadata=None, timestamp=None, result=None, cursor=None): - """Mark a given task as ended, updating the corresponding task_run - entry in the database. + """Mark a given task as ended, updating the corresponding task_run entry in the + database. Args: backend_id (str): the identifier of the job in the backend - status ('eventful', 'uneventful', 'failed'): how the task ended + status (str): how the task ended; one of: 'eventful', 'uneventful', + 'failed' metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred + Returns: the updated task_run entry + """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_end_task_run(%s, %s, %s, %s)', (backend_id, status, metadata, timestamp) ) return cursor.fetchone() diff --git a/swh/scheduler/cli.py b/swh/scheduler/cli.py index 571b425..e56ff4e 100644 --- a/swh/scheduler/cli.py +++ b/swh/scheduler/cli.py @@ -1,173 +1,176 @@ # Copyright (C) 2016 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import csv import json import locale import arrow import click from .backend import SchedulerBackend locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def pretty_print_list(list, indent): """Pretty-print a list""" return ''.join('%s%s\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent): """Pretty-print a list""" return ''.join('%s%s:%s\n' % (' ' * indent, click.style(key, bold=True), value) for key, value in dict.items()) def pretty_print_task(task): """Pretty-print a task""" next_run = 
arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'], '\n', click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @click.group(context_settings=CONTEXT_SETTINGS) @click.option( '--database', '-d', help='Scheduling database DSN', default='host=db.internal.softwareheritage.org ' 'dbname=softwareheritage-scheduler user=guest') @click.pass_context def cli(ctx, database): """Software Heritage Scheduler CLI interface""" override_config = {} if database: override_config['scheduling_db'] = database ctx.obj = SchedulerBackend(**override_config) @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], type=click.Choice(['type', 'args', 'kwargs', 'next_run']), help='columns present in the CSV file') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context def schedule_tasks(ctx, columns, file): """Schedule tasks from a CSV input file. The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). """ tasks = [] now = arrow.utcnow() reader = csv.reader(file) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = ctx.obj.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('list-pending') +@click.option('--task-type', '-t', required=True, + help='The tasks\' type concerned by the listing') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context -def list_pending_tasks(ctx, limit, before): +def list_pending_tasks(ctx, task_type, limit, before): """List the tasks that are going to be run. 
You can override the number of tasks to fetch """ - pending = ctx.obj.peek_ready_tasks(timestamp=before, num_tasks=limit) + pending = ctx.obj.peek_ready_tasks(task_type, + timestamp=before, num_tasks=limit) output = [ 'Found %d tasks\n' % len(pending) ] for task in pending: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @cli.group('task-run') @click.pass_context def task_run(ctx): """Manipulate task runs.""" pass if __name__ == '__main__': cli() diff --git a/swh/scheduler/tests/__init__.py b/swh/scheduler/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/version.txt b/version.txt index 45b2792..bfdf897 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.16-0-gee545d4 \ No newline at end of file +v0.0.17-0-gcd827b6 \ No newline at end of file
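
Schema version 6 (sql/updates/06.sql) drops the NOT NULL constraints on the task_type interval columns and on task.current_interval, and adds a check constraint so that only recurring tasks need an interval. The sketch below creates a one-shot task through the backend under these relaxed constraints; it is illustrative only: the DSN and the 'load-origin' task type are assumptions (the type must already exist in task_type and is not part of this change), and it relies on swh.core picking up its default configuration.

```python
import arrow

from swh.scheduler.backend import SchedulerBackend

# Assumed DSN; point this at your scheduling database.
backend = SchedulerBackend(scheduling_db='dbname=softwareheritage-scheduler')

# 'load-origin' is a hypothetical task type: it must already exist in
# task_type, but with schema version 6 it no longer needs default/min/max
# intervals or a backoff factor when it is only used for one-shot tasks.
created = backend.create_tasks([{
    'type': 'load-origin',
    'arguments': {
        'args': ['https://example.org/some/repo.git'],
        'kwargs': {},
    },
    'next_run': arrow.utcnow(),
    # One-shot tasks may now carry a NULL current_interval; the new check
    # constraint only requires an interval for recurring tasks.
    'policy': 'oneshot',
}])

for task in created:
    print(task['id'], task['status'], task['current_interval'])
```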
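
swh_scheduler_peek_ready_tasks and swh_scheduler_grab_ready_tasks are now scoped to a task type, and the task_run_status enum gains a 'permfailed' value that the update_task_on_task_end trigger turns into a disabled task. A minimal lifecycle sketch, reusing the assumed DSN and task type from the previous example (the backend_id is a placeholder, not a real job id):

```python
import arrow

from swh.scheduler.backend import SchedulerBackend

backend = SchedulerBackend(scheduling_db='dbname=softwareheritage-scheduler')

# The task type is now a mandatory first argument when peeking or grabbing.
ready = backend.grab_ready_tasks('load-origin',
                                 timestamp=arrow.utcnow(), num_tasks=10)

for task in ready:
    backend_id = 'job-%d' % task['id']  # placeholder for the backend job id
    backend.schedule_task_run(task['id'], backend_id)
    backend.start_task_run(backend_id, metadata={'worker': 'worker01'})
    # Ending a run with the new 'permfailed' status makes the trigger disable
    # the task instead of rescheduling or retrying it.
    backend.end_task_run(backend_id, status='permfailed',
                         metadata={'error': 'unrecoverable failure'})
```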
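
The CLI follows suit: "swh-scheduler task list-pending" now requires the task type (--task-type/-t). A sketch exercising the command through click's test runner, with the same assumed DSN and hypothetical task type; a real invocation would simply be "swh-scheduler -d <dsn> task list-pending -t load-origin -l 5" and needs a reachable scheduling database.

```python
from click.testing import CliRunner

from swh.scheduler.cli import cli

runner = CliRunner()
# --task-type is now mandatory for list-pending; --limit and --before stay
# optional.
result = runner.invoke(cli, [
    '--database', 'dbname=softwareheritage-scheduler',
    'task', 'list-pending',
    '--task-type', 'load-origin',
    '--limit', '5',
])
print(result.output)
```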