diff --git a/PKG-INFO b/PKG-INFO index ae0cda2..3d6878b 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.scheduler -Version: 0.0.19 +Version: 0.0.20 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/data/elastic-template.json b/data/elastic-template.json new file mode 100644 index 0000000..4e06dfc --- /dev/null +++ b/data/elastic-template.json @@ -0,0 +1,52 @@ +{ + "order": 0, + "index_patterns": ["swh-tasks-*"], + "settings": { + "index": { + "number_of_shards": "1", + "codec": "best_compression", + "refresh_interval": "30s" + } + }, + "mappings" : { + "task" : { + "_source" : { "enabled": true}, + "properties": { + "task_id": {"type": "double"}, + "task_policy": {"type": "text"}, + "task_status": {"type": "text"}, + "task_run_id": {"type": "double"}, + "arguments": { + "type": "object", + "properties" : { + "args": { + "type": "nested" + }, + "kwargs": { + "type": "object" + } + } + }, + "type": {"type": "text"}, + "backend_id": {"type": "text"}, + "metadata": { + "type": "object", + "enabled" : false + }, + "scheduled": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" + }, + "started": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" + }, + "ended": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" + } + } + } + }, + "aliases": {} +} diff --git a/debian/control b/debian/control index fb265f2..8c2a9c4 100644 --- a/debian/control +++ b/debian/control @@ -1,22 +1,23 @@ Source: swh-scheduler Maintainer: Software Heritage developers Section: python Priority: optional Build-Depends: debhelper (>= 9), dh-python (>= 2), python3-all, python3-arrow, python3-celery, python3-click, + python3-elasticsearch, python3-nose, python3-psycopg2, python3-setuptools, python3-swh.core (>= 0.0.34), python3-vcversioner Standards-Version: 3.9.6 Homepage: https://forge.softwareheritage.org/diffusion/DSCH/ Package: python3-swh.scheduler Architecture: all Depends: python3-swh.core (>= 0.0.34), ${misc:Depends}, ${python3:Depends} Description: Software Heritage Scheduler diff --git a/requirements.txt b/requirements.txt index d8e1fee..2be6aa2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,10 @@ # Add here external Python modules dependencies, one per line. Module names # should match https://pypi.python.org/pypi names. 
For the full spec or # dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html arrow celery Click psycopg2 vcversioner - - +elasticsearch diff --git a/sql/Makefile b/sql/Makefile index ae73de6..9f92954 100644 --- a/sql/Makefile +++ b/sql/Makefile @@ -1,48 +1,49 @@ # Depends: postgresql-client, postgresql-autodoc DBNAME = softwareheritage-scheduler-dev DOCDIR = autodoc SQL_SCHEMA = swh-scheduler-schema.sql -SQLS = $(SQL_SCHEMA) +SQL_DATA = swh-scheduler-data.sql +SQLS = $(SQL_SCHEMA) $(SQL_DATA) PSQL_BIN = psql PSQL_FLAGS = --echo-all -X -v ON_ERROR_STOP=1 PSQL = $(PSQL_BIN) $(PSQL_FLAGS) all: createdb: createdb-stamp createdb-stamp: $(SQL_INIT) createdb $(DBNAME) touch $@ filldb: filldb-stamp filldb-stamp: createdb-stamp cat $(SQLS) | $(PSQL) $(DBNAME) touch $@ dropdb: -dropdb $(DBNAME) dumpdb: swh-scheduler.dump swh-scheduler.dump: filldb-stamp pg_dump -Fc $(DBNAME) > $@ doc: autodoc-stamp $(DOCDIR)/swh-scheduler.pdf autodoc-stamp: filldb-stamp test -d $(DOCDIR)/ || mkdir $(DOCDIR) postgresql_autodoc -d $(DBNAME) -f $(DOCDIR)/swh cp -a $(DOCDIR)/swh-scheduler.dot $(DOCDIR)/swh-scheduler.dot.orig touch $@ $(DOCDIR)/swh-scheduler.pdf: autodoc-stamp dot -T pdf $(DOCDIR)/swh-scheduler.dot > $(DOCDIR)/swh-scheduler.pdf clean: rm -rf *-stamp $(DOCDIR)/ distclean: clean dropdb rm -f swh-scheduler.dump .PHONY: all initdb createdb dropdb doc clean diff --git a/sql/swh-scheduler-data.sql b/sql/swh-scheduler-data.sql new file mode 100644 index 0000000..50bd8db --- /dev/null +++ b/sql/swh-scheduler-data.sql @@ -0,0 +1,77 @@ +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + max_queue_length) +values ( + 'swh-loader-mount-dump-and-load-svn-repository', + 'Loading svn repositories from svn dump', + 'swh.loader.svn.tasks.MountAndLoadSvnRepositoryTsk', + '1 day', '1 day', '1 day', 1, + 1000); + +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + num_retries, + max_queue_length) +values ( + 'swh-deposit-archive-loading', + 'Loading deposit archive into swh through swh-loader-tar', + 'swh.deposit.loader.tasks.LoadDepositArchiveTsk', + '1 day', '1 day', '1 day', 1, 3, 1000); + + +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + num_retries, max_queue_length) +values ( + 'swh-deposit-archive-checks', + 'Pre-checking deposit step before loading into swh archive', + 'swh.deposit.loader.tasks.ChecksDepositTsk', + '1 day', '1 day', '1 day', 1, 3, 1000); + +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + max_queue_length) +values ( + 'swh-vault-cooking', + 'Cook a Vault bundle', + 'swh.vault.cooking_tasks.SWHCookingTask', + '1 day', '1 day', '1 day', 1, + 10000); + +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + max_queue_length) +values ( + 'origin-load-hg', + 'Loading mercurial repository swh-loader-mercurial', + 'swh.loader.mercurial.tasks.LoadMercurialTsk', + '1 day', '1 day', '1 day', 1, + 1000); + +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + max_queue_length) +values ( + 'origin-load-archive-hg', + 'Loading archive mercurial repository swh-loader-mercurial', + 'swh.loader.mercurial.tasks.LoadArchiveMercurialTsk', + '1 
day', '1 day', '1 day', 1, + 1000); diff --git a/sql/swh-scheduler-schema.sql b/sql/swh-scheduler-schema.sql index 89c85f3..52c09cf 100644 --- a/sql/swh-scheduler-schema.sql +++ b/sql/swh-scheduler-schema.sql @@ -1,301 +1,348 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; insert into dbversion (version, release, description) - values (6, now(), 'Work In Progress'); + values (7, now(), 'Work In Progress'); create table task_type ( type text primary key, description text not null, backend_name text not null, default_interval interval, min_interval interval, max_interval interval, backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, current_interval interval, status task_status not null, policy task_policy not null default 'recurring', retries_left bigint not null default 0, check (policy <> 'recurring' or current_interval is not null) ); comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. 
' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; create index on task(type); create index on task(next_run); create index task_args on task using btree ((arguments -> 'args')); create index task_kwargs on task using gin ((arguments -> 'kwargs')); create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. ' 'For instance, the worker that took on the job, ' 'or the logs for the run.'; create index on task_run(task); create index on task_run(backend_id); create or replace function swh_scheduler_mktemp_task () returns void language sql as $$ create temporary table tmp_task ( like task excluding indexes ) on commit drop; alter table tmp_task drop column id, drop column current_interval, drop column status, alter column policy drop not null, alter column retries_left drop not null; $$; comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation'; create or replace function swh_scheduler_create_tasks_from_temp () returns setof task language plpgsql as $$ begin return query insert into task (type, arguments, next_run, status, current_interval, policy, retries_left) select type, arguments, next_run, 'next_run_not_scheduled', (select default_interval from task_type tt where tt.type = tmp_task.type), coalesce(policy, 'recurring'), coalesce(retries_left, (select num_retries from task_type tt where tt.type = tmp_task.type), 0) from tmp_task returning task.*; end; $$; comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table'; create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql stable as $$ select * from task where next_run <= ts and type = task_type and status = 'next_run_not_scheduled' order by next_run limit num_tasks; $$; create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql as $$ update task set status='next_run_scheduled' from ( select id from task where next_run <= ts and type = task_type and status='next_run_not_scheduled' order by next_run limit num_tasks for update skip locked ) next_tasks where task.id = next_tasks.id returning task.*; $$; create or replace function swh_scheduler_schedule_task_run (task_id bigint, backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql 
as $$ insert into task_run (task, backend_id, metadata, scheduled, status) values (task_id, backend_id, metadata, ts, 'scheduled') returning *; $$; create or replace function swh_scheduler_mktemp_task_run () returns void language sql as $$ create temporary table tmp_task_run ( like task_run excluding indexes ) on commit drop; alter table tmp_task_run drop column id, drop column status; $$; comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling'; create or replace function swh_scheduler_schedule_task_run_from_temp () returns void language plpgsql as $$ begin insert into task_run (task, backend_id, metadata, scheduled, status) select task, backend_id, metadata, scheduled, 'scheduled' from tmp_task_run; return; end; $$; create or replace function swh_scheduler_start_task_run (backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set started = ts, status = 'started', metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata where task_run.backend_id = swh_scheduler_start_task_run.backend_id returning *; $$; create or replace function swh_scheduler_end_task_run (backend_id text, status task_run_status, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set ended = ts, status = swh_scheduler_end_task_run.status, metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata where task_run.backend_id = swh_scheduler_end_task_run.backend_id returning *; $$; +create type task_record as ( + task_id bigint, + task_policy task_policy, + task_status task_status, + task_run_id bigint, + arguments jsonb, + type text, + backend_id text, + metadata jsonb, + scheduled timestamptz, + started timestamptz, + ended timestamptz +); + +create index task_run_id_asc_idx on task_run(task asc, ended asc); + +create or replace function swh_scheduler_task_to_archive( + ts timestamptz, last_id bigint default -1, lim bigint default 10) + returns setof task_record + language sql stable +as $$ + select t.id as task_id, t.policy as task_policy, + t.status as task_status, tr.id as task_run_id, + t.arguments, t.type, tr.backend_id, tr.metadata, + tr.scheduled, tr.started, tr.ended + from task_run tr inner join task t on tr.task=t.id + where ((t.policy = 'oneshot' and t.status ='completed') or + (t.policy = 'recurring' and t.status ='disabled')) and + tr.ended < ts and + t.id > last_id + order by tr.task, tr.ended + limit lim; +$$; + +comment on function swh_scheduler_task_to_archive is 'Read archivable tasks function'; + +create or replace function swh_scheduler_delete_archive_tasks( + task_ids bigint[]) + returns void + language sql +as $$ + delete from task_run where task in (select * from unnest(task_ids)); + delete from task where id in (select * from unnest(task_ids)); +$$; + +comment on function swh_scheduler_delete_archive_tasks is 'Clean up archived tasks function'; + create or replace function swh_scheduler_update_task_on_task_end () returns trigger language plpgsql as $$ declare cur_task task%rowtype; cur_task_type task_type%rowtype; adjustment_factor float; new_interval interval; begin select * from task where id = new.task into cur_task; select * from task_type where type = cur_task.type into cur_task_type; case when new.status = 'permfailed' then update task set status = 'disabled' where id = cur_task.id; when new.status in ('eventful', 'uneventful') 
then case when cur_task.policy = 'oneshot' then update task set status = 'completed' where id = cur_task.id; when cur_task.policy = 'recurring' then if new.status = 'uneventful' then adjustment_factor := 1/cur_task_type.backoff_factor; else adjustment_factor := 1/cur_task_type.backoff_factor; end if; new_interval := greatest( cur_task_type.min_interval, least( cur_task_type.max_interval, adjustment_factor * cur_task.current_interval)); update task set status = 'next_run_not_scheduled', next_run = now() + new_interval, current_interval = new_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; else -- new.status in 'failed', 'lost' if cur_task.retries_left > 0 then update task set status = 'next_run_not_scheduled', - next_run = now() + cur_task_type.retry_delay, + next_run = now() + coalesce(cur_task_type.retry_delay, interval '1 hour'), retries_left = cur_task.retries_left - 1 where id = cur_task.id; else -- no retries left case when cur_task.policy = 'oneshot' then update task set status = 'disabled' where id = cur_task.id; when cur_task.policy = 'recurring' then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task.current_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; end if; -- retries end case; return null; end; $$; create trigger update_task_on_task_end after update of status on task_run for each row when (new.status NOT IN ('scheduled', 'started')) execute procedure swh_scheduler_update_task_on_task_end (); diff --git a/sql/updates/07.sql b/sql/updates/07.sql new file mode 100644 index 0000000..6082e0a --- /dev/null +++ b/sql/updates/07.sql @@ -0,0 +1,54 @@ +-- SWH Scheduler Schema upgrade +-- from_version: 06 +-- to_version: 07 +-- description: Archive 'oneshot' and disabled 'recurring' tasks (status = 'disabled') + +insert into dbversion (version, release, description) +values (7, now(), 'Work In Progress'); + +create type task_record as ( + task_id bigint, + task_policy task_policy, + task_status task_status, + task_run_id bigint, + arguments jsonb, + type text, + backend_id text, + metadata jsonb, + scheduled timestamptz, + started timestamptz, + ended timestamptz +); + +create index task_run_id_asc_idx on task_run(task asc, ended asc); + +create or replace function swh_scheduler_task_to_archive( + ts timestamptz, last_id bigint default -1, lim bigint default 10) + returns setof task_record + language sql stable +as $$ + select t.id as task_id, t.policy as task_policy, + t.status as task_status, tr.id as task_run_id, + t.arguments, t.type, tr.backend_id, tr.metadata, + tr.scheduled, tr.started, tr.ended + from task_run tr inner join task t on tr.task=t.id + where ((t.policy = 'oneshot' and t.status ='completed') or + (t.policy = 'recurring' and t.status ='disabled')) and + tr.ended < ts and + t.id > last_id + order by tr.task, tr.ended + limit lim; +$$; + +comment on function swh_scheduler_task_to_archive is 'Read archivable tasks function'; + +create or replace function swh_scheduler_delete_archive_tasks( + task_ids bigint[]) + returns void + language sql +as $$ + delete from task_run where task in (select * from unnest(task_ids)); + delete from task where id in (select * from unnest(task_ids)); +$$; + +comment on function swh_scheduler_delete_archive_tasks is 'Clean up archived tasks function'; diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index ae0cda2..3d6878b 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ 
b/swh.scheduler.egg-info/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.scheduler -Version: 0.0.19 +Version: 0.0.20 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index 4359080..352b2d1 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,51 +1,55 @@ .gitignore AUTHORS LICENSE LICENSE.Celery MANIFEST.in Makefile requirements-swh.txt requirements.txt setup.py version.txt bin/swh-worker-control +data/elastic-template.json debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format docs/.gitignore docs/Makefile docs/conf.py docs/index.rst docs/_static/.placeholder docs/_templates/.placeholder sql/.gitignore sql/Makefile +sql/swh-scheduler-data.sql sql/swh-scheduler-schema.sql sql/swh-scheduler-testdata.sql sql/updates/02.sql sql/updates/03.sql sql/updates/04.sql sql/updates/05.sql sql/updates/06.sql +sql/updates/07.sql swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt swh/scheduler/__init__.py swh/scheduler/backend.py +swh/scheduler/backend_es.py swh/scheduler/cli.py swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/listener.py swh/scheduler/celery_backend/runner.py swh/scheduler/tests/__init__.py swh/scheduler/tests/test_scheduler.py swh/scheduler/tests/test_task.py \ No newline at end of file diff --git a/swh.scheduler.egg-info/requires.txt b/swh.scheduler.egg-info/requires.txt index 7f808ca..36a6bf7 100644 --- a/swh.scheduler.egg-info/requires.txt +++ b/swh.scheduler.egg-info/requires.txt @@ -1,6 +1,7 @@ Click arrow celery +elasticsearch psycopg2 swh.core>=0.0.34 vcversioner diff --git a/swh/scheduler/backend.py b/swh/scheduler/backend.py index 7b6914c..8d903ed 100644 --- a/swh/scheduler/backend.py +++ b/swh/scheduler/backend.py @@ -1,435 +1,470 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime from functools import wraps import json import tempfile from arrow import Arrow, utcnow import psycopg2 import psycopg2.extras from psycopg2.extensions import AsIs from swh.core.config import SWHConfig def adapt_arrow(arrow): return AsIs("'%s'::timestamptz" % arrow.isoformat()) psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json) psycopg2.extensions.register_adapter(Arrow, adapt_arrow) def autocommit(fn): @wraps(fn) def wrapped(self, *args, **kwargs): autocommit = False if 'cursor' not in kwargs or not kwargs['cursor']: autocommit = True kwargs['cursor'] = self.cursor() try: ret = fn(self, *args, **kwargs) - except: + except Exception: if autocommit: self.rollback() raise if autocommit: self.commit() return ret return wrapped class SchedulerBackend(SWHConfig): """ Backend for the Software Heritage scheduling database. 
""" CONFIG_BASE_FILENAME = 'scheduler.ini' DEFAULT_CONFIG = { - 'scheduling_db': ('str', 'dbname=swh-scheduler'), + 'scheduling_db': ('str', 'dbname=softwareheritage-scheduler-dev'), } def __init__(self, **override_config): self.config = self.parse_config_file(global_config=False) self.config.update(override_config) self.db = None self.reconnect() def reconnect(self): if not self.db or self.db.closed: self.db = psycopg2.connect( dsn=self.config['scheduling_db'], cursor_factory=psycopg2.extras.RealDictCursor, ) def cursor(self): """Return a fresh cursor on the database, with auto-reconnection in case of failure""" cur = None # Get a fresh cursor and reconnect at most three times tries = 0 while True: tries += 1 try: cur = self.db.cursor() cur.execute('select 1') break except psycopg2.OperationalError: if tries < 3: self.reconnect() else: raise return cur def commit(self): """Commit a transaction""" self.db.commit() def rollback(self): """Rollback a transaction""" self.db.rollback() def copy_to(self, items, tblname, columns, cursor=None, item_cb=None): def escape(data): if data is None: return '' if isinstance(data, bytes): return '\\x%s' % binascii.hexlify(data).decode('ascii') elif isinstance(data, str): return '"%s"' % data.replace('"', '""') elif isinstance(data, (datetime.datetime, Arrow)): # We escape twice to make sure the string generated by # isoformat gets escaped return escape(data.isoformat()) elif isinstance(data, dict): return escape(json.dumps(data)) elif isinstance(data, list): return escape("{%s}" % ','.join(escape(d) for d in data)) elif isinstance(data, psycopg2.extras.Range): # We escape twice here too, so that we make sure # everything gets passed to copy properly return escape( '%s%s,%s%s' % ( '[' if data.lower_inc else '(', '-infinity' if data.lower_inf else escape(data.lower), 'infinity' if data.upper_inf else escape(data.upper), ']' if data.upper_inc else ')', ) ) else: # We don't escape here to make sure we pass literals properly return str(data) with tempfile.TemporaryFile('w+') as f: for d in items: if item_cb is not None: item_cb(d) line = [escape(d.get(k)) for k in columns] f.write(','.join(line)) f.write('\n') f.seek(0) cursor.copy_expert('COPY %s (%s) FROM STDIN CSV' % ( tblname, ', '.join(columns)), f) task_type_keys = [ 'type', 'description', 'backend_name', 'default_interval', 'min_interval', 'max_interval', 'backoff_factor', 'max_queue_length', 'num_retries', 'retry_delay', ] def _format_query(self, query, keys): """Format a query with the given keys""" query_keys = ', '.join(keys) placeholders = ', '.join(['%s'] * len(keys)) return query.format(keys=query_keys, placeholders=placeholders) def _format_multiquery(self, query, keys, values): """Format a query with placeholders generated for multiple values""" query_keys = ', '.join(keys) placeholders = '), ('.join( [', '.join(['%s'] * len(keys))] * len(values) ) ret_values = sum([[value[key] for key in keys] for value in values], []) return ( query.format(keys=query_keys, placeholders=placeholders), ret_values, ) @autocommit def create_task_type(self, task_type, cursor=None): """Create a new task type ready for scheduling. 
Args: task_type (dict): a dictionary with the following keys: - type (str): an identifier for the task type - description (str): a human-readable description of what the task does - backend_name (str): the name of the task in the job-scheduling backend - default_interval (datetime.timedelta): the default interval between two task runs - min_interval (datetime.timedelta): the minimum interval between two task runs - max_interval (datetime.timedelta): the maximum interval between two task runs - backoff_factor (float): the factor by which the interval changes at each run - max_queue_length (int): the maximum length of the task queue for this task type """ query = self._format_query( """insert into task_type ({keys}) values ({placeholders})""", self.task_type_keys, ) cursor.execute(query, [task_type[key] for key in self.task_type_keys]) @autocommit def get_task_type(self, task_type_name, cursor=None): """Retrieve the task type with id task_type_name""" query = self._format_query( "select {keys} from task_type where type=%s", self.task_type_keys, ) cursor.execute(query, (task_type_name,)) ret = cursor.fetchone() return ret @autocommit def get_task_types(self, cursor=None): query = self._format_query( "select {keys} from task_type", self.task_type_keys, ) cursor.execute(query) ret = cursor.fetchall() return ret task_create_keys = [ 'type', 'arguments', 'next_run', 'policy', 'retries_left', ] task_keys = task_create_keys + ['id', 'current_interval', 'status'] @autocommit def create_tasks(self, tasks, cursor=None): """Create new tasks. Args: tasks (list): each task is a dictionary with the following keys: - type (str): the task type - arguments (dict): the arguments for the task runner, keys: - args (list of str): arguments - kwargs (dict str -> str): keyword arguments - next_run (datetime.datetime): the next scheduled run for the task Returns: - a list of created task ids. + a list of created tasks. 
""" cursor.execute('select swh_scheduler_mktemp_task()') self.copy_to(tasks, 'tmp_task', self.task_create_keys, cursor) query = self._format_query( 'select {keys} from swh_scheduler_create_tasks_from_temp()', self.task_keys, ) cursor.execute(query) return cursor.fetchall() @autocommit def disable_tasks(self, task_ids, cursor=None): """Disable the tasks whose ids are listed.""" query = "UPDATE task SET status = 'disabled' WHERE id IN %s" cursor.execute(query, (tuple(task_ids),)) return None @autocommit def get_tasks(self, task_ids, cursor=None): """Retrieve the info of tasks whose ids are listed.""" query = self._format_query('select {keys} from task where id in %s', self.task_keys) cursor.execute(query, (tuple(task_ids),)) return cursor.fetchall() @autocommit def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None, cursor=None): """Fetch the list of ready tasks Args: task_type (str): filtering task per their type timestamp (datetime.datetime): peek tasks that need to be executed before that timestamp num_tasks (int): only peek at num_tasks tasks Returns: a list of tasks """ if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_peek_ready_tasks(%s, %s, %s)', (task_type, timestamp, num_tasks) ) return cursor.fetchall() @autocommit def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None, cursor=None): """Fetch the list of ready tasks, and mark them as scheduled Args: task_type (str): filtering task per their type timestamp (datetime.datetime): grab tasks that need to be executed before that timestamp num_tasks (int): only grab num_tasks tasks Returns: a list of tasks """ if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_grab_ready_tasks(%s, %s, %s)', (task_type, timestamp, num_tasks) ) return cursor.fetchall() task_run_create_keys = ['task', 'backend_id', 'scheduled', 'metadata'] @autocommit def schedule_task_run(self, task_id, backend_id, metadata=None, timestamp=None, cursor=None): """Mark a given task as scheduled, adding a task_run entry in the database. Args: task_id (int): the identifier for the task being scheduled backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: a fresh task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)', (task_id, backend_id, metadata, timestamp) ) return cursor.fetchone() @autocommit def mass_schedule_task_runs(self, task_runs, cursor=None): """Schedule a bunch of task runs. Args: task_runs (list): a list of dicts with keys: - task (int): the identifier for the task being scheduled - backend_id (str): the identifier of the job in the backend - metadata (dict): metadata to add to the task_run entry - scheduled (datetime.datetime): the instant the event occurred Returns: None """ cursor.execute('select swh_scheduler_mktemp_task_run()') self.copy_to(task_runs, 'tmp_task_run', self.task_run_create_keys, cursor) cursor.execute('select swh_scheduler_schedule_task_run_from_temp()') @autocommit def start_task_run(self, backend_id, metadata=None, timestamp=None, cursor=None): """Mark a given task as started, updating the corresponding task_run entry in the database. 
Args: backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: the updated task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_start_task_run(%s, %s, %s)', (backend_id, metadata, timestamp) ) return cursor.fetchone() @autocommit def end_task_run(self, backend_id, status, metadata=None, timestamp=None, result=None, cursor=None): """Mark a given task as ended, updating the corresponding task_run entry in the database. Args: backend_id (str): the identifier of the job in the backend status (str): how the task ended; one of: 'eventful', 'uneventful', 'failed' metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: the updated task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_end_task_run(%s, %s, %s, %s)', (backend_id, status, metadata, timestamp) ) return cursor.fetchone() + + @autocommit + def filter_task_to_archive(self, timestamp, limit=10, last_id=-1, + cursor=None): + """Returns the list of task/task_run prior to a given date to archive. + + """ + last_task_run_id = None + while True: + row = None + cursor.execute( + "select * from swh_scheduler_task_to_archive(%s, %s, %s)", + (timestamp, last_id, limit) + ) + for row in cursor: + # nested type index does not accept bare values + # transform it as a dict to comply with this + row['arguments']['args'] = { + i: v for i, v in enumerate(row['arguments']['args']) + } + yield row + + if not row: + break + _id = row.get('task_id') + _task_run_id = row.get('task_run_id') + if last_id == _id and last_task_run_id == _task_run_id: + break + last_id = _id + last_task_run_id = _task_run_id + + @autocommit + def delete_archive_tasks(self, task_ids, cursor=None): + cursor.execute("select * from swh_scheduler_delete_archive_tasks(%s)", + (task_ids, )) diff --git a/swh/scheduler/backend_es.py b/swh/scheduler/backend_es.py new file mode 100644 index 0000000..e280e9f --- /dev/null +++ b/swh/scheduler/backend_es.py @@ -0,0 +1,166 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + + +"""Elastic Search backend + +""" + + +from swh.core import utils +from swh.core.config import SWHConfig +from elasticsearch import Elasticsearch +from elasticsearch.helpers import streaming_bulk + + +class SWHElasticSearchClient(SWHConfig): + DEFAULT_BASE_FILENAME = 'backend/elastic' + + DEFAULT_CONFIG = { + 'storage_nodes': ('[dict]', [{'host': 'localhost', 'port': 9200}]), + 'index_name_prefix': ('str', 'swh-tasks'), + 'client_options': ('dict', { + 'sniff': True, + 'http_compress': False, + }) + } + + def __init__(self, **config): + if config: + self.config = config + else: + self.config = self.parse_config_file() + + options = self.config['client_options'] + sniff = options['sniff'] + self.storage = Elasticsearch( + # nodes to use by default + self.config['storage_nodes'], + # auto detect cluster's status + sniff_on_start=sniff, sniff_on_connection_fail=sniff, + sniffer_timeout=60, + # compression or not + http_compress=options['http_compress']) + self.index_name_prefix = self.config['index_name_prefix'] 
+ # document's index type (cf. ../../data/elastic-template.json) + self.doc_type = 'task' + + def compute_index_name(self, year, month): + """Given a year, month, compute the index's name. + + """ + return '%s-%s-%s' % ( + self.index_name_prefix, year, '%02d' % month) + + def index(self, data): + """Index given data to elasticsearch. + + The field 'ended' in data is used to compute the index to + index data to. + + """ + date = data['ended'] + index_name = self.compute_index_name(date.year, date.month) + return self.storage.index(index=index_name, + doc_type=self.doc_type, + body=data) + + def mget(self, index_name, doc_ids, chunk_size=500, + source=True, log=None): + """Retrieve document's full content according to their ids as per + source's setup. + + The `source` permits to retrieve only what's of interest to + us, e.g: + - source=True ; gives back the original indexed data + - source=False ; returns without the original _source field + - source=['task_id'] ; returns only task_id in the _source field + + Args: + index_name (str): Name of the concerned index. + doc_ids (generator): Generator of ids to retrieve + chunk_size (int): Number of documents chunk to send for retrieval + source (bool/[str]): Source of information to return + + Yields: + document indexed as per source's setup + + """ + if isinstance(source, list): + source = {'_source': ','.join(source)} + else: + source = {'_source': str(source).lower()} + + for ids in utils.grouper(doc_ids, n=1000): + res = self.storage.mget(body={'ids': list(ids)}, + index=index_name, + doc_type=self.doc_type, + params=source) + if not res: + if log: + log.error('Error during retrieval of data, skipping!') + continue + + for doc in res['docs']: + found = doc.get('found') + if not found: + msg = 'Doc id %s not found, not indexed yet' % doc['_id'] + if log: + log.warning(msg) + continue + yield doc['_source'] + + def _streaming_bulk(self, index_name, doc_stream, chunk_size=500, + log=None): + """Bulk index data and returns the successful indexed data's + identifier. + + Args: + index_name (str): Name of the concerned index. + doc_stream (generator): Generator of documents to index + chunk_size (int): Number of documents chunk to send for indexation + + Yields: + document id indexed + + """ + actions = ({'_index': index_name, + '_op_type': 'index', + '_type': self.doc_type, + '_source': data} for data in doc_stream) + for ok, result in streaming_bulk(client=self.storage, + actions=actions, + chunk_size=chunk_size, + raise_on_exception=False): + if not ok: + if log: + log.error('Error during %s indexation. Skipping.' % result) + continue + yield result['index']['_id'] + + def streaming_bulk(self, index_name, doc_stream, chunk_size=500, + source=True, log=None): + """Bulk index data and returns the successful indexed data as per + source's setup. + + the `source` permits to retrieve only what's of interest to + us, e.g: + + - source=True ; gives back the original indexed data + - source=False ; returns without the original _source field + - source=['task_id'] ; returns only task_id in the _source field + + Args: + index_name (str): Name of the concerned index. 
+ doc_stream (generator): Document generator to index + chunk_size (int): Number of documents chunk to send + source (bool, [str]): the information to return + + """ + + indexed_ids = self._streaming_bulk( + index_name, doc_stream, chunk_size=chunk_size, log=log) + yield from self.mget(index_name, indexed_ids, chunk_size=chunk_size, + source=source, log=log) diff --git a/swh/scheduler/celery_backend/config.py b/swh/scheduler/celery_backend/config.py index b412618..b31bfc6 100644 --- a/swh/scheduler/celery_backend/config.py +++ b/swh/scheduler/celery_backend/config.py @@ -1,197 +1,201 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import os import urllib.parse from celery import Celery from celery.signals import setup_logging from celery.utils.log import ColorFormatter from celery.worker.control import Panel from kombu import Exchange, Queue from kombu.five import monotonic as _monotonic import requests from swh.core.config import load_named_config from swh.core.logger import JournalHandler DEFAULT_CONFIG_NAME = 'worker' CONFIG_NAME_ENVVAR = 'SWH_WORKER_INSTANCE' CONFIG_NAME_TEMPLATE = 'worker/%s' DEFAULT_CONFIG = { 'task_broker': ('str', 'amqp://guest@localhost//'), 'task_modules': ('list[str]', []), 'task_queues': ('list[str]', []), 'task_soft_time_limit': ('int', 0), } @setup_logging.connect def setup_log_handler(loglevel=None, logfile=None, format=None, colorize=None, **kwargs): """Setup logging according to Software Heritage preferences. We use the command-line loglevel for tasks only, as we never really care about the debug messages from celery. """ if loglevel is None: loglevel = logging.DEBUG formatter = logging.Formatter(format) root_logger = logging.getLogger('') root_logger.setLevel(logging.INFO) if loglevel == logging.DEBUG: color_formatter = ColorFormatter(format) if colorize else formatter console = logging.StreamHandler() console.setLevel(logging.DEBUG) console.setFormatter(color_formatter) root_logger.addHandler(console) systemd_journal = JournalHandler() systemd_journal.setLevel(logging.DEBUG) systemd_journal.setFormatter(formatter) root_logger.addHandler(systemd_journal) celery_logger = logging.getLogger('celery') celery_logger.setLevel(logging.INFO) # Silence useless "Starting new HTTP connection" messages urllib3_logger = logging.getLogger('urllib3') urllib3_logger.setLevel(logging.WARNING) swh_logger = logging.getLogger('swh') swh_logger.setLevel(loglevel) # get_task_logger makes the swh tasks loggers children of celery.task celery_task_logger = logging.getLogger('celery.task') celery_task_logger.setLevel(loglevel) @Panel.register def monotonic(state): """Get the current value for the monotonic clock""" return {'monotonic': _monotonic()} class TaskRouter: """Route tasks according to the task_queue attribute in the task class""" def route_for_task(self, task, args=None, kwargs=None): task_class = app.tasks[task] if hasattr(task_class, 'task_queue'): return {'queue': task_class.task_queue} return None class CustomCelery(Celery): def get_queue_stats(self, queue_name): """Get the statistics regarding a queue on the broker. Arguments: queue_name: name of the queue to check Returns a dictionary raw from the RabbitMQ management API. 
Interesting keys: - consumers (number of consumers for the queue) - messages (number of messages in queue) - messages_unacknowledged (number of messages currently being processed) Documentation: https://www.rabbitmq.com/management.html#http-api """ conn_info = self.connection().info() url = 'http://{hostname}:{port}/api/queues/{vhost}/{queue}'.format( hostname=conn_info['hostname'], port=conn_info['port'] + 10000, vhost=urllib.parse.quote(conn_info['virtual_host'], safe=''), queue=urllib.parse.quote(queue_name, safe=''), ) credentials = (conn_info['userid'], conn_info['password']) r = requests.get(url, auth=credentials) if r.status_code != 200: raise ValueError('Got error %s when reading queue stats: %s' % ( r.status_code, r.json())) return r.json() def get_queue_length(self, queue_name): """Shortcut to get a queue's length""" return self.get_queue_stats(queue_name)['messages'] INSTANCE_NAME = os.environ.get(CONFIG_NAME_ENVVAR) if INSTANCE_NAME: CONFIG_NAME = CONFIG_NAME_TEMPLATE % INSTANCE_NAME else: CONFIG_NAME = DEFAULT_CONFIG_NAME # Load the Celery config CONFIG = load_named_config(CONFIG_NAME, DEFAULT_CONFIG) # Celery Queues CELERY_QUEUES = [Queue('celery', Exchange('celery'), routing_key='celery')] for queue in CONFIG['task_queues']: CELERY_QUEUES.append(Queue(queue, Exchange(queue), routing_key=queue)) # Instantiate the Celery app app = CustomCelery() app.conf.update( # The broker BROKER_URL=CONFIG['task_broker'], # Timezone configuration: all in UTC CELERY_ENABLE_UTC=True, CELERY_TIMEZONE='UTC', # Imported modules CELERY_IMPORTS=CONFIG['task_modules'], # Time (in seconds, or a timedelta object) for when after stored task # tombstones will be deleted. None means to never expire results. CELERY_TASK_RESULT_EXPIRES=None, + # A string identifying the default serialization method to use. Can + # be json (default), pickle, yaml, msgpack, or any custom + # serialization methods that have been registered with + CELERY_TASK_SERIALIZER='msgpack', # Late ack means the task messages will be acknowledged after the task has # been executed, not just before, which is the default behavior. CELERY_ACKS_LATE=True, # A string identifying the default serialization method to use. # Can be pickle (default), json, yaml, msgpack or any custom serialization # methods that have been registered with kombu.serialization.registry CELERY_ACCEPT_CONTENT=['msgpack', 'json', 'pickle'], # If True the task will report its status as “started” # when the task is executed by a worker. CELERY_TRACK_STARTED=True, # Default compression used for task messages. Can be gzip, bzip2 # (if available), or any custom compression schemes registered # in the Kombu compression registry. # CELERY_MESSAGE_COMPRESSION='bzip2', # Disable all rate limits, even if tasks has explicit rate limits set. # (Disabling rate limits altogether is recommended if you don’t have any # tasks using them.) CELERY_DISABLE_RATE_LIMITS=True, # Task hard time limit in seconds. The worker processing the task will be # killed and replaced with a new one when this is exceeded. # CELERYD_TASK_TIME_LIMIT=3600, # Task soft time limit in seconds. # The SoftTimeLimitExceeded exception will be raised when this is exceeded. # The task can catch this to e.g. clean up before the hard time limit # comes. 
CELERYD_TASK_SOFT_TIME_LIMIT=CONFIG['task_soft_time_limit'], # Task routing CELERY_ROUTES=TaskRouter(), # Task queues this worker will consume from CELERY_QUEUES=CELERY_QUEUES, # Allow pool restarts from remote CELERYD_POOL_RESTARTS=True, # Do not prefetch tasks CELERYD_PREFETCH_MULTIPLIER=1, # Send events CELERY_SEND_EVENTS=True, # Do not send useless task_sent events CELERY_SEND_TASK_SENT_EVENT=False, ) diff --git a/swh/scheduler/celery_backend/runner.py b/swh/scheduler/celery_backend/runner.py index 9d48c74..81f54fb 100644 --- a/swh/scheduler/celery_backend/runner.py +++ b/swh/scheduler/celery_backend/runner.py @@ -1,84 +1,84 @@ # Copyright (C) 2015-2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import time import arrow from celery import group from ..backend import SchedulerBackend from .config import app as main_app # Max batch size for tasks MAX_NUM_TASKS = 10000 def run_ready_tasks(backend, app): """Run all tasks that are ready""" while True: throttled = False cursor = backend.cursor() task_types = {} pending_tasks = [] for task_type in backend.get_task_types(cursor=cursor): task_type_name = task_type['type'] task_types[task_type_name] = task_type max_queue_length = task_type['max_queue_length'] if max_queue_length: backend_name = task_type['backend_name'] queue_name = app.tasks[backend_name].task_queue queue_length = app.get_queue_length(queue_name) num_tasks = min(max_queue_length - queue_length, MAX_NUM_TASKS) else: num_tasks = MAX_NUM_TASKS if num_tasks > 0: pending_tasks.extend( backend.grab_ready_tasks(task_type_name, num_tasks=num_tasks, cursor=cursor)) if not pending_tasks: break celery_tasks = [] for task in pending_tasks: args = task['arguments']['args'] kwargs = task['arguments']['kwargs'] celery_task = app.tasks[ task_types[task['type']]['backend_name'] ].s(*args, **kwargs) celery_tasks.append(celery_task) group_result = group(celery_tasks).delay() backend_tasks = [{ 'task': task['id'], 'backend_id': group_result.results[i].id, 'scheduled': arrow.utcnow(), } for i, task in enumerate(pending_tasks)] backend.mass_schedule_task_runs(backend_tasks, cursor=cursor) backend.commit() if throttled: time.sleep(10) if __name__ == '__main__': for module in main_app.conf.CELERY_IMPORTS: __import__(module) main_backend = SchedulerBackend() try: run_ready_tasks(main_backend, main_app) - except: + except Exception: main_backend.rollback() raise diff --git a/swh/scheduler/cli.py b/swh/scheduler/cli.py index e56ff4e..605fece 100644 --- a/swh/scheduler/cli.py +++ b/swh/scheduler/cli.py @@ -1,176 +1,260 @@ -# Copyright (C) 2016 The Software Heritage developers +# Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import arrow +import click import csv +import itertools import json import locale +import logging -import arrow -import click - +from swh.core import utils from .backend import SchedulerBackend +from .backend_es import SWHElasticSearchClient locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = 
DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def pretty_print_list(list, indent): """Pretty-print a list""" return ''.join('%s%s\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent): """Pretty-print a list""" - return ''.join('%s%s:%s\n' % + return ''.join('%s%s: %s\n' % (' ' * indent, click.style(key, bold=True), value) for key, value in dict.items()) def pretty_print_task(task): """Pretty-print a task""" next_run = arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'], '\n', click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @click.group(context_settings=CONTEXT_SETTINGS) @click.option( '--database', '-d', help='Scheduling database DSN', default='host=db.internal.softwareheritage.org ' 'dbname=softwareheritage-scheduler user=guest') @click.pass_context def cli(ctx, database): """Software Heritage Scheduler CLI interface""" override_config = {} if database: override_config['scheduling_db'] = database ctx.obj = SchedulerBackend(**override_config) @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], - type=click.Choice(['type', 'args', 'kwargs', 'next_run']), + type=click.Choice([ + 'type', 'args', 'kwargs', 'policy', 'next_run']), help='columns present in the CSV file') +@click.option('--delimiter', '-d', default=',') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context -def schedule_tasks(ctx, columns, file): +def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). 
+ Usage sample: + + cat scheduling-task.txt | \ + python3 -m swh.scheduler.cli \ + --database 'service=swh-scheduler-dev' \ + task schedule \ + --columns type --columns kwargs --columns policy \ + --delimiter ';' - + """ tasks = [] now = arrow.utcnow() - reader = csv.reader(file) - + reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = ctx.obj.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('list-pending') @click.option('--task-type', '-t', required=True, help='The tasks\' type concerned by the listing') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context def list_pending_tasks(ctx, task_type, limit, before): """List the tasks that are going to be run. You can override the number of tasks to fetch """ pending = ctx.obj.peek_ready_tasks(task_type, timestamp=before, num_tasks=limit) output = [ 'Found %d tasks\n' % len(pending) ] for task in pending: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) +@task.command('archive') +@click.option('--before', '-b', default=None, + help='Tasks whose ended date is prior to this date will be archived.') +@click.option('--batch-index', default=1000, type=click.INT, + help='Batch size of tasks to archive') +@click.option('--batch-clean', default=1000, type=click.INT, + help='Batch size of tasks to clean up after archival') +@click.option('--dry-run/--no-dry-run', is_flag=True, default=False, + help='Only list what would be archived, without doing it.') +@click.option('--verbose', is_flag=True, default=False, + help='Verbose (debug) logging.') +@click.option('--cleanup/--no-cleanup', is_flag=True, default=True, + help='Clean up archived tasks (default)') +@click.option('--start-from', type=click.INT, default=-1, + help='(Optional) Task id to start archiving from. Defaults to -1.') +@click.pass_context +def archive_tasks(ctx, before, batch_index, batch_clean, + dry_run, verbose, cleanup, start_from): + """Archive task/task_run entries whose (task policy is 'oneshot' and task status + is 'completed') or (task policy is 'recurring' and task status is + 'disabled'). + + With the --dry-run flag set, only list what would be archived. 
+ + """ + es_client = SWHElasticSearchClient() + logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) + log = logging.getLogger('swh.scheduler.cli.archive') + logging.getLogger('urllib3').setLevel(logging.WARN) + logging.getLogger('elasticsearch').setLevel(logging.WARN) + if dry_run: + log.info('**DRY-RUN**, only reading the db') + + if not before: # Default to archive all tasks prior to now's last month + before = arrow.utcnow().format('YYYY-MM-01') + + log.debug('index: %s; cleanup: %s' % ( + not dry_run, not dry_run and cleanup)) + + def group_by_index_name(data, es_client=es_client): + ended = data['ended'] + return es_client.compute_index_name(ended.year, ended.month) + + def index_data(before, last_id, batch_index, backend=ctx.obj): + tasks_in = backend.filter_task_to_archive( + before, last_id=last_id, limit=batch_index) + for index_name, tasks_group in itertools.groupby( + tasks_in, key=group_by_index_name): + log.debug('Send for indexation to index %s' % index_name) + if dry_run: + for task in tasks_group: + yield task + continue + + yield from es_client.streaming_bulk( + index_name, tasks_group, source=['task_id'], + chunk_size=batch_index, log=log) + + gen = index_data(before, last_id=start_from, batch_index=batch_index) + if cleanup: + for task_ids in utils.grouper(gen, n=batch_clean): + _task_ids = [t['task_id'] for t in task_ids] + log.debug('Cleanup %s tasks' % (len(_task_ids, ))) + if dry_run: # no clean up + continue + ctx.obj.delete_archive_tasks(_task_ids) + else: + for task_id in gen: + log.info('Indexed: %s' % task_id) + + @cli.group('task-run') @click.pass_context def task_run(ctx): """Manipulate task runs.""" pass if __name__ == '__main__': cli() diff --git a/version.txt b/version.txt index fecc3cb..ad75280 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.19-0-g255d851 \ No newline at end of file +v0.0.20-0-gd6b393d \ No newline at end of file