diff --git a/.gitignore b/.gitignore index f5fc2ae..898ba80 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,9 @@ *.pyc *.sw? *~ .coverage .eggs/ __pycache__ *.egg-info/ -version.txt \ No newline at end of file +version.txt +/.hypothesis/ diff --git a/PKG-INFO b/PKG-INFO index 5c734d8..7d558c2 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.scheduler -Version: 0.0.26 +Version: 0.0.27 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/data/elastic-template.json b/data/elastic-template.json index c12d602..3d9e75f 100644 --- a/data/elastic-template.json +++ b/data/elastic-template.json @@ -1,52 +1,53 @@ { "order": 0, "index_patterns": ["swh-tasks-*"], "settings": { "index": { "codec": "best_compression", - "refresh_interval": "30s" + "refresh_interval": "30s", + "number_of_shards": 1 } }, "mappings" : { "task" : { "_source" : { "enabled": true}, "properties": { "task_id": {"type": "double"}, "task_policy": {"type": "text"}, "task_status": {"type": "text"}, "task_run_id": {"type": "double"}, "arguments": { "type": "object", "properties" : { "args": { "type": "nested", "dynamic": false }, "kwargs": { "type": "text" } } }, "type": {"type": "text"}, "backend_id": {"type": "text"}, "metadata": { "type": "object", "enabled" : false }, "scheduled": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" }, "started": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" }, "ended": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis" } } } }, "aliases": {} } diff --git a/debian/control b/debian/control index 82c3ef3..f08c2a1 100644 --- a/debian/control +++ b/debian/control @@ -1,24 +1,32 @@ Source: swh-scheduler Maintainer: Software Heritage developers Section: python Priority: optional Build-Depends: debhelper (>= 9), dh-python (>= 2), python3-all, python3-arrow, python3-celery, python3-click, python3-elasticsearch (>= 5.4.0), python3-flask, + python3-hypothesis, + python3-kombu, python3-nose, python3-psycopg2, python3-setuptools, - python3-swh.core (>= 0.0.38~), + python3-swh.core (>= 0.0.40~), python3-vcversioner Standards-Version: 3.9.6 Homepage: https://forge.softwareheritage.org/diffusion/DSCH/ Package: python3-swh.scheduler Architecture: all -Depends: python3-swh.core (>= 0.0.38~), ${misc:Depends}, ${python3:Depends} +Depends: python3-swh.core (>= 0.0.40~), ${misc:Depends}, ${python3:Depends} Description: Software Heritage Scheduler + +Package: python3-swh.scheduler.updater +Architecture: all +Depends: python3-swh.scheduler (= ${binary:Version}), + ${misc:Depends}, ${python3:Depends} +Description: Software Heritage Scheduler Updater diff --git a/debian/rules b/debian/rules index d5e8b00..0f2363a 100755 --- a/debian/rules +++ b/debian/rules @@ -1,11 +1,21 @@ #!/usr/bin/make -f export PYBUILD_NAME=swh.scheduler export PYBUILD_TEST_ARGS=-sv -a !db,!fs %: dh $@ --with python3 --buildsystem=pybuild override_dh_install: dh_install rm -v $(CURDIR)/debian/python3-*/usr/lib/python*/dist-packages/swh/__init__.py + rm -rf $(CURDIR)/debian/python3-*/usr/lib/python*/dist-packages/.hypothesis + + for pyvers in $(shell py3versions -vr); do \ + mkdir -p 
$(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/ ; \ + mv $(CURDIR)/debian/python3-swh.scheduler/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/ \ + $(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/ ; \ + mkdir -p $(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/tests/ ; \ + mv $(CURDIR)/debian/python3-swh.scheduler/usr/lib/python$$pyvers/dist-packages/swh/scheduler/tests/updater/ \ + $(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/tests/ ; \ + done diff --git a/requirements-swh.txt b/requirements-swh.txt index a152b02..9e6b6d1 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1 +1 @@ -swh.core >= 0.0.38 +swh.core >= 0.0.40 diff --git a/requirements.txt b/requirements.txt index df47e53..0bb53f8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,13 @@ # Add here external Python modules dependencies, one per line. Module names # should match https://pypi.python.org/pypi names. For the full spec or # dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html arrow celery Click elasticsearch>5.4 flask +kombu psycopg2 vcversioner +hypothesis diff --git a/sql/swh-scheduler-data.sql b/sql/swh-scheduler-data.sql index 50bd8db..c39b6bc 100644 --- a/sql/swh-scheduler-data.sql +++ b/sql/swh-scheduler-data.sql @@ -1,77 +1,91 @@ insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'swh-loader-mount-dump-and-load-svn-repository', 'Loading svn repositories from svn dump', 'swh.loader.svn.tasks.MountAndLoadSvnRepositoryTsk', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, num_retries, max_queue_length) values ( 'swh-deposit-archive-loading', 'Loading deposit archive into swh through swh-loader-tar', 'swh.deposit.loader.tasks.LoadDepositArchiveTsk', '1 day', '1 day', '1 day', 1, 3, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, num_retries, max_queue_length) values ( 'swh-deposit-archive-checks', 'Pre-checking deposit step before loading into swh archive', 'swh.deposit.loader.tasks.ChecksDepositTsk', '1 day', '1 day', '1 day', 1, 3, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'swh-vault-cooking', 'Cook a Vault bundle', 'swh.vault.cooking_tasks.SWHCookingTask', '1 day', '1 day', '1 day', 1, 10000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'origin-load-hg', 'Loading mercurial repository swh-loader-mercurial', 'swh.loader.mercurial.tasks.LoadMercurialTsk', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'origin-load-archive-hg', 'Loading archive mercurial repository swh-loader-mercurial', 'swh.loader.mercurial.tasks.LoadArchiveMercurialTsk', '1 day', '1 day', '1 day', 1, 1000); + +insert into task_type( + type, + description, + backend_name, + default_interval, min_interval, max_interval, backoff_factor, + max_queue_length) 
+values ( + 'origin-update-git', + 'Update an origin of type git', + 'swh.loader.git.tasks.UpdateGitRepository', + '64 days', + '12:00:00', + '64 days', 2, 100000); diff --git a/sql/swh-scheduler-schema.sql b/sql/swh-scheduler-schema.sql index f1f0d14..4d39319 100644 --- a/sql/swh-scheduler-schema.sql +++ b/sql/swh-scheduler-schema.sql @@ -1,357 +1,505 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; insert into dbversion (version, release, description) - values (8, now(), 'Work In Progress'); + values (9, now(), 'Work In Progress'); create table task_type ( type text primary key, description text not null, backend_name text not null, default_interval interval, min_interval interval, max_interval interval, backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; +create type task_priority as enum('high', 'normal', 'low'); +comment on type task_priority is 'Priority of the given task'; + +create table priority_ratio( + id task_priority primary key, + ratio float not null +); + +comment on table priority_ratio is 'Oneshot task''s reading ratio per priority'; +comment on column priority_ratio.id is 'Task priority id'; +comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority'; + +insert into priority_ratio (id, ratio) values ('high', 0.5); +insert into priority_ratio (id, ratio) values ('normal', 0.3); +insert into priority_ratio (id, ratio) values ('low', 0.2); + create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, current_interval interval, status task_status not null, policy task_policy not null default 'recurring', retries_left bigint not null default 0, + priority task_priority references priority_ratio(id), check (policy <> 'recurring' or current_interval is not null) ); + comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. 
' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; +comment on column task.priority is 'Priority of the given task'; create index on task(type); create index on task(next_run); create index task_args on task using btree ((arguments -> 'args')); create index task_kwargs on task using gin ((arguments -> 'kwargs')); +create index on task(priority); create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. ' 'For instance, the worker that took on the job, ' 'or the logs for the run.'; create index on task_run(task); create index on task_run(backend_id); create or replace function swh_scheduler_mktemp_task () returns void language sql as $$ create temporary table tmp_task ( like task excluding indexes ) on commit drop; alter table tmp_task drop column id, drop column current_interval, drop column status, alter column policy drop not null, alter column retries_left drop not null; $$; comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation'; create or replace function swh_scheduler_create_tasks_from_temp () returns setof task language plpgsql as $$ begin return query - insert into task (type, arguments, next_run, status, current_interval, policy, retries_left) + insert into task (type, arguments, next_run, status, current_interval, policy, retries_left, priority) select type, arguments, next_run, 'next_run_not_scheduled', - (select default_interval from task_type tt where tt.type = tmp_task.type), + (select default_interval from task_type tt where tt.type = t.type), coalesce(policy, 'recurring'), - coalesce(retries_left, (select num_retries from task_type tt where tt.type = tmp_task.type), 0) - from tmp_task + coalesce(retries_left, (select num_retries from task_type tt where tt.type = t.type), 0), + coalesce(priority, null) + from tmp_task t + where not exists(select 1 + from task + where type=t.type and + arguments=t.arguments and + policy=t.policy and + status='next_run_not_scheduled') returning task.*; end; $$; comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table'; -create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(), - num_tasks bigint default NULL) +create or replace function swh_scheduler_peek_no_priority_tasks (task_type text, ts timestamptz default now(), + num_tasks bigint default NULL) returns setof task language sql stable as $$ select * from task where next_run <= ts and type = 
task_type and status = 'next_run_not_scheduled' + and priority is null order by next_run - limit num_tasks; + limit num_tasks + for update skip locked; +$$; + +comment on function swh_scheduler_peek_no_priority_tasks (text, timestamptz, bigint) +is 'Retrieve tasks without priority'; + +create or replace function swh_scheduler_nb_priority_tasks(num_tasks_priority bigint, task_priority task_priority) + returns numeric + language sql stable +as $$ + select ceil(num_tasks_priority * (select ratio from priority_ratio where id = task_priority)) :: numeric +$$; + +comment on function swh_scheduler_nb_priority_tasks (bigint, task_priority) +is 'Given a priority task and a total number, compute the number of tasks to read'; + +create or replace function swh_scheduler_peek_tasks_with_priority (task_type text, ts timestamptz default now(), + num_tasks_priority bigint default NULL, + task_priority task_priority default 'normal') + returns setof task + language sql + stable +as $$ + select * + from task t + where t.next_run <= ts + and t.type = task_type + and t.status = 'next_run_not_scheduled' + and t.priority = task_priority + order by t.next_run + limit num_tasks_priority + for update skip locked; $$; +comment on function swh_scheduler_peek_tasks_with_priority(text, timestamptz, bigint, task_priority) +is 'Retrieve tasks with a given priority'; + +create or replace function swh_scheduler_peek_priority_tasks (task_type text, ts timestamptz default now(), + num_tasks_priority bigint default NULL) + returns setof task + language plpgsql +as $$ +declare + r record; + count_row bigint; + nb_diff bigint; + nb_high bigint; + nb_normal bigint; + nb_low bigint; +begin + -- expected values to fetch + select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'high') into nb_high; + select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'normal') into nb_normal; + select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'low') into nb_low; + nb_diff := 0; + count_row := 0; + + for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_high, 'high') + loop + count_row := count_row + 1; + return next r; + end loop; + + if count_row < nb_high then + nb_normal := nb_normal + nb_high - count_row; + end if; + + count_row := 0; + for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_normal, 'normal') + loop + count_row := count_row + 1; + return next r; + end loop; + + if count_row < nb_normal then + nb_low := nb_low + nb_normal - count_row; + end if; + + return query select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_low, 'low'); +end +$$; + +comment on function swh_scheduler_peek_priority_tasks(text, timestamptz, bigint) +is 'Retrieve priority tasks'; + +create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(), + num_tasks bigint default NULL, num_tasks_priority bigint default NULL) + returns setof task + language plpgsql +as $$ +declare + r record; + count_row bigint; + nb_diff bigint; + nb_tasks bigint; +begin + count_row := 0; + + for r in select * from swh_scheduler_peek_priority_tasks(task_type, ts, num_tasks_priority) + order by priority, next_run + loop + count_row := count_row + 1; + return next r; + end loop; + + if count_row < num_tasks_priority then + nb_tasks := num_tasks + num_tasks_priority - count_row; + else + nb_tasks := num_tasks; + end if; + + for r in select * from swh_scheduler_peek_no_priority_tasks(task_type, ts, nb_tasks) + order by priority, next_run + loop + return next 
r; + end loop; + + return; +end +$$; + +comment on function swh_scheduler_peek_ready_tasks(text, timestamptz, bigint, bigint) +is 'Retrieve tasks with/without priority in order'; + create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(), - num_tasks bigint default NULL) + num_tasks bigint default NULL, + num_tasks_priority bigint default NULL) returns setof task language sql as $$ update task set status='next_run_scheduled' from ( - select id from task - where next_run <= ts - and type = task_type - and status='next_run_not_scheduled' - order by next_run - limit num_tasks - for update skip locked + select id from swh_scheduler_peek_ready_tasks(task_type, ts, num_tasks, num_tasks_priority) ) next_tasks where task.id = next_tasks.id returning task.*; $$; +comment on function swh_scheduler_grab_ready_tasks (text, timestamptz, bigint, bigint) +is 'Grab tasks ready for scheduling and change their status'; + create or replace function swh_scheduler_schedule_task_run (task_id bigint, backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ insert into task_run (task, backend_id, metadata, scheduled, status) values (task_id, backend_id, metadata, ts, 'scheduled') returning *; $$; create or replace function swh_scheduler_mktemp_task_run () returns void language sql as $$ create temporary table tmp_task_run ( like task_run excluding indexes ) on commit drop; alter table tmp_task_run drop column id, drop column status; $$; comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling'; create or replace function swh_scheduler_schedule_task_run_from_temp () returns void language plpgsql as $$ begin insert into task_run (task, backend_id, metadata, scheduled, status) select task, backend_id, metadata, scheduled, 'scheduled' from tmp_task_run; return; end; $$; create or replace function swh_scheduler_start_task_run (backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set started = ts, status = 'started', metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata where task_run.backend_id = swh_scheduler_start_task_run.backend_id returning *; $$; create or replace function swh_scheduler_end_task_run (backend_id text, status task_run_status, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set ended = ts, status = swh_scheduler_end_task_run.status, metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata where task_run.backend_id = swh_scheduler_end_task_run.backend_id returning *; $$; create type task_record as ( task_id bigint, task_policy task_policy, task_status task_status, task_run_id bigint, arguments jsonb, type text, backend_id text, metadata jsonb, scheduled timestamptz, started timestamptz, ended timestamptz ); create index task_run_id_asc_idx on task_run(task asc, ended asc); create or replace function swh_scheduler_task_to_archive( ts_after timestamptz, ts_before timestamptz, last_id bigint default -1, lim bigint default 10) returns setof task_record language sql stable as $$ select t.id as task_id, t.policy as task_policy, t.status as task_status, tr.id as task_run_id, t.arguments, t.type, tr.backend_id, tr.metadata, tr.scheduled, tr.started, tr.ended from task_run tr inner join task t on tr.task=t.id where ((t.policy = 
'oneshot' and t.status ='completed') or (t.policy = 'recurring' and t.status ='disabled')) and ts_after <= tr.ended and tr.ended < ts_before and t.id > last_id order by tr.task, tr.ended limit lim; $$; comment on function swh_scheduler_task_to_archive is 'Read archivable tasks function'; create or replace function swh_scheduler_delete_archived_tasks( task_ids bigint[], task_run_ids bigint[]) returns void language sql as $$ -- clean up task_run_ids delete from task_run where id in (select * from unnest(task_run_ids)); -- clean up only tasks whose associated task_run are all cleaned up. -- Remaining tasks will stay there and will be cleaned up when -- remaining data have been indexed delete from task where id in (select t.id from task t left outer join task_run tr on t.id=tr.task where t.id in (select * from unnest(task_ids)) and tr.task is null); $$; comment on function swh_scheduler_delete_archived_tasks is 'Clean up archived tasks function'; create or replace function swh_scheduler_update_task_on_task_end () returns trigger language plpgsql as $$ declare cur_task task%rowtype; cur_task_type task_type%rowtype; adjustment_factor float; new_interval interval; begin select * from task where id = new.task into cur_task; select * from task_type where type = cur_task.type into cur_task_type; case when new.status = 'permfailed' then update task set status = 'disabled' where id = cur_task.id; when new.status in ('eventful', 'uneventful') then case when cur_task.policy = 'oneshot' then update task set status = 'completed' where id = cur_task.id; when cur_task.policy = 'recurring' then if new.status = 'uneventful' then adjustment_factor := 1/cur_task_type.backoff_factor; else adjustment_factor := 1/cur_task_type.backoff_factor; end if; new_interval := greatest( cur_task_type.min_interval, least( cur_task_type.max_interval, adjustment_factor * cur_task.current_interval)); update task set status = 'next_run_not_scheduled', next_run = now() + new_interval, current_interval = new_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; else -- new.status in 'failed', 'lost' if cur_task.retries_left > 0 then update task set status = 'next_run_not_scheduled', next_run = now() + coalesce(cur_task_type.retry_delay, interval '1 hour'), retries_left = cur_task.retries_left - 1 where id = cur_task.id; else -- no retries left case when cur_task.policy = 'oneshot' then update task set status = 'disabled' where id = cur_task.id; when cur_task.policy = 'recurring' then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task.current_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; end if; -- retries end case; return null; end; $$; create trigger update_task_on_task_end after update of status on task_run for each row when (new.status NOT IN ('scheduled', 'started')) execute procedure swh_scheduler_update_task_on_task_end (); diff --git a/sql/updater/sql/Makefile b/sql/updater/sql/Makefile new file mode 100644 index 0000000..cd976e0 --- /dev/null +++ b/sql/updater/sql/Makefile @@ -0,0 +1,50 @@ +# Depends: postgresql-client, postgresql-autodoc + +DBNAME = softwareheritage-scheduler-updater-dev +DOCDIR = autodoc + +SQL_INIT = swh-init.sql +SQL_SCHEMA = swh-schema.sql +SQL_FUNC = swh-func.sql +SQLS = $(SQL_INIT) $(SQL_SCHEMA) $(SQL_FUNC) + +PSQL_BIN = psql +PSQL_FLAGS = --echo-all -X -v ON_ERROR_STOP=1 +PSQL = $(PSQL_BIN) $(PSQL_FLAGS) + +all: + +createdb: createdb-stamp +createdb-stamp: $(SQL_INIT) + 
createdb $(DBNAME) + touch $@ + +filldb: filldb-stamp +filldb-stamp: createdb-stamp + cat $(SQLS) | $(PSQL) $(DBNAME) + touch $@ + +dropdb: + -dropdb $(DBNAME) + +dumpdb: swh-scheduler-updater.dump +swh-scheduler-updater.dump: filldb-stamp + pg_dump -Fc $(DBNAME) > $@ + +doc: autodoc-stamp $(DOCDIR)/swh-scheduler-updater.pdf +autodoc-stamp: filldb-stamp + test -d $(DOCDIR)/ || mkdir $(DOCDIR) + postgresql_autodoc -d $(DBNAME) -f $(DOCDIR)/swh-scheduler-updater + cp -a $(DOCDIR)/swh-scheduler-updater.dot $(DOCDIR)/swh-scheduler-updater.dot.orig + touch $@ + +$(DOCDIR)/swh-scheduler-updater.pdf: autodoc-stamp + dot -T pdf $(DOCDIR)/swh-scheduler-updater.dot > $(DOCDIR)/swh-scheduler-updater.pdf + +clean: + rm -rf *-stamp $(DOCDIR)/ + +distclean: clean dropdb + rm -f swh-scheduler-updater.dump + +.PHONY: all initdb createdb dropdb doc clean diff --git a/sql/updater/sql/swh-func.sql b/sql/updater/sql/swh-func.sql new file mode 100644 index 0000000..786dee1 --- /dev/null +++ b/sql/updater/sql/swh-func.sql @@ -0,0 +1,48 @@ +-- Postgresql index helper function +create or replace function hash_sha1(text) + returns sha1 +as $$ + select public.digest($1, 'sha1') :: sha1 +$$ language sql strict immutable; + +comment on function hash_sha1(text) is 'Compute the sha1 hash of the given text'; + +-- create a temporary table for cache tmp_cache, +create or replace function swh_mktemp_cache() + returns void + language sql +as $$ + create temporary table tmp_cache ( + like cache including defaults + ) on commit drop; + alter table tmp_cache drop column id; +$$; + +create or replace function swh_cache_put() + returns void + language plpgsql +as $$ +begin + insert into cache (id, url, origin_type, cnt, last_seen) + select hash_sha1(url), url, origin_type, cnt, last_seen + from tmp_cache t + on conflict(id) + do update set cnt = (select cnt from cache where id=excluded.id) + excluded.cnt, + last_seen = excluded.last_seen; + return; +end +$$; + +comment on function swh_cache_put() is 'Write the temporary events to the cache'; + +create or replace function swh_cache_read(ts timestamptz, lim integer) + returns setof cache + language sql stable +as $$ + select id, url, origin_type, cnt, first_seen, last_seen + from cache + where last_seen <= ts + limit lim; +$$; + +comment on function swh_cache_read(timestamptz, integer) is 'Read cache entries'; diff --git a/sql/updater/sql/swh-init.sql b/sql/updater/sql/swh-init.sql new file mode 100644 index 0000000..43774e3 --- /dev/null +++ b/sql/updater/sql/swh-init.sql @@ -0,0 +1,4 @@ +create extension if not exists btree_gist; +create extension if not exists pgcrypto; + +create or replace language plpgsql; diff --git a/sql/updater/sql/swh-schema.sql b/sql/updater/sql/swh-schema.sql new file mode 100644 index 0000000..5833df8 --- /dev/null +++ b/sql/updater/sql/swh-schema.sql @@ -0,0 +1,29 @@ +create table dbversion +( + version int primary key, + release timestamptz not null, + description text not null +); + +comment on table dbversion is 'Schema update tracking'; + +-- a SHA1 checksum (not necessarily originating from Git) +create domain sha1 as bytea check (length(value) = 20); + +insert into dbversion (version, release, description) + values (1, now(), 'Work In Progress'); + +create type origin_type as enum ('git', 'svn', 'hg', 'deb'); +comment on type origin_type is 'Url''s repository type'; + +create table cache ( + id sha1 primary key, + url text not null, + origin_type origin_type not null, + cnt int default 1, + first_seen timestamptz not null default now(), + last_seen timestamptz not null + ); + +create index on cache(url); 
+create index on cache(last_seen); diff --git a/sql/updates/09.sql b/sql/updates/09.sql new file mode 100644 index 0000000..7fe97ca --- /dev/null +++ b/sql/updates/09.sql @@ -0,0 +1,205 @@ +-- SWH Scheduler Schema upgrade +-- from_version: 08 +-- to_version: 09 +-- description: Schedule task with priority + +insert into dbversion (version, release, description) +values (9, now(), 'Work In Progress'); + +create type task_priority as enum('high', 'normal', 'low'); +comment on type task_priority is 'Priority of the given task'; + +create table priority_ratio( + id task_priority primary key, + ratio float not null +); + +comment on table priority_ratio is 'Oneshot task''s reading ratio per priority'; +comment on column priority_ratio.id is 'Task priority id'; +comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority'; + +insert into priority_ratio (id, ratio) values ('high', 0.5); +insert into priority_ratio (id, ratio) values ('normal', 0.3); +insert into priority_ratio (id, ratio) values ('low', 0.2); + +alter table task add column priority task_priority references priority_ratio(id); +comment on column task.priority is 'Priority of the given task'; + +drop function swh_scheduler_peek_ready_tasks(text, timestamptz, bigint); +drop function swh_scheduler_grab_ready_tasks(text, timestamptz, bigint); + +create or replace function swh_scheduler_peek_no_priority_tasks (task_type text, ts timestamptz default now(), + num_tasks bigint default NULL) + returns setof task + language sql + stable +as $$ +select * from task + where next_run <= ts + and type = task_type + and status = 'next_run_not_scheduled' + and priority is null + order by next_run + limit num_tasks + for update skip locked; +$$; + +comment on function swh_scheduler_peek_no_priority_tasks (text, timestamptz, bigint) +is 'Retrieve tasks without priority'; + +create or replace function swh_scheduler_nb_priority_tasks(num_tasks_priority bigint, task_priority task_priority) + returns numeric + language sql stable +as $$ + select ceil(num_tasks_priority * (select ratio from priority_ratio where id = task_priority)) :: numeric +$$; + +comment on function swh_scheduler_nb_priority_tasks (bigint, task_priority) +is 'Given a priority task and a total number, compute the number of tasks to read'; + +create or replace function swh_scheduler_peek_tasks_with_priority (task_type text, ts timestamptz default now(), + num_tasks_priority bigint default NULL, + task_priority task_priority default 'normal') + returns setof task + language sql + stable +as $$ + select * + from task t + where t.next_run <= ts + and t.type = task_type + and t.status = 'next_run_not_scheduled' + and t.priority = task_priority + order by t.next_run + limit num_tasks_priority + for update skip locked; +$$; + +comment on function swh_scheduler_peek_tasks_with_priority(text, timestamptz, bigint, task_priority) +is 'Retrieve tasks with a given priority'; + +create or replace function swh_scheduler_peek_priority_tasks (task_type text, ts timestamptz default now(), + num_tasks_priority bigint default NULL) + returns setof task + language plpgsql +as $$ +declare + r record; + count_row bigint; + nb_diff bigint; + nb_high bigint; + nb_normal bigint; + nb_low bigint; +begin + -- expected values to fetch + select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'high') into nb_high; + select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'normal') into nb_normal; + select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'low') into nb_low; + nb_diff 
:= 0; + count_row := 0; + + for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_high, 'high') + loop + count_row := count_row + 1; + return next r; + end loop; + + if count_row < nb_high then + nb_normal := nb_normal + nb_high - count_row; + end if; + + count_row := 0; + for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_normal, 'normal') + loop + count_row := count_row + 1; + return next r; + end loop; + + if count_row < nb_normal then + nb_low := nb_low + nb_normal - count_row; + end if; + + return query select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_low, 'low'); +end +$$; + +comment on function swh_scheduler_peek_priority_tasks(text, timestamptz, bigint) +is 'Retrieve priority tasks'; + +create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(), + num_tasks bigint default NULL, num_tasks_priority bigint default NULL) + returns setof task + language plpgsql +as $$ +declare + r record; + count_row bigint; + nb_diff bigint; + nb_tasks bigint; +begin + count_row := 0; + + for r in select * from swh_scheduler_peek_priority_tasks(task_type, ts, num_tasks_priority) + order by priority, next_run + loop + count_row := count_row + 1; + return next r; + end loop; + + if count_row < num_tasks_priority then + nb_tasks := num_tasks + num_tasks_priority - count_row; + else + nb_tasks := num_tasks; + end if; + + for r in select * from swh_scheduler_peek_no_priority_tasks(task_type, ts, nb_tasks) + order by priority, next_run + loop + return next r; + end loop; + + return; +end +$$; + +comment on function swh_scheduler_peek_ready_tasks(text, timestamptz, bigint, bigint) +is 'Retrieve tasks with/without priority in order'; + +create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(), + num_tasks bigint default NULL, + num_tasks_priority bigint default NULL) + returns setof task + language sql +as $$ + update task + set status='next_run_scheduled' + from ( + select id from swh_scheduler_peek_ready_tasks(task_type, ts, num_tasks, num_tasks_priority) + ) next_tasks + where task.id = next_tasks.id + returning task.*; +$$; + +comment on function swh_scheduler_grab_ready_tasks (text, timestamptz, bigint, bigint) +is 'Grab tasks ready for scheduling and change their status'; + +create index on task(priority); + +create or replace function swh_scheduler_create_tasks_from_temp () + returns setof task + language plpgsql +as $$ +begin + return query + insert into task (type, arguments, next_run, status, current_interval, policy, retries_left, priority) + select type, arguments, next_run, 'next_run_not_scheduled', + (select default_interval from task_type tt where tt.type = tmp_task.type), + coalesce(policy, 'recurring'), + coalesce(retries_left, (select num_retries from task_type tt where tt.type = tmp_task.type), 0), + coalesce(priority, null) + from tmp_task + returning task.*; +end; +$$; + +comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table'; diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 5c734d8..7d558c2 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 1.0 Name: swh.scheduler -Version: 0.0.26 +Version: 0.0.27 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr 
License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index c7827d8..871c99b 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,62 +1,82 @@ .gitignore AUTHORS LICENSE LICENSE.Celery MANIFEST.in Makefile requirements-swh.txt requirements.txt setup.py version.txt bin/swh-worker-control data/README.md data/elastic-template.json data/update-index-settings.json debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format docs/.gitignore docs/Makefile docs/conf.py docs/index.rst docs/_static/.placeholder docs/_templates/.placeholder sql/.gitignore sql/Makefile sql/swh-scheduler-data.sql sql/swh-scheduler-schema.sql sql/swh-scheduler-testdata.sql +sql/updater/sql/Makefile +sql/updater/sql/swh-func.sql +sql/updater/sql/swh-init.sql +sql/updater/sql/swh-schema.sql sql/updates/02.sql sql/updates/03.sql sql/updates/04.sql sql/updates/05.sql sql/updates/06.sql sql/updates/07.sql sql/updates/08.sql +sql/updates/09.sql swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt swh/scheduler/__init__.py swh/scheduler/backend.py swh/scheduler/backend_es.py swh/scheduler/cli.py swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/api/__init__.py swh/scheduler/api/client.py swh/scheduler/api/server.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/listener.py swh/scheduler/celery_backend/runner.py swh/scheduler/tests/__init__.py swh/scheduler/tests/test_api_client.py swh/scheduler/tests/test_scheduler.py -swh/scheduler/tests/test_task.py \ No newline at end of file +swh/scheduler/tests/test_task.py +swh/scheduler/tests/test_utils.py +swh/scheduler/tests/updater/__init__.py +swh/scheduler/tests/updater/test_backend.py +swh/scheduler/tests/updater/test_consumer.py +swh/scheduler/tests/updater/test_events.py +swh/scheduler/tests/updater/test_ghtorrent.py +swh/scheduler/tests/updater/test_writer.py +swh/scheduler/updater/__init__.py +swh/scheduler/updater/backend.py +swh/scheduler/updater/consumer.py +swh/scheduler/updater/events.py +swh/scheduler/updater/writer.py +swh/scheduler/updater/ghtorrent/__init__.py +swh/scheduler/updater/ghtorrent/cli.py +swh/scheduler/updater/ghtorrent/fake.py \ No newline at end of file diff --git a/swh.scheduler.egg-info/requires.txt b/swh.scheduler.egg-info/requires.txt index 5d329b8..b37314a 100644 --- a/swh.scheduler.egg-info/requires.txt +++ b/swh.scheduler.egg-info/requires.txt @@ -1,8 +1,10 @@ Click arrow celery elasticsearch>5.4 flask +hypothesis +kombu psycopg2 -swh.core>=0.0.38 +swh.core>=0.0.40 vcversioner diff --git a/swh/scheduler/api/client.py b/swh/scheduler/api/client.py index a73cd68..dbb9641 100644 --- a/swh/scheduler/api/client.py +++ b/swh/scheduler/api/client.py @@ -1,103 +1,108 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import SWHRemoteAPI class SchedulerAPIError(Exception): """Specific internal scheduler api issue (mainly connection) """ def __str__(self): args = self.args return 'An unexpected error occurred in the api backend: %s' 
% args class RemoteScheduler(SWHRemoteAPI): """Proxy to a remote scheduler API """ - def __init__(self, url): - super().__init__(api_exception=SchedulerAPIError, url=url) + def __init__(self, url, timeout=None): + super().__init__( + api_exception=SchedulerAPIError, url=url, timeout=timeout) def close_connection(self): return self.post('close_connection', {}) def set_status_tasks(self, task_ids, status='disabled'): return self.post('set_status_tasks', {'task_ids': task_ids, 'status': status}) def create_task_type(self, task_type): return self.post('create_task_type', {'task_type': task_type}) def get_task_type(self, task_type_name): return self.post('get_task_type', {'task_type_name': task_type_name}) def get_task_types(self): return self.post('get_task_types', {}) def create_tasks(self, tasks): return self.post('create_tasks', {'tasks': tasks}) def disable_tasks(self, task_ids): return self.post('disable_tasks', {'task_ids': task_ids}) def get_tasks(self, task_ids): return self.post('get_tasks', {'task_ids': task_ids}) - def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None): + def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None, + num_tasks_priority=None): return self.post('peek_ready_tasks', { 'task_type': task_type, 'timestamp': timestamp, 'num_tasks': num_tasks, + 'num_tasks_priority': num_tasks_priority, }) - def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None): + def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None, + num_tasks_priority=None): return self.post('grab_ready_tasks', { 'task_type': task_type, 'timestamp': timestamp, 'num_tasks': num_tasks, + 'num_tasks_priority': num_tasks_priority, }) def schedule_task_run(self, task_id, backend_id, metadata=None, timestamp=None): return self.post('schedule_task_run', { 'task_id': task_id, 'backend_id': backend_id, 'metadata': metadata, 'timestamp': timestamp, }) def mass_schedule_task_runs(self, task_runs): return self.post('mass_schedule_task_runs', {'task_runs': task_runs}) def start_task_run(self, backend_id, metadata=None, timestamp=None): return self.post('start_task_run', { 'backend_id': backend_id, 'metadata': metadata, 'timestamp': timestamp, }) def end_task_run(self, backend_id, status, metadata=None, timestamp=None): return self.post('end_task_run', { 'backend_id': backend_id, 'status': status, 'metadata': metadata, 'timestamp': timestamp, }) def filter_task_to_archive(self, after_ts, before_ts, limit=10, last_id=-1): return self.post('filter_task_to_archive', { 'after_ts': after_ts, 'before_ts': before_ts, 'limit': limit, 'last_id': last_id, }) def delete_archived_tasks(self, task_ids): return self.post('delete_archived_tasks', {'task_ids': task_ids}) diff --git a/swh/scheduler/api/server.py b/swh/scheduler/api/server.py index c7f1de8..737abb2 100644 --- a/swh/scheduler/api/server.py +++ b/swh/scheduler/api/server.py @@ -1,154 +1,157 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import click -from flask import g, request +from flask import request from swh.core import config -from swh.scheduler import get_scheduler +from swh.scheduler import get_scheduler as get_scheduler_from from swh.core.api import (SWHServerAPIApp, decode_request, error_handler, encode_data_server as encode_data) DEFAULT_CONFIG_PATH = 'backend/scheduler' DEFAULT_CONFIG = { 
'scheduler': ('dict', { 'cls': 'local', 'args': { 'scheduling_db': 'dbname=softwareheritage-scheduler-dev', }, }) } app = SWHServerAPIApp(__name__) +scheduler = None @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) -@app.before_request -def before_request(): - g.scheduler = get_scheduler(**app.config['scheduler']) +def get_sched(): + global scheduler + if not scheduler: + scheduler = get_scheduler_from(**app.config['scheduler']) + return scheduler @app.route('/') def index(): return 'SWH Scheduler API server' @app.route('/close_connection', methods=['POST']) def close_connection(): - return encode_data(g.scheduler.close_connection()) + return encode_data(get_sched().close_connection()) @app.route('/set_status_tasks', methods=['POST']) def set_status_tasks(): - return encode_data(g.scheduler.set_status_tasks(**decode_request(request))) + return encode_data(get_sched().set_status_tasks(**decode_request(request))) @app.route('/create_task_type', methods=['POST']) def create_task_type(): - return encode_data(g.scheduler.create_task_type(**decode_request(request))) + return encode_data(get_sched().create_task_type(**decode_request(request))) @app.route('/get_task_type', methods=['POST']) def get_task_type(): - return encode_data(g.scheduler.get_task_type(**decode_request(request))) + return encode_data(get_sched().get_task_type(**decode_request(request))) @app.route('/get_task_types', methods=['POST']) def get_task_types(): - return encode_data(g.scheduler.get_task_types(**decode_request(request))) + return encode_data(get_sched().get_task_types(**decode_request(request))) @app.route('/create_tasks', methods=['POST']) def create_tasks(): - return encode_data(g.scheduler.create_tasks(**decode_request(request))) + return encode_data(get_sched().create_tasks(**decode_request(request))) @app.route('/disable_tasks', methods=['POST']) def disable_tasks(): - return encode_data(g.scheduler.disable_tasks(**decode_request(request))) + return encode_data(get_sched().disable_tasks(**decode_request(request))) @app.route('/get_tasks', methods=['POST']) def get_tasks(): - return encode_data(g.scheduler.get_tasks(**decode_request(request))) + return encode_data(get_sched().get_tasks(**decode_request(request))) @app.route('/peek_ready_tasks', methods=['POST']) def peek_ready_tasks(): - return encode_data(g.scheduler.peek_ready_tasks(**decode_request(request))) + return encode_data(get_sched().peek_ready_tasks(**decode_request(request))) @app.route('/grab_ready_tasks', methods=['POST']) def grab_ready_tasks(): - return encode_data(g.scheduler.grab_ready_tasks(**decode_request(request))) + return encode_data(get_sched().grab_ready_tasks(**decode_request(request))) @app.route('/schedule_task_run', methods=['POST']) def schedule_task_run(): - return encode_data(g.scheduler.schedule_task_run( + return encode_data(get_sched().schedule_task_run( **decode_request(request))) @app.route('/mass_schedule_task_runs', methods=['POST']) def mass_schedule_task_runs(): return encode_data( - g.scheduler.mass_schedule_task_runs(**decode_request(request))) + get_sched().mass_schedule_task_runs(**decode_request(request))) @app.route('/start_task_run', methods=['POST']) def start_task_run(): - return encode_data(g.scheduler.start_task_run(**decode_request(request))) + return encode_data(get_sched().start_task_run(**decode_request(request))) @app.route('/end_task_run', methods=['POST']) def end_task_run(): - return 
encode_data(g.scheduler.end_task_run(**decode_request(request))) + return encode_data(get_sched().end_task_run(**decode_request(request))) @app.route('/filter_task_to_archive', methods=['POST']) def filter_task_to_archive(): return encode_data( - g.scheduler.filter_task_to_archive(**decode_request(request))) + get_sched().filter_task_to_archive(**decode_request(request))) @app.route('/delete_archived_tasks', methods=['POST']) def delete_archived_tasks(): return encode_data( - g.scheduler.delete_archived_tasks(**decode_request(request))) + get_sched().delete_archived_tasks(**decode_request(request))) def run_from_webserver(environ, start_response, config_path=DEFAULT_CONFIG_PATH): """Run the WSGI app from the webserver, loading the configuration.""" cfg = config.load_named_config(config_path, DEFAULT_CONFIG) app.config.update(cfg) handler = logging.StreamHandler() app.logger.addHandler(handler) return app(environ, start_response) @click.command() @click.argument('config-path', required=1) @click.option('--host', default='0.0.0.0', help="Host to run the scheduler server api") @click.option('--port', default=5008, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=True, help="Indicates if the server should run in debug mode") def launch(config_path, host, port, debug): app.config.update(config.read(config_path, DEFAULT_CONFIG)) app.run(host, port=port, debug=bool(debug)) if __name__ == '__main__': launch() diff --git a/swh/scheduler/backend.py b/swh/scheduler/backend.py index a288a5c..125fc23 100644 --- a/swh/scheduler/backend.py +++ b/swh/scheduler/backend.py @@ -1,493 +1,508 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime from functools import wraps import json import tempfile from arrow import Arrow, utcnow import psycopg2 import psycopg2.extras from psycopg2.extensions import AsIs from swh.core.config import SWHConfig def adapt_arrow(arrow): return AsIs("'%s'::timestamptz" % arrow.isoformat()) psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json) psycopg2.extensions.register_adapter(Arrow, adapt_arrow) def autocommit(fn): @wraps(fn) def wrapped(self, *args, **kwargs): autocommit = False if 'cursor' not in kwargs or not kwargs['cursor']: autocommit = True kwargs['cursor'] = self.cursor() try: ret = fn(self, *args, **kwargs) except Exception: if autocommit: self.rollback() raise if autocommit: self.commit() return ret return wrapped -class SchedulerBackend(SWHConfig): - """Backend for the Software Heritage scheduling database. - - """ - CONFIG_BASE_FILENAME = 'scheduler' - DEFAULT_CONFIG = { - 'scheduling_db': ('str', 'dbname=softwareheritage-scheduler-dev'), - } - - def __init__(self, **override_config): - self.config = self.parse_config_file(global_config=False) - self.config.update(override_config) +class DbBackend: + """Mixin intended to be used within scheduling db backend classes - self.db = None - - self.reconnect() + cf. 
swh.scheduler.backend.SchedulerBackend, and + swh.scheduler.updater.backend.SchedulerUpdaterBackend + """ def reconnect(self): if not self.db or self.db.closed: self.db = psycopg2.connect( - dsn=self.config['scheduling_db'], + dsn=self.db_conn_dsn, cursor_factory=psycopg2.extras.RealDictCursor, ) def cursor(self): """Return a fresh cursor on the database, with auto-reconnection in case of failure """ cur = None # Get a fresh cursor and reconnect at most three times tries = 0 while True: tries += 1 try: cur = self.db.cursor() cur.execute('select 1') break except psycopg2.OperationalError: if tries < 3: self.reconnect() else: raise return cur def commit(self): """Commit a transaction""" self.db.commit() def rollback(self): """Rollback a transaction""" self.db.rollback() def close_connection(self): """Close db connection""" if self.db and not self.db.closed: self.db.close() + def _format_query(self, query, keys): + """Format a query with the given keys""" + + query_keys = ', '.join(keys) + placeholders = ', '.join(['%s'] * len(keys)) + + return query.format(keys=query_keys, placeholders=placeholders) + + def _format_multiquery(self, query, keys, values): + """Format a query with placeholders generated for multiple values""" + query_keys = ', '.join(keys) + placeholders = '), ('.join( + [', '.join(['%s'] * len(keys))] * len(values) + ) + ret_values = sum([[value[key] for key in keys] + for value in values], []) + + return ( + query.format(keys=query_keys, placeholders=placeholders), + ret_values, + ) + def copy_to(self, items, tblname, columns, cursor=None, item_cb=None): def escape(data): if data is None: return '' if isinstance(data, bytes): return '\\x%s' % binascii.hexlify(data).decode('ascii') elif isinstance(data, str): return '"%s"' % data.replace('"', '""') elif isinstance(data, (datetime.datetime, Arrow)): # We escape twice to make sure the string generated by # isoformat gets escaped return escape(data.isoformat()) elif isinstance(data, dict): return escape(json.dumps(data)) elif isinstance(data, list): return escape("{%s}" % ','.join(escape(d) for d in data)) elif isinstance(data, psycopg2.extras.Range): # We escape twice here too, so that we make sure # everything gets passed to copy properly return escape( '%s%s,%s%s' % ( '[' if data.lower_inc else '(', '-infinity' if data.lower_inf else escape(data.lower), 'infinity' if data.upper_inf else escape(data.upper), ']' if data.upper_inc else ')', ) ) else: # We don't escape here to make sure we pass literals properly return str(data) with tempfile.TemporaryFile('w+') as f: for d in items: if item_cb is not None: item_cb(d) line = [escape(d.get(k)) for k in columns] f.write(','.join(line)) f.write('\n') f.seek(0) cursor.copy_expert('COPY %s (%s) FROM STDIN CSV' % ( tblname, ', '.join(columns)), f) + +class SchedulerBackend(SWHConfig, DbBackend): + """Backend for the Software Heritage scheduling database. 
+ + """ + CONFIG_BASE_FILENAME = 'scheduler' + DEFAULT_CONFIG = { + 'scheduling_db': ('str', 'dbname=softwareheritage-scheduler-dev'), + } + + def __init__(self, **override_config): + super().__init__() + self.config = self.parse_config_file(global_config=False) + self.config.update(override_config) + self.db = None + self.db_conn_dsn = self.config['scheduling_db'] + self.reconnect() + task_type_keys = [ 'type', 'description', 'backend_name', 'default_interval', 'min_interval', 'max_interval', 'backoff_factor', 'max_queue_length', 'num_retries', 'retry_delay', ] - def _format_query(self, query, keys): - """Format a query with the given keys""" - - query_keys = ', '.join(keys) - placeholders = ', '.join(['%s'] * len(keys)) - - return query.format(keys=query_keys, placeholders=placeholders) - - def _format_multiquery(self, query, keys, values): - """Format a query with placeholders generated for multiple values""" - query_keys = ', '.join(keys) - placeholders = '), ('.join( - [', '.join(['%s'] * len(keys))] * len(values) - ) - ret_values = sum([[value[key] for key in keys] - for value in values], []) - - return ( - query.format(keys=query_keys, placeholders=placeholders), - ret_values, - ) - @autocommit def create_task_type(self, task_type, cursor=None): """Create a new task type ready for scheduling. Args: task_type (dict): a dictionary with the following keys: - type (str): an identifier for the task type - description (str): a human-readable description of what the task does - backend_name (str): the name of the task in the job-scheduling backend - default_interval (datetime.timedelta): the default interval between two task runs - min_interval (datetime.timedelta): the minimum interval between two task runs - max_interval (datetime.timedelta): the maximum interval between two task runs - backoff_factor (float): the factor by which the interval changes at each run - max_queue_length (int): the maximum length of the task queue for this task type """ query = self._format_query( """insert into task_type ({keys}) values ({placeholders})""", self.task_type_keys, ) cursor.execute(query, [task_type[key] for key in self.task_type_keys]) @autocommit def get_task_type(self, task_type_name, cursor=None): """Retrieve the task type with id task_type_name""" query = self._format_query( "select {keys} from task_type where type=%s", self.task_type_keys, ) cursor.execute(query, (task_type_name,)) ret = cursor.fetchone() return ret @autocommit def get_task_types(self, cursor=None): query = self._format_query( "select {keys} from task_type", self.task_type_keys, ) cursor.execute(query) ret = cursor.fetchall() return ret task_create_keys = [ - 'type', 'arguments', 'next_run', 'policy', 'retries_left', + 'type', 'arguments', 'next_run', 'policy', 'retries_left', 'priority', ] task_keys = task_create_keys + ['id', 'current_interval', 'status'] @autocommit def create_tasks(self, tasks, cursor=None): """Create new tasks. Args: tasks (list): each task is a dictionary with the following keys: - type (str): the task type - arguments (dict): the arguments for the task runner, keys: - args (list of str): arguments - kwargs (dict str -> str): keyword arguments - next_run (datetime.datetime): the next scheduled run for the task Returns: a list of created tasks. 
""" cursor.execute('select swh_scheduler_mktemp_task()') self.copy_to(tasks, 'tmp_task', self.task_create_keys, cursor) query = self._format_query( 'select {keys} from swh_scheduler_create_tasks_from_temp()', self.task_keys, ) cursor.execute(query) return cursor.fetchall() @autocommit def set_status_tasks(self, task_ids, status='disabled', cursor=None): """Set the tasks' status whose ids are listed.""" query = "UPDATE task SET status = %s WHERE id IN %s" cursor.execute(query, (status, tuple(task_ids),)) return None @autocommit def disable_tasks(self, task_ids, cursor=None): """Disable the tasks whose ids are listed.""" return self.set_status_tasks(task_ids) @autocommit def get_tasks(self, task_ids, cursor=None): """Retrieve the info of tasks whose ids are listed.""" query = self._format_query('select {keys} from task where id in %s', self.task_keys) cursor.execute(query, (tuple(task_ids),)) return cursor.fetchall() @autocommit def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None, + num_tasks_priority=None, cursor=None): """Fetch the list of ready tasks Args: task_type (str): filtering task per their type timestamp (datetime.datetime): peek tasks that need to be executed before that timestamp - num_tasks (int): only peek at num_tasks tasks + num_tasks (int): only peek at num_tasks tasks (with no priority) + num_tasks_priority (int): only peek at num_tasks_priority + tasks (with priority) Returns: a list of tasks - """ + """ if timestamp is None: timestamp = utcnow() cursor.execute( - 'select * from swh_scheduler_peek_ready_tasks(%s, %s, %s)', - (task_type, timestamp, num_tasks) + '''select * from swh_scheduler_peek_ready_tasks( + %s, %s, %s :: bigint, %s :: bigint)''', + (task_type, timestamp, num_tasks, num_tasks_priority) ) return cursor.fetchall() @autocommit def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None, - cursor=None): + num_tasks_priority=None, cursor=None): """Fetch the list of ready tasks, and mark them as scheduled Args: task_type (str): filtering task per their type timestamp (datetime.datetime): grab tasks that need to be executed before that timestamp - num_tasks (int): only grab num_tasks tasks + num_tasks (int): only grab num_tasks tasks (with no priority) + num_tasks_priority (int): only grab oneshot num_tasks tasks (with + priorities) Returns: a list of tasks - """ + """ if timestamp is None: timestamp = utcnow() cursor.execute( - 'select * from swh_scheduler_grab_ready_tasks(%s, %s, %s)', - (task_type, timestamp, num_tasks) + '''select * from swh_scheduler_grab_ready_tasks( + %s, %s, %s :: bigint, %s :: bigint)''', + (task_type, timestamp, num_tasks, num_tasks_priority) ) return cursor.fetchall() task_run_create_keys = ['task', 'backend_id', 'scheduled', 'metadata'] @autocommit def schedule_task_run(self, task_id, backend_id, metadata=None, timestamp=None, cursor=None): """Mark a given task as scheduled, adding a task_run entry in the database. 
Args: task_id (int): the identifier for the task being scheduled backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: a fresh task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)', (task_id, backend_id, metadata, timestamp) ) return cursor.fetchone() @autocommit def mass_schedule_task_runs(self, task_runs, cursor=None): """Schedule a bunch of task runs. Args: task_runs (list): a list of dicts with keys: - task (int): the identifier for the task being scheduled - backend_id (str): the identifier of the job in the backend - metadata (dict): metadata to add to the task_run entry - scheduled (datetime.datetime): the instant the event occurred Returns: None """ cursor.execute('select swh_scheduler_mktemp_task_run()') self.copy_to(task_runs, 'tmp_task_run', self.task_run_create_keys, cursor) cursor.execute('select swh_scheduler_schedule_task_run_from_temp()') @autocommit def start_task_run(self, backend_id, metadata=None, timestamp=None, cursor=None): """Mark a given task as started, updating the corresponding task_run entry in the database. Args: backend_id (str): the identifier of the job in the backend metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: the updated task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_start_task_run(%s, %s, %s)', (backend_id, metadata, timestamp) ) return cursor.fetchone() @autocommit def end_task_run(self, backend_id, status, metadata=None, timestamp=None, result=None, cursor=None): """Mark a given task as ended, updating the corresponding task_run entry in the database. Args: backend_id (str): the identifier of the job in the backend status (str): how the task ended; one of: 'eventful', 'uneventful', 'failed' metadata (dict): metadata to add to the task_run entry timestamp (datetime.datetime): the instant the event occurred Returns: the updated task_run entry """ if metadata is None: metadata = {} if timestamp is None: timestamp = utcnow() cursor.execute( 'select * from swh_scheduler_end_task_run(%s, %s, %s, %s)', (backend_id, status, metadata, timestamp) ) return cursor.fetchone() @autocommit def filter_task_to_archive(self, after_ts, before_ts, limit=10, last_id=-1, cursor=None): """Returns the list of task/task_run prior to a given date to archive. """ last_task_run_id = None while True: row = None cursor.execute( "select * from swh_scheduler_task_to_archive(%s, %s, %s, %s)", (after_ts, before_ts, last_id, limit) ) for row in cursor: # nested type index does not accept bare values # transform it as a dict to comply with this row['arguments']['args'] = { i: v for i, v in enumerate(row['arguments']['args']) } kwargs = row['arguments']['kwargs'] row['arguments']['kwargs'] = json.dumps(kwargs) yield row if not row: break _id = row.get('task_id') _task_run_id = row.get('task_run_id') if last_id == _id and last_task_run_id == _task_run_id: break last_id = _id last_task_run_id = _task_run_id @autocommit def delete_archived_tasks(self, task_ids, cursor=None): """Delete archived tasks as much as possible. Only the task_ids whose complete associated task_run have been cleaned up will be. 
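        A sketch of the expected input shape, using hypothetical identifiers:

            backend.delete_archived_tasks([
                {'task_id': 12, 'task_run_id': 130},    # hypothetical ids
                {'task_id': 13, 'task_run_id': 131},
            ])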
""" _task_ids = _task_run_ids = [] for task_id in task_ids: _task_ids.append(task_id['task_id']) _task_run_ids.append(task_id['task_run_id']) cursor.execute( "select * from swh_scheduler_delete_archived_tasks(%s, %s)", (_task_ids, _task_run_ids)) diff --git a/swh/scheduler/celery_backend/listener.py b/swh/scheduler/celery_backend/listener.py index e497afa..cb70aea 100644 --- a/swh/scheduler/celery_backend/listener.py +++ b/swh/scheduler/celery_backend/listener.py @@ -1,178 +1,181 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import click import datetime import socket from arrow import utcnow from kombu import Queue from celery.events import EventReceiver from swh.scheduler import get_scheduler from .config import app as main_app class ReliableEventReceiver(EventReceiver): def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix='celeryev', accept=None): super(ReliableEventReceiver, self).__init__( channel, handlers, routing_key, node_id, app, queue_prefix, accept) self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), exchange=self.exchange, routing_key=self.routing_key, auto_delete=False, durable=True, queue_arguments=self._get_queue_arguments()) def get_consumers(self, consumer, channel): return [consumer(queues=[self.queue], callbacks=[self._receive], no_ack=False, accept=self.accept)] def _receive(self, body, message): type, body = self.event_from_message(body) self.process(type, body, message) def process(self, type, event, message): """Process the received event by dispatching it to the appropriate handler.""" handler = self.handlers.get(type) or self.handlers.get('*') handler and handler(event, message) ACTION_SEND_DELAY = datetime.timedelta(seconds=1.0) ACTION_QUEUE_MAX_LENGTH = 1000 def event_monitor(app, backend): actions = { 'last_send': utcnow() - 2*ACTION_SEND_DELAY, 'queue': [], } def try_perform_actions(actions=actions): if not actions['queue']: return if utcnow() - actions['last_send'] > ACTION_SEND_DELAY or \ len(actions['queue']) > ACTION_QUEUE_MAX_LENGTH: perform_actions(actions) def perform_actions(actions, backend=backend): action_map = { 'start_task_run': backend.start_task_run, 'end_task_run': backend.end_task_run, } messages = [] cursor = backend.cursor() for action in actions['queue']: messages.append(action['message']) function = action_map[action['action']] args = action.get('args', ()) kwargs = action.get('kwargs', {}) kwargs['cursor'] = cursor function(*args, **kwargs) backend.commit() for message in messages: message.ack() actions['queue'] = [] actions['last_send'] = utcnow() def queue_action(action, actions=actions): actions['queue'].append(action) try_perform_actions() def catchall_event(event, message): message.ack() try_perform_actions() def task_started(event, message): queue_action({ 'action': 'start_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'metadata': { 'worker': event['hostname'], }, }, 'message': message, }) def task_succeeded(event, message): result = event['result'] try: status = result.get('status') if status == 'success': status = 'eventful' if result.get('eventful') else 'uneventful' except Exception: status = 'eventful' if result else 'uneventful' queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': status, 
'result': result, }, 'message': message, }) def task_failed(event, message): queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': 'failed', }, 'message': message, }) recv = ReliableEventReceiver( main_app.connection(), app=main_app, handlers={ 'task-started': task_started, 'task-result': task_succeeded, 'task-failed': task_failed, '*': catchall_event, }, node_id='listener-%s' % socket.gethostname(), ) recv.capture(limit=None, timeout=None, wakeup=True) @click.command() @click.option('--cls', '-c', default='local', help="Scheduler's class, default to 'local'") @click.option( - '--database', '-d', help='Scheduling database DSN', - default='host=db.internal.softwareheritage.org ' - 'dbname=softwareheritage-scheduler user=guest') -@click.option('--url', '-u', default='http://localhost:5008', + '--database', '-d', help='Scheduling database DSN') +@click.option('--url', '-u', help="(Optional) Scheduler's url access") def main(cls, database, url): scheduler = None + override_config = {} if cls == 'local': - scheduler = get_scheduler(cls, args={'scheduling_db': database}) + if database: + override_config = {'scheduling_db': database} + scheduler = get_scheduler(cls, args=override_config) elif cls == 'remote': - scheduler = get_scheduler(cls, args={'url': url}) + if url: + override_config = {'url': url} + scheduler = get_scheduler(cls, args=override_config) if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') event_monitor(main_app, backend=scheduler) if __name__ == '__main__': main() diff --git a/swh/scheduler/celery_backend/runner.py b/swh/scheduler/celery_backend/runner.py index 6e271b5..daa5772 100644 --- a/swh/scheduler/celery_backend/runner.py +++ b/swh/scheduler/celery_backend/runner.py @@ -1,84 +1,95 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import time import arrow from celery import group from swh.scheduler import get_scheduler from .config import app as main_app # Max batch size for tasks MAX_NUM_TASKS = 10000 +# Percentage of tasks with priority to schedule +PRIORITY_SLOT = 0.6 def run_ready_tasks(backend, app): - """Run all tasks that are ready""" + """Run tasks that are ready + Args + backend (Scheduler): backend to read tasks to schedule + app (App): Celery application to send tasks to + + """ while True: throttled = False cursor = backend.cursor() task_types = {} pending_tasks = [] for task_type in backend.get_task_types(cursor=cursor): task_type_name = task_type['type'] task_types[task_type_name] = task_type max_queue_length = task_type['max_queue_length'] if max_queue_length: backend_name = task_type['backend_name'] queue_name = app.tasks[backend_name].task_queue queue_length = app.get_queue_length(queue_name) num_tasks = min(max_queue_length - queue_length, MAX_NUM_TASKS) else: num_tasks = MAX_NUM_TASKS if num_tasks > 0: + num_tasks_priority = PRIORITY_SLOT * num_tasks + num_tasks = (1 - PRIORITY_SLOT) * num_tasks pending_tasks.extend( - backend.grab_ready_tasks(task_type_name, - num_tasks=num_tasks, - cursor=cursor)) + backend.grab_ready_tasks( + task_type_name, + num_tasks=num_tasks, + num_tasks_priority=num_tasks_priority, + cursor=cursor)) if not pending_tasks: break celery_tasks = [] for task in pending_tasks: args = task['arguments']['args'] kwargs = 
task['arguments']['kwargs'] celery_task = app.tasks[ task_types[task['type']]['backend_name'] ].s(*args, **kwargs) celery_tasks.append(celery_task) group_result = group(celery_tasks).delay() backend_tasks = [{ 'task': task['id'], 'backend_id': group_result.results[i].id, 'scheduled': arrow.utcnow(), } for i, task in enumerate(pending_tasks)] backend.mass_schedule_task_runs(backend_tasks, cursor=cursor) backend.commit() if throttled: time.sleep(10) if __name__ == '__main__': for module in main_app.conf.CELERY_IMPORTS: __import__(module) main_backend = get_scheduler('local') try: run_ready_tasks(main_backend, main_app) except Exception: main_backend.rollback() raise diff --git a/swh/scheduler/cli.py b/swh/scheduler/cli.py index 69bb124..ee7dc68 100644 --- a/swh/scheduler/cli.py +++ b/swh/scheduler/cli.py @@ -1,290 +1,293 @@ # Copyright (C) 2016-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import arrow import click import csv import itertools import json import locale import logging from swh.core import utils from .backend_es import SWHElasticSearchClient locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def pretty_print_list(list, indent): """Pretty-print a list""" return ''.join('%s%s\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent): """Pretty-print a list""" return ''.join('%s%s: %s\n' % (' ' * indent, click.style(key, bold=True), value) for key, value in dict.items()) def pretty_print_task(task): """Pretty-print a task""" next_run = arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'], '\n', click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @click.group(context_settings=CONTEXT_SETTINGS) @click.option('--cls', '-c', default='local', help="Scheduler's class, default to 'local'") @click.option('--database', '-d', - default='host=db.internal.softwareheritage.org ' - 'dbname=softwareheritage-scheduler user=guest', help='Scheduling database DSN') -@click.option('--url', '-u', default='http://localhost:5008', +@click.option('--url', '-u', help="(Optional) Scheduler's url access") @click.pass_context def cli(ctx, cls, database, url): """Software Heritage Scheduler CLI interface Default to use the the local scheduler instance (plugged to the main scheduler db). """ scheduler = None + override_config = {} from . 
import get_scheduler if cls == 'local': - scheduler = get_scheduler(cls, args={'scheduling_db': database}) + if database: + override_config = {'scheduling_db': database} + scheduler = get_scheduler(cls, args=override_config) elif cls == 'remote': - scheduler = get_scheduler(cls, args={'url': url}) + if url: + override_config = {'url': url} + scheduler = get_scheduler(cls, args=override_config) if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') ctx.obj = scheduler @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], type=click.Choice([ 'type', 'args', 'kwargs', 'policy', 'next_run']), help='columns present in the CSV file') @click.option('--delimiter', '-d', default=',') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). Use sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ tasks = [] now = arrow.utcnow() reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = ctx.obj.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('list-pending') @click.option('--task-type', '-t', required=True, help='The tasks\' type concerned by the listing') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context def list_pending_tasks(ctx, task_type, limit, before): """List the tasks that are going to be run. You can override the number of tasks to fetch """ pending = ctx.obj.peek_ready_tasks(task_type, timestamp=before, num_tasks=limit) output = [ 'Found %d tasks\n' % len(pending) ] for task in pending: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('archive') @click.option('--before', '-b', default=None, help='''Task whose ended date is anterior will be archived. Default to current month's first day.''') @click.option('--after', '-a', default=None, help='''Task whose ended date is after the specified date will be archived. 
Default to prior month's first day.''') @click.option('--batch-index', default=1000, type=click.INT, help='Batch size of tasks to read from db to archive') @click.option('--bulk-index', default=200, type=click.INT, help='Batch size of tasks to bulk index') @click.option('--batch-clean', default=1000, type=click.INT, help='Batch size of task to clean after archival') @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='Default to list only what would be archived.') @click.option('--verbose', is_flag=True, default=False, help='Default to list only what would be archived.') @click.option('--cleanup/--no-cleanup', is_flag=True, default=True, help='Clean up archived tasks (default)') @click.option('--start-from', type=click.INT, default=-1, help='(Optional) default task id to start from. Default is -1.') @click.pass_context def archive_tasks(ctx, before, after, batch_index, bulk_index, batch_clean, dry_run, verbose, cleanup, start_from): """Archive task/task_run whose (task_type is 'oneshot' and task_status is 'completed') or (task_type is 'recurring' and task_status is 'disabled'). With --dry-run flag set (default), only list those. """ es_client = SWHElasticSearchClient() logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) log = logging.getLogger('swh.scheduler.cli.archive') logging.getLogger('urllib3').setLevel(logging.WARN) logging.getLogger('elasticsearch').setLevel(logging.WARN) if dry_run: log.info('**DRY-RUN** (only reading db)') if not cleanup: log.info('**NO CLEANUP**') now = arrow.utcnow() # Default to archive all tasks prior to now's last month if not before: before = now.format('YYYY-MM-01') if not after: after = now.shift(months=-1).format('YYYY-MM-01') log.debug('index: %s; cleanup: %s; period: [%s ; %s]' % ( not dry_run, not dry_run and cleanup, after, before)) def group_by_index_name(data, es_client=es_client): ended = data['ended'] return es_client.compute_index_name(ended.year, ended.month) def index_data(before, last_id, batch_index, backend=ctx.obj): tasks_in = backend.filter_task_to_archive( after, before, last_id=last_id, limit=batch_index) for index_name, tasks_group in itertools.groupby( tasks_in, key=group_by_index_name): log.debug('Send for indexation to index %s' % index_name) if dry_run: for task in tasks_group: yield task continue yield from es_client.streaming_bulk( index_name, tasks_group, source=['task_id', 'task_run_id'], chunk_size=bulk_index, log=log) gen = index_data(before, last_id=start_from, batch_index=batch_index) if cleanup: for task_ids in utils.grouper(gen, n=batch_clean): task_ids = list(task_ids) log.info('Clean up %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) if dry_run: # no clean up continue ctx.obj.delete_archived_tasks(task_ids) else: for task_ids in utils.grouper(gen, n=batch_index): task_ids = list(task_ids) log.info('Indexed %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) @cli.group('task-run') @click.pass_context def task_run(ctx): """Manipulate task runs.""" pass if __name__ == '__main__': cli() diff --git a/swh/scheduler/tests/test_scheduler.py b/swh/scheduler/tests/test_scheduler.py index 43edfc5..e60facb 100644 --- a/swh/scheduler/tests/test_scheduler.py +++ b/swh/scheduler/tests/test_scheduler.py @@ -1,358 +1,474 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import 
datetime import os import random import unittest import uuid from arrow import utcnow from collections import defaultdict from nose.plugins.attrib import attr from nose.tools import istest import psycopg2 from swh.core.tests.db_testing import SingleDbTestFixture from swh.scheduler import get_scheduler TEST_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(TEST_DIR, '../../../../swh-storage-testdata') @attr('db') class CommonSchedulerTest(SingleDbTestFixture): TEST_DB_NAME = 'softwareheritage-scheduler-test' TEST_DB_DUMP = os.path.join(TEST_DATA_DIR, 'dumps/swh-scheduler.dump') def setUp(self): super().setUp() tt = { 'type': 'update-git', 'description': 'Update a git repository', 'backend_name': 'swh.loader.git.tasks.UpdateGitRepository', 'default_interval': datetime.timedelta(days=64), 'min_interval': datetime.timedelta(hours=12), 'max_interval': datetime.timedelta(days=64), 'backoff_factor': 2, 'max_queue_length': None, 'num_retries': 7, 'retry_delay': datetime.timedelta(hours=2), } tt2 = tt.copy() tt2['type'] = 'update-hg' tt2['description'] = 'Update a mercurial repository' tt2['backend_name'] = 'swh.loader.mercurial.tasks.UpdateHgRepository' tt2['max_queue_length'] = 42 tt2['num_retries'] = None tt2['retry_delay'] = None self.task_types = { tt['type']: tt, tt2['type']: tt2, } self.task1_template = t1_template = { 'type': tt['type'], 'arguments': { 'args': [], 'kwargs': {}, }, 'next_run': None, } self.task2_template = t2_template = copy.deepcopy(t1_template) t2_template['type'] = tt2['type'] t2_template['policy'] = 'oneshot' def tearDown(self): self.backend.close_connection() self.empty_tables() super().tearDown() - def empty_tables(self): - self.cursor.execute("""SELECT table_name FROM information_schema.tables - WHERE table_schema = %s""", ('public',)) + def empty_tables(self, whitelist=["priority_ratio"]): + query = """SELECT table_name FROM information_schema.tables + WHERE table_schema = %%s and + table_name not in (%s) + """ % ','.join(map(lambda t: "'%s'" % t, whitelist)) + self.cursor.execute(query, ('public', )) tables = set(table for (table,) in self.cursor.fetchall()) for table in tables: self.cursor.execute('truncate table %s cascade' % table) self.conn.commit() @istest def add_task_type(self): tt, tt2 = self.task_types.values() self.backend.create_task_type(tt) self.assertEqual(tt, self.backend.get_task_type(tt['type'])) with self.assertRaisesRegex(psycopg2.IntegrityError, '\(type\)=\(%s\)' % tt['type']): self.backend.create_task_type(tt) self.backend.create_task_type(tt2) self.assertEqual(tt, self.backend.get_task_type(tt['type'])) self.assertEqual(tt2, self.backend.get_task_type(tt2['type'])) @istest def get_task_types(self): tt, tt2 = self.task_types.values() self.backend.create_task_type(tt) self.backend.create_task_type(tt2) self.assertCountEqual([tt2, tt], self.backend.get_task_types()) @staticmethod - def _task_from_template(template, next_run, *args, **kwargs): + def _task_from_template(template, next_run, priority, *args, **kwargs): ret = copy.deepcopy(template) ret['next_run'] = next_run + if priority: + ret['priority'] = priority if args: ret['arguments']['args'] = list(args) if kwargs: ret['arguments']['kwargs'] = kwargs return ret - def _tasks_from_template(self, template, max_timestamp, num): - return [ - self._task_from_template( + def _pop_priority(self, priorities): + if not priorities: + return None + for priority, remains in priorities.items(): + if remains > 0: + priorities[priority] = remains - 1 + return priority + return 
None + + def _tasks_from_template(self, template, max_timestamp, num, + num_priority=0, priorities=None): + if num_priority and priorities: + priorities = { + priority: ratio * num_priority + for priority, ratio in priorities.items() + } + + tasks = [] + for i in range(num + num_priority): + priority = self._pop_priority(priorities) + tasks.append(self._task_from_template( template, max_timestamp - datetime.timedelta(microseconds=i), + priority, 'argument-%03d' % i, **{'kwarg%03d' % i: 'bogus-kwarg'} - ) - for i in range(num) - ] + )) + return tasks def _create_task_types(self): for tt in self.task_types.values(): self.backend.create_task_type(tt) @istest def create_tasks(self): + priority_ratio = self._priority_ratio() self._create_task_types() - tasks = ( - self._tasks_from_template(self.task1_template, utcnow(), 100) - + self._tasks_from_template(self.task2_template, utcnow(), 100) - ) - ret = self.backend.create_tasks(tasks) + num_tasks_priority = 100 + tasks_1 = self._tasks_from_template(self.task1_template, utcnow(), 100) + tasks_2 = self._tasks_from_template( + self.task2_template, utcnow(), 100, + num_tasks_priority, priorities=priority_ratio) + tasks = tasks_1 + tasks_2 + # duplicate tasks are dropped at creation + ret = self.backend.create_tasks(tasks + tasks_1 + tasks_2) ids = set() + actual_priorities = defaultdict(int) + for task, orig_task in zip(ret, tasks): task = copy.deepcopy(task) task_type = self.task_types[orig_task['type']] self.assertNotIn(task['id'], ids) self.assertEqual(task['status'], 'next_run_not_scheduled') self.assertEqual(task['current_interval'], task_type['default_interval']) self.assertEqual(task['policy'], orig_task.get('policy', 'recurring')) + priority = task.get('priority') + if priority: + actual_priorities[priority] += 1 + self.assertEqual(task['retries_left'], task_type['num_retries'] or 0) ids.add(task['id']) del task['id'] del task['status'] del task['current_interval'] del task['retries_left'] if 'policy' not in orig_task: del task['policy'] - self.assertEqual(task, orig_task) + if 'priority' not in orig_task: + del task['priority'] + self.assertEqual(task, orig_task) + + self.assertEqual(dict(actual_priorities), { + priority: int(ratio * num_tasks_priority) + for priority, ratio in priority_ratio.items() + }) @istest - def peek_ready_tasks(self): + def peek_ready_tasks_no_priority(self): self._create_task_types() t = utcnow() task_type = self.task1_template['type'] tasks = self._tasks_from_template(self.task1_template, t, 100) random.shuffle(tasks) self.backend.create_tasks(tasks) ready_tasks = self.backend.peek_ready_tasks(task_type) self.assertEqual(len(ready_tasks), len(tasks)) for i in range(len(ready_tasks) - 1): self.assertLessEqual(ready_tasks[i]['next_run'], ready_tasks[i+1]['next_run']) # Only get the first few ready tasks limit = random.randrange(5, 5 + len(tasks)//2) ready_tasks_limited = self.backend.peek_ready_tasks( task_type, num_tasks=limit) self.assertEqual(len(ready_tasks_limited), limit) self.assertCountEqual(ready_tasks_limited, ready_tasks[:limit]) # Limit by timestamp max_ts = tasks[limit-1]['next_run'] ready_tasks_timestamped = self.backend.peek_ready_tasks( task_type, timestamp=max_ts) for ready_task in ready_tasks_timestamped: self.assertLessEqual(ready_task['next_run'], max_ts) # Make sure we get proper behavior for the first ready tasks self.assertCountEqual( ready_tasks[:len(ready_tasks_timestamped)], ready_tasks_timestamped, ) # Limit by both ready_tasks_both = self.backend.peek_ready_tasks( task_type, 
timestamp=max_ts, num_tasks=limit//3) self.assertLessEqual(len(ready_tasks_both), limit//3) for ready_task in ready_tasks_both: self.assertLessEqual(ready_task['next_run'], max_ts) self.assertIn(ready_task, ready_tasks[:limit//3]) + def _priority_ratio(self): + self.cursor.execute('select id, ratio from priority_ratio') + priority_ratio = {} + for row in self.cursor.fetchall(): + priority_ratio[row[0]] = row[1] + return priority_ratio + + @istest + def peek_ready_tasks_mixed_priorities(self): + priority_ratio = self._priority_ratio() + self._create_task_types() + t = utcnow() + task_type = self.task1_template['type'] + num_tasks_priority = 100 + num_tasks_no_priority = 100 + # Create tasks with and without priorities + tasks = self._tasks_from_template( + self.task1_template, t, + num=num_tasks_no_priority, + num_priority=num_tasks_priority, + priorities=priority_ratio) + + random.shuffle(tasks) + self.backend.create_tasks(tasks) + + # take all available tasks + ready_tasks = self.backend.peek_ready_tasks( + task_type) + + self.assertEqual(len(ready_tasks), len(tasks)) + self.assertEqual(num_tasks_priority + num_tasks_no_priority, + len(ready_tasks)) + + count_tasks_per_priority = defaultdict(int) + for task in ready_tasks: + priority = task.get('priority') + if priority: + count_tasks_per_priority[priority] += 1 + + self.assertEqual(dict(count_tasks_per_priority), { + priority: int(ratio * num_tasks_priority) + for priority, ratio in priority_ratio.items() + }) + + # Only get some ready tasks + num_tasks = random.randrange(5, 5 + num_tasks_no_priority//2) + num_tasks_priority = random.randrange(5, num_tasks_priority//2) + ready_tasks_limited = self.backend.peek_ready_tasks( + task_type, num_tasks=num_tasks, + num_tasks_priority=num_tasks_priority) + + count_tasks_per_priority = defaultdict(int) + for task in ready_tasks_limited: + priority = task.get('priority') + count_tasks_per_priority[priority] += 1 + + import math + for priority, ratio in priority_ratio.items(): + expected_count = math.ceil(ratio * num_tasks_priority) + actual_prio = count_tasks_per_priority[priority] + self.assertTrue( + actual_prio == expected_count or + actual_prio == expected_count + 1) + + self.assertEqual(count_tasks_per_priority[None], num_tasks) + @istest def grab_ready_tasks(self): + priority_ratio = self._priority_ratio() self._create_task_types() t = utcnow() task_type = self.task1_template['type'] - tasks = self._tasks_from_template(self.task1_template, t, 100) + num_tasks_priority = 100 + num_tasks_no_priority = 100 + # Create tasks with and without priorities + tasks = self._tasks_from_template( + self.task1_template, t, + num=num_tasks_no_priority, + num_priority=num_tasks_priority, + priorities=priority_ratio) random.shuffle(tasks) self.backend.create_tasks(tasks) first_ready_tasks = self.backend.peek_ready_tasks( - task_type, num_tasks=10) - grabbed_tasks = self.backend.grab_ready_tasks(task_type, num_tasks=10) + task_type, num_tasks=10, num_tasks_priority=10) + grabbed_tasks = self.backend.grab_ready_tasks( + task_type, num_tasks=10, num_tasks_priority=10) for peeked, grabbed in zip(first_ready_tasks, grabbed_tasks): self.assertEqual(peeked['status'], 'next_run_not_scheduled') del peeked['status'] self.assertEqual(grabbed['status'], 'next_run_scheduled') del grabbed['status'] self.assertEqual(peeked, grabbed) + self.assertEqual(peeked['priority'], grabbed['priority']) @istest def get_tasks(self): self._create_task_types() t = utcnow() tasks = self._tasks_from_template(self.task1_template, t, 100) 
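        # persist the generated tasks, then check below that get_tasks()
        # returns them unchanged when queried by id in random-sized chunks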
tasks = self.backend.create_tasks(tasks) random.shuffle(tasks) while len(tasks) > 1: length = random.randrange(1, len(tasks)) cur_tasks = tasks[:length] tasks[:length] = [] ret = self.backend.get_tasks(task['id'] for task in cur_tasks) self.assertCountEqual(ret, cur_tasks) @istest def filter_task_to_archive(self): """Filtering only list disabled recurring or completed oneshot tasks """ self._create_task_types() _time = utcnow() recurring = self._tasks_from_template(self.task1_template, _time, 12) oneshots = self._tasks_from_template(self.task2_template, _time, 12) total_tasks = len(recurring) + len(oneshots) # simulate scheduling tasks pending_tasks = self.backend.create_tasks(recurring + oneshots) backend_tasks = [{ 'task': task['id'], 'backend_id': str(uuid.uuid4()), 'scheduled': utcnow(), } for task in pending_tasks] self.backend.mass_schedule_task_runs(backend_tasks) # we simulate the task are being done _tasks = [] for task in backend_tasks: t = self.backend.end_task_run( task['backend_id'], status='eventful') _tasks.append(t) # Randomly update task's status per policy status_per_policy = {'recurring': 0, 'oneshot': 0} status_choice = { # policy: [tuple (1-for-filtering, 'associated-status')] 'recurring': [(1, 'disabled'), (0, 'completed'), (0, 'next_run_not_scheduled')], 'oneshot': [(0, 'next_run_not_scheduled'), (0, 'disabled'), (1, 'completed')] } tasks_to_update = defaultdict(list) _task_ids = defaultdict(list) # randomize 'disabling' recurring task or 'complete' oneshot task for task in pending_tasks: policy = task['policy'] _task_ids[policy].append(task['id']) status = random.choice(status_choice[policy]) if status[0] != 1: continue # elected for filtering status_per_policy[policy] += status[0] tasks_to_update[policy].append(task['id']) self.backend.disable_tasks(tasks_to_update['recurring']) # hack: change the status to something else than completed self.backend.set_status_tasks( _task_ids['oneshot'], status='disabled') self.backend.set_status_tasks( tasks_to_update['oneshot'], status='completed') total_tasks_filtered = (status_per_policy['recurring'] + status_per_policy['oneshot']) # retrieve tasks to archive after = _time.shift(days=-1).format('YYYY-MM-DD') before = utcnow().shift(days=1).format('YYYY-MM-DD') tasks_to_archive = list(self.backend.filter_task_to_archive( after_ts=after, before_ts=before, limit=total_tasks)) self.assertEqual(len(tasks_to_archive), total_tasks_filtered) actual_filtered_per_status = {'recurring': 0, 'oneshot': 0} for task in tasks_to_archive: actual_filtered_per_status[task['task_policy']] += 1 self.assertEqual(actual_filtered_per_status, status_per_policy) @istest def delete_archived_tasks(self): self._create_task_types() _time = utcnow() recurring = self._tasks_from_template( self.task1_template, _time, 12) oneshots = self._tasks_from_template( self.task2_template, _time, 12) total_tasks = len(recurring) + len(oneshots) pending_tasks = self.backend.create_tasks(recurring + oneshots) backend_tasks = [{ 'task': task['id'], 'backend_id': str(uuid.uuid4()), 'scheduled': utcnow(), } for task in pending_tasks] self.backend.mass_schedule_task_runs(backend_tasks) _tasks = [] percent = random.randint(0, 100) # random election removal boundary for task in backend_tasks: t = self.backend.end_task_run( task['backend_id'], status='eventful') c = random.randint(0, 100) if c <= percent: _tasks.append({'task_id': t['task'], 'task_run_id': t['id']}) self.backend.delete_archived_tasks(_tasks) self.cursor.execute('select count(*) from task') tasks_count = 
self.cursor.fetchone() self.cursor.execute('select count(*) from task_run') tasks_run_count = self.cursor.fetchone() self.assertEqual(tasks_count[0], total_tasks - len(_tasks)) self.assertEqual(tasks_run_count[0], total_tasks - len(_tasks)) class LocalSchedulerTest(CommonSchedulerTest, unittest.TestCase): def setUp(self): super().setUp() self.config = {'scheduling_db': 'dbname=' + self.TEST_DB_NAME} self.backend = get_scheduler('local', self.config) diff --git a/swh/scheduler/tests/test_utils.py b/swh/scheduler/tests/test_utils.py new file mode 100644 index 0000000..a539cea --- /dev/null +++ b/swh/scheduler/tests/test_utils.py @@ -0,0 +1,60 @@ +# Copyright (C) 2017-2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import unittest + +from datetime import timezone +from nose.tools import istest +from unittest.mock import patch + +from swh.scheduler import utils + + +class UtilsTest(unittest.TestCase): + + @istest + @patch('swh.scheduler.utils.datetime') + def create_oneshot_task_dict_simple(self, mock_datetime): + mock_datetime.now.return_value = 'some-date' + + actual_task = utils.create_oneshot_task_dict('some-task-type') + + expected_task = { + 'policy': 'oneshot', + 'type': 'some-task-type', + 'next_run': 'some-date', + 'arguments': { + 'args': [], + 'kwargs': {}, + }, + 'priority': None, + } + + self.assertEqual(actual_task, expected_task) + mock_datetime.now.assert_called_once_with(tz=timezone.utc) + + @istest + @patch('swh.scheduler.utils.datetime') + def create_oneshot_task_dict_other_call(self, mock_datetime): + mock_datetime.now.return_value = 'some-other-date' + + actual_task = utils.create_oneshot_task_dict( + 'some-task-type', 'arg0', 'arg1', + priority='high', other_stuff='normal' + ) + + expected_task = { + 'policy': 'oneshot', + 'type': 'some-task-type', + 'next_run': 'some-other-date', + 'arguments': { + 'args': ('arg0', 'arg1'), + 'kwargs': {'other_stuff': 'normal'}, + }, + 'priority': 'high', + } + + self.assertEqual(actual_task, expected_task) + mock_datetime.now.assert_called_once_with(tz=timezone.utc) diff --git a/swh/scheduler/tests/updater/__init__.py b/swh/scheduler/tests/updater/__init__.py new file mode 100644 index 0000000..bd7747b --- /dev/null +++ b/swh/scheduler/tests/updater/__init__.py @@ -0,0 +1,53 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from arrow import utcnow + +try: + from hypothesis.strategies import from_regex +except ImportError: + from hypothesis.strategies import text + + # Revert to using basic text generation + def from_regex(*args, **kwargs): + return text() + + +class UpdaterTestUtil: + """Mixin intended for event generation purposes + + """ + def _make_event(self, event_type, name, origin_type): + return { + 'type': event_type, + 'repo': { + 'name': name, + }, + 'created_at': utcnow(), + 'origin_type': origin_type, + } + + def _make_events(self, events): + for event_type, repo_name, origin_type in events: + yield self._make_event(event_type, repo_name, origin_type) + + def _make_incomplete_event(self, event_type, name, origin_type, + missing_data_key): + event = self._make_event(event_type, name, origin_type) + del event[missing_data_key] + return 
event + + def _make_incomplete_events(self, events): + for event_type, repo_name, origin_type, missing_data_key in events: + yield self._make_incomplete_event(event_type, repo_name, + origin_type, missing_data_key) + + def _make_simple_event(self, event_type, name, origin_type): + return { + 'type': event_type, + 'url': 'https://fakeurl/%s' % name, + 'origin_type': origin_type, + 'created_at': utcnow(), + } diff --git a/swh/scheduler/tests/updater/test_backend.py b/swh/scheduler/tests/updater/test_backend.py new file mode 100644 index 0000000..c141f6c --- /dev/null +++ b/swh/scheduler/tests/updater/test_backend.py @@ -0,0 +1,71 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import os +import unittest + +from arrow import utcnow +from nose.plugins.attrib import attr +from nose.tools import istest +from hypothesis import given +from hypothesis.strategies import sets + +from swh.core.tests.db_testing import SingleDbTestFixture +from swh.scheduler.updater.backend import SchedulerUpdaterBackend +from swh.scheduler.updater.events import SWHEvent + +from . import from_regex + + +TEST_DIR = os.path.dirname(os.path.abspath(__file__)) +TEST_DATA_DIR = os.path.join(TEST_DIR, '../../../../../swh-storage-testdata') + + +@attr('db') +class SchedulerUpdaterBackendTest(SingleDbTestFixture, unittest.TestCase): + TEST_DB_NAME = 'softwareheritage-scheduler-updater-test' + TEST_DB_DUMP = os.path.join(TEST_DATA_DIR, + 'dumps/swh-scheduler-updater.dump') + + def setUp(self): + super().setUp() + config = { + 'scheduling_updater_db': 'dbname=' + self.TEST_DB_NAME, + 'cache_read_limit': 1000, + } + self.backend = SchedulerUpdaterBackend(**config) + + def _empty_tables(self): + self.cursor.execute( + """SELECT table_name FROM information_schema.tables + WHERE table_schema = %s""", ('public', )) + tables = set(table for (table,) in self.cursor.fetchall()) + for table in tables: + self.cursor.execute('truncate table %s cascade' % table) + self.conn.commit() + + def tearDown(self): + self.backend.close_connection() + self._empty_tables() + super().tearDown() + + @istest + @given(sets( + from_regex( + r'^https://somewhere[.]org/[a-z0-9]{5,7}/[a-z0-9]{3,10}$'), + min_size=10, max_size=15)) + def cache_read(self, urls): + def gen_events(urls): + for url in urls: + yield SWHEvent({ + 'url': url, + 'type': 'create', + 'origin_type': 'git', + }) + + self.backend.cache_put(gen_events(urls)) + r = self.backend.cache_read(timestamp=utcnow()) + + self.assertNotEqual(r, []) diff --git a/swh/scheduler/tests/updater/test_consumer.py b/swh/scheduler/tests/updater/test_consumer.py new file mode 100644 index 0000000..0944e48 --- /dev/null +++ b/swh/scheduler/tests/updater/test_consumer.py @@ -0,0 +1,199 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import unittest + +from hypothesis import given +from hypothesis.strategies import sampled_from, lists, tuples, text + +from itertools import chain +from nose.tools import istest + +from . 
import UpdaterTestUtil, from_regex + +from swh.scheduler.updater.events import SWHEvent, LISTENED_EVENTS +from swh.scheduler.updater.consumer import UpdaterConsumer + + +class FakeSchedulerUpdaterBackend: + def __init__(self): + self.events = [] + + def cache_put(self, events): + self.events.append(events) + + +class FakeUpdaterConsumerBase(UpdaterConsumer): + def __init__(self, backend_class=FakeSchedulerUpdaterBackend): + super().__init__(backend_class=backend_class) + self.connection_opened = False + self.connection_closed = False + self.consume_called = False + self.has_events_called = False + + def open_connection(self): + self.connection_opened = True + + def close_connection(self): + self.connection_closed = True + + def convert_event(self, event): + pass + + +class FakeUpdaterConsumerRaise(FakeUpdaterConsumerBase): + def has_events(self): + self.has_events_called = True + return True + + def consume_events(self): + self.consume_called = True + raise ValueError('Broken stuff') + + +class UpdaterConsumerRaisingTest(unittest.TestCase): + def setUp(self): + self.updater = FakeUpdaterConsumerRaise() + + @istest + def running_raise(self): + """Raising during run should finish fine. + + """ + # given + self.assertEqual(self.updater.count, 0) + self.assertEqual(self.updater.seen_events, set()) + self.assertEqual(self.updater.events, []) + + # when + with self.assertRaisesRegex(ValueError, 'Broken stuff'): + self.updater.run() + + # then + self.assertEqual(self.updater.count, 0) + self.assertEqual(self.updater.seen_events, set()) + self.assertEqual(self.updater.events, []) + self.assertTrue(self.updater.connection_opened) + self.assertTrue(self.updater.has_events_called) + self.assertTrue(self.updater.connection_closed) + self.assertTrue(self.updater.consume_called) + + +class FakeUpdaterConsumerNoEvent(FakeUpdaterConsumerBase): + def has_events(self): + self.has_events_called = True + return False + + def consume_events(self): + self.consume_called = True + + +class UpdaterConsumerNoEventTest(unittest.TestCase): + def setUp(self): + self.updater = FakeUpdaterConsumerNoEvent() + + @istest + def running_does_not_consume(self): + """Run with no events should do just fine""" + # given + self.assertEqual(self.updater.count, 0) + self.assertEqual(self.updater.seen_events, set()) + self.assertEqual(self.updater.events, []) + + # when + self.updater.run() + + # then + self.assertEqual(self.updater.count, 0) + self.assertEqual(self.updater.seen_events, set()) + self.assertEqual(self.updater.events, []) + self.assertTrue(self.updater.connection_opened) + self.assertTrue(self.updater.has_events_called) + self.assertTrue(self.updater.connection_closed) + self.assertFalse(self.updater.consume_called) + + +EVENT_KEYS = ['type', 'repo', 'created_at', 'origin_type'] + + +class FakeUpdaterConsumer(FakeUpdaterConsumerBase): + def __init__(self, messages): + super().__init__() + self.messages = messages + self.debug = False + + def has_events(self): + self.has_events_called = True + return len(self.messages) > 0 + + def consume_events(self): + self.consume_called = True + for msg in self.messages: + yield msg + self.messages.pop() + + def convert_event(self, event, keys=EVENT_KEYS): + for k in keys: + v = event.get(k) + if v is None: + return None + + e = { + 'type': event['type'], + 'url': 'https://fake.url/%s' % event['repo']['name'], + 'last_seen': event['created_at'], + 'origin_type': event['origin_type'], + } + return SWHEvent(e) + + +class UpdaterConsumerWithEventTest(UpdaterTestUtil, 
unittest.TestCase): + @istest + @given(lists(tuples(sampled_from(LISTENED_EVENTS), # event type + from_regex(r'^[a-z0-9]{5,10}/[a-z0-9]{7,12}$'), # name + text()), # origin type + min_size=3, max_size=10), + lists(tuples(text(), # event type + from_regex(r'^[a-z0-9]{5,7}/[a-z0-9]{3,10}$'), # name + text()), # origin type + min_size=3, max_size=10), + lists(tuples(sampled_from(LISTENED_EVENTS), # event type + from_regex(r'^[a-z0-9]{5,10}/[a-z0-9]{7,12}$'), # name + text(), # origin type + sampled_from(EVENT_KEYS)), # keys to drop + min_size=3, max_size=10)) + def running(self, events, uninteresting_events, incomplete_events): + """Interesting events are written to cache, others are dropped + + """ + # given + ready_events = self._make_events(events) + ready_uninteresting_events = self._make_events(uninteresting_events) + ready_incomplete_events = self._make_incomplete_events( + incomplete_events) + + updater = FakeUpdaterConsumer(list(chain( + ready_events, ready_incomplete_events, + ready_uninteresting_events))) + + self.assertEqual(updater.count, 0) + self.assertEqual(updater.seen_events, set()) + self.assertEqual(updater.events, []) + + # when + updater.run() + + # then + self.assertEqual(updater.count, 0) + self.assertEqual(updater.seen_events, set()) + self.assertEqual(updater.events, []) + self.assertTrue(updater.connection_opened) + self.assertTrue(updater.has_events_called) + self.assertTrue(updater.connection_closed) + self.assertTrue(updater.consume_called) + + self.assertEqual(updater.messages, []) + # uninteresting or incomplete events are dropped + self.assertTrue(len(updater.backend.events), len(events)) diff --git a/swh/scheduler/tests/updater/test_events.py b/swh/scheduler/tests/updater/test_events.py new file mode 100644 index 0000000..cb7489e --- /dev/null +++ b/swh/scheduler/tests/updater/test_events.py @@ -0,0 +1,48 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import unittest + +from hypothesis import given +from hypothesis.strategies import text, sampled_from +from nose.tools import istest + +from swh.scheduler.updater.events import SWHEvent, LISTENED_EVENTS +from swh.scheduler.updater.ghtorrent import events + +from . 
import UpdaterTestUtil + + +def event_values_ko(): + return set(events['evt']).union( + set(events['ent'])).difference( + set(LISTENED_EVENTS)) + + +WRONG_EVENTS = sorted(list(event_values_ko())) + + +class EventTest(UpdaterTestUtil, unittest.TestCase): + @istest + @given(sampled_from(LISTENED_EVENTS), text(), text()) + def is_interesting_ok(self, event_type, name, origin_type): + evt = self._make_simple_event(event_type, name, origin_type) + self.assertTrue(SWHEvent(evt).is_interesting()) + + @istest + @given(text(), text(), text()) + def is_interested_with_noisy_event_should_be_ko( + self, event_type, name, origin_type): + if event_type in LISTENED_EVENTS: + # just in case something good is generated, skip it + return + evt = self._make_simple_event(event_type, name, origin_type) + self.assertFalse(SWHEvent(evt).is_interesting()) + + @istest + @given(sampled_from(WRONG_EVENTS), text(), text()) + def is_interesting_ko(self, event_type, name, origin_type): + evt = self._make_simple_event(event_type, name, origin_type) + self.assertFalse(SWHEvent(evt).is_interesting()) diff --git a/swh/scheduler/tests/updater/test_ghtorrent.py b/swh/scheduler/tests/updater/test_ghtorrent.py new file mode 100644 index 0000000..bfeecf2 --- /dev/null +++ b/swh/scheduler/tests/updater/test_ghtorrent.py @@ -0,0 +1,171 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import unittest + +from hypothesis import given +from hypothesis.strategies import sampled_from +from nose.tools import istest +from unittest.mock import patch + +from swh.scheduler.updater.events import SWHEvent +from swh.scheduler.updater.ghtorrent import ( + events, GHTorrentConsumer, INTERESTING_EVENT_KEYS) + +from . 
import from_regex, UpdaterTestUtil + + +def event_values(): + return set(events['evt']).union(set(events['ent'])) + + +def ghtorrentize_event_name(event_name): + return '%sEvent' % event_name.capitalize() + + +EVENT_TYPES = sorted([ghtorrentize_event_name(e) for e in event_values()]) + + +class FakeChannel: + """Fake Channel (virtual connection inside a connection) + + """ + def close(self): + self.close = True + + +class FakeConnection: + """Fake Rabbitmq connection for test purposes + + """ + def __init__(self, conn_string): + self._conn_string = conn_string + self._connect = False + self._release = False + self._channel = False + + def connect(self): + self._connect = True + return True + + def release(self): + self._connect = False + self._release = True + + def channel(self): + self._channel = True + return FakeChannel() + + +class GHTorrentConsumerTest(UpdaterTestUtil, unittest.TestCase): + def setUp(self): + self.fake_config = { + 'conn': { + 'url': 'amqp://u:p@https://somewhere:9807', + }, + 'debug': True, + 'batch_cache_write': 10, + 'rabbitmq_prefetch_read': 100, + } + + self.consumer = GHTorrentConsumer(self.fake_config, + _connection_class=FakeConnection) + + @istest + def test_init(self): + # given + # check init is ok + self.assertEqual(self.consumer.debug, + self.fake_config['debug']) + self.assertEqual(self.consumer.batch, + self.fake_config['batch_cache_write']) + self.assertEqual(self.consumer.prefetch_read, + self.fake_config['rabbitmq_prefetch_read']) + self.assertEqual(self.consumer.config, self.fake_config) + + @istest + def test_has_events(self): + self.assertTrue(self.consumer.has_events()) + + @istest + def test_connection(self): + # when + self.consumer.open_connection() + + # then + self.assertEqual(self.consumer.conn._conn_string, + self.fake_config['conn']['url']) + self.assertTrue(self.consumer.conn._connect) + self.assertFalse(self.consumer.conn._release) + + # when + self.consumer.close_connection() + + # then + self.assertFalse(self.consumer.conn._connect) + self.assertTrue(self.consumer.conn._release) + self.assertIsInstance(self.consumer.channel, FakeChannel) + + @istest + @given(sampled_from(EVENT_TYPES), + from_regex(r'^[a-z0-9]{5,7}/[a-z0-9]{3,10}$')) + def convert_event_ok(self, event_type, name): + input_event = self._make_event(event_type, name, 'git') + actual_event = self.consumer.convert_event(input_event) + + self.assertTrue(isinstance(actual_event, SWHEvent)) + + event = actual_event.get() + + expected_event = { + 'type': event_type.lower().rstrip('Event'), + 'url': 'https://github.com/%s' % name, + 'last_seen': input_event['created_at'], + 'cnt': 1, + 'origin_type': 'git', + } + self.assertEqual(event, expected_event) + + @istest + @given(sampled_from(EVENT_TYPES), + from_regex(r'^[a-z0-9]{5,7}/[a-z0-9]{3,10}$'), + sampled_from(INTERESTING_EVENT_KEYS)) + def convert_event_ko(self, event_type, name, missing_data_key): + input_event = self._make_incomplete_event( + event_type, name, 'git', missing_data_key) + + actual_converted_event = self.consumer.convert_event(input_event) + + self.assertIsNone(actual_converted_event) + + @patch('swh.scheduler.updater.ghtorrent.collect_replies') + @istest + def consume_events(self, mock_collect_replies): + # given + self.consumer.queue = 'fake-queue' # hack + self.consumer.open_connection() + + fake_events = [ + self._make_event('PushEvent', 'user/some-repo', 'git'), + self._make_event('PushEvent', 'user2/some-other-repo', 'git'), + ] + + mock_collect_replies.return_value = fake_events + + # when + 
actual_events = [] + for e in self.consumer.consume_events(): + actual_events.append(e) + + # then + self.assertEqual(fake_events, actual_events) + + mock_collect_replies.assert_called_once_with( + self.consumer.conn, + self.consumer.channel, + 'fake-queue', + no_ack=False, + limit=self.fake_config['rabbitmq_prefetch_read'] + ) diff --git a/swh/scheduler/tests/updater/test_writer.py b/swh/scheduler/tests/updater/test_writer.py new file mode 100644 index 0000000..2d98f28 --- /dev/null +++ b/swh/scheduler/tests/updater/test_writer.py @@ -0,0 +1,162 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import os +import unittest + + +from nose.plugins.attrib import attr +from nose.tools import istest + +from swh.core.tests.db_testing import DbTestFixture +from swh.scheduler.updater.events import SWHEvent +from swh.scheduler.updater.writer import UpdaterWriter +from swh.scheduler.updater.events import LISTENED_EVENTS + +from . import UpdaterTestUtil + + +TEST_DIR = os.path.dirname(os.path.abspath(__file__)) +TEST_DATA_DIR = os.path.join(TEST_DIR, '../../../../../swh-storage-testdata') + + +@attr('db') +class CommonSchedulerTest(DbTestFixture): + TEST_SCHED_DB = 'softwareheritage-scheduler-test' + TEST_SCHED_DUMP = os.path.join(TEST_DATA_DIR, + 'dumps/swh-scheduler.dump') + + TEST_SCHED_UPDATER_DB = 'softwareheritage-scheduler-updater-test' + TEST_SCHED_UPDATER_DUMP = os.path.join(TEST_DATA_DIR, + 'dumps/swh-scheduler-updater.dump') + + @classmethod + def setUpClass(cls): + cls.add_db(cls.TEST_SCHED_DB, cls.TEST_SCHED_DUMP) + cls.add_db(cls.TEST_SCHED_UPDATER_DB, cls.TEST_SCHED_UPDATER_DUMP) + super().setUpClass() + + def tearDown(self): + self.reset_db_tables(self.TEST_SCHED_UPDATER_DB) + self.reset_db_tables(self.TEST_SCHED_DB, + excluded=['task_type', 'priority_ratio']) + super().tearDown() + + +class UpdaterWriterTest(UpdaterTestUtil, CommonSchedulerTest, + unittest.TestCase): + def setUp(self): + super().setUp() + + config = { + 'scheduler': { + 'cls': 'local', + 'args': { + 'scheduling_db': 'dbname=softwareheritage-scheduler-test', + }, + }, + 'scheduler_updater': { + 'scheduling_updater_db': + 'dbname=softwareheritage-scheduler-updater-test', + 'cache_read_limit': 5, + }, + 'pause': 0.1, + 'verbose': False, + } + self.writer = UpdaterWriter(**config) + self.scheduler_backend = self.writer.scheduler_backend + self.scheduler_updater_backend = self.writer.scheduler_updater_backend + + def tearDown(self): + self.scheduler_backend.close_connection() + self.scheduler_updater_backend.close_connection() + super().tearDown() + + @istest + def run_ko(self): + """Only git tasks are supported for now, other types are dismissed. 
+ + """ + ready_events = [ + SWHEvent( + self._make_simple_event(event_type, 'origin-%s' % i, + 'svn')) + for i, event_type in enumerate(LISTENED_EVENTS) + ] + + expected_length = len(ready_events) + + self.scheduler_updater_backend.cache_put(ready_events) + data = list(self.scheduler_updater_backend.cache_read()) + self.assertEqual(len(data), expected_length) + + r = self.scheduler_backend.peek_ready_tasks( + 'origin-update-git') + + # first read on an empty scheduling db results with nothing in it + self.assertEqual(len(r), 0) + + # Read from cache to scheduler db + self.writer.run() + + r = self.scheduler_backend.peek_ready_tasks( + 'origin-update-git') + + # other reads after writes are still empty since it's not supported + self.assertEqual(len(r), 0) + + @istest + def run_ok(self): + """Only git origin are supported for now + + """ + ready_events = [ + SWHEvent( + self._make_simple_event(event_type, 'origin-%s' % i, 'git')) + for i, event_type in enumerate(LISTENED_EVENTS) + ] + + expected_length = len(ready_events) + + self.scheduler_updater_backend.cache_put(ready_events) + + data = list(self.scheduler_updater_backend.cache_read()) + self.assertEqual(len(data), expected_length) + + r = self.scheduler_backend.peek_ready_tasks( + 'origin-update-git') + + # first read on an empty scheduling db results with nothing in it + self.assertEqual(len(r), 0) + + # Read from cache to scheduler db + self.writer.run() + + # now, we should have scheduling task ready + r = self.scheduler_backend.peek_ready_tasks( + 'origin-update-git') + + self.assertEquals(len(r), expected_length) + + # Check the task has been scheduled + for t in r: + self.assertEquals(t['type'], 'origin-update-git') + self.assertEquals(t['priority'], 'normal') + self.assertEquals(t['policy'], 'oneshot') + self.assertEquals(t['status'], 'next_run_not_scheduled') + + # writer has nothing to do now + self.writer.run() + + # so no more data in cache + data = list(self.scheduler_updater_backend.cache_read()) + + self.assertEqual(len(data), 0) + + # provided, no runner is ran, still the same amount of scheduling tasks + r = self.scheduler_backend.peek_ready_tasks( + 'origin-update-git') + + self.assertEquals(len(r), expected_length) diff --git a/swh/scheduler/updater/__init__.py b/swh/scheduler/updater/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swh/scheduler/updater/backend.py b/swh/scheduler/updater/backend.py new file mode 100644 index 0000000..68b1318 --- /dev/null +++ b/swh/scheduler/updater/backend.py @@ -0,0 +1,82 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + + +from arrow import utcnow +from swh.core.config import SWHConfig +from swh.scheduler.backend import DbBackend, autocommit + + +class SchedulerUpdaterBackend(SWHConfig, DbBackend): + CONFIG_BASE_FILENAME = 'scheduler-updater' + DEFAULT_CONFIG = { + 'scheduling_updater_db': ( + 'str', 'dbname=softwareheritage-scheduler-updater-dev'), + 'cache_read_limit': ('int', 1000), + } + + def __init__(self, **override_config): + super().__init__() + if override_config: + self.config = override_config + else: + self.config = self.parse_config_file(global_config=False) + self.db = None + self.db_conn_dsn = self.config['scheduling_updater_db'] + self.limit = self.config['cache_read_limit'] + self.reconnect() + + cache_put_keys = ['url', 'cnt', 'last_seen', 
'origin_type'] + + @autocommit + def cache_put(self, events, timestamp=None, cursor=None): + """Write new events in the backend. + + """ + if timestamp is None: + timestamp = utcnow() + + def prepare_events(events): + for e in events: + event = e.get() + seen = event['last_seen'] + if seen is None: + event['last_seen'] = timestamp + yield event + + cursor.execute('select swh_mktemp_cache()') + self.copy_to(prepare_events(events), + 'tmp_cache', self.cache_put_keys, cursor) + cursor.execute('select swh_cache_put()') + + cache_read_keys = ['id', 'url', 'origin_type', 'cnt', 'first_seen', + 'last_seen'] + + @autocommit + def cache_read(self, timestamp=None, limit=None, cursor=None): + """Read events from the cache prior to timestamp. + + """ + if not timestamp: + timestamp = utcnow() + + if not limit: + limit = self.limit + + q = self._format_query('select {keys} from swh_cache_read(%s, %s)', + self.cache_read_keys) + cursor.execute(q, (timestamp, limit)) + for r in cursor.fetchall(): + r['id'] = r['id'].tobytes() + yield r + + @autocommit + def cache_remove(self, entries, cursor=None): + """Clean events from the cache + + """ + q = 'delete from cache where url in (%s)' % ( + ', '.join(("'%s'" % e for e in entries)), ) + cursor.execute(q) diff --git a/swh/scheduler/updater/consumer.py b/swh/scheduler/updater/consumer.py new file mode 100644 index 0000000..e0a0760 --- /dev/null +++ b/swh/scheduler/updater/consumer.py @@ -0,0 +1,140 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import logging + +from abc import ABCMeta, abstractmethod + +from swh.scheduler.updater.backend import SchedulerUpdaterBackend + + +class UpdaterConsumer(metaclass=ABCMeta): + """Event consumer + + """ + def __init__(self, batch=1000, backend_class=SchedulerUpdaterBackend, + log_class='swh.scheduler.updater.consumer.UpdaterConsumer'): + super().__init__() + self._reset_cache() + self.backend = backend_class() + self.batch = batch + logging.basicConfig(level=logging.DEBUG) + self.log = logging.getLogger(log_class) + + def _reset_cache(self): + """Reset internal cache. + + """ + self.count = 0 + self.seen_events = set() + self.events = [] + + def is_interesting(self, event): + """Determine if an event is interesting or not. + + Args: + event (SWHEvent): SWH event + + """ + return event.is_interesting() + + @abstractmethod + def convert_event(self, event): + """Parse an event into an SWHEvent. + + """ + pass + + def process_event(self, event): + """Process converted and interesting event. + + Args: + event (SWHEvent): Event to process if deemed interesting + + """ + try: + if event.url in self.seen_events: + event.cnt += 1 + else: + self.events.append(event) + self.seen_events.add(event.url) + self.count += 1 + finally: + if self.count >= self.batch: + if self.events: + self.backend.cache_put(self.events) + self._reset_cache() + + def _flush(self): + """Flush remaining internal cache if any. + + """ + if self.events: + self.backend.cache_put(self.events) + self._reset_cache() + + @abstractmethod + def has_events(self): + """Determine if there remains events to consume. + + Returns + boolean value, true for remaining events, False otherwise + + """ + pass + + @abstractmethod + def consume_events(self): + """The main entry point to consume events. + + This should either yield or return message for consumption. 
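
The abstract contract of UpdaterConsumer (convert_event, has_events, consume_events, open/close_connection, plus the run() loop that follows below) can be exercised without a broker or a database. ListConsumer and FakeBackend in this sketch are made-up names, not part of the patch; note that run() reads a debug attribute, so a concrete subclass has to set one, as GHTorrentConsumer does later in this diff:

    from swh.scheduler.updater.consumer import UpdaterConsumer
    from swh.scheduler.updater.events import SWHEvent


    class FakeBackend:
        """Stand-in for SchedulerUpdaterBackend: just records cache writes."""
        def __init__(self):
            self.written = []

        def cache_put(self, events):
            self.written.extend(events)


    class ListConsumer(UpdaterConsumer):
        """Hypothetical consumer draining an in-memory list of raw events."""
        def __init__(self, raw_events):
            super().__init__(batch=2, backend_class=FakeBackend)
            self._pending = list(raw_events)
            self.debug = False          # attribute expected by run()

        def has_events(self):
            return bool(self._pending)

        def consume_events(self):
            while self._pending:
                yield self._pending.pop(0)

        def convert_event(self, raw):
            # returning None makes run() drop the message with a warning
            if 'type' not in raw or 'url' not in raw:
                return None
            return SWHEvent(raw)

        def open_connection(self):
            pass

        def close_connection(self):
            pass


    c = ListConsumer([
        {'type': 'push', 'url': 'https://example.org/repo',
         'origin_type': 'git'},
        {'type': 'fork', 'url': 'https://example.org/other'},  # not listened
        {'no': 'type here'},                                    # dropped
    ])
    c.run()
    # only the listened 'push' event reaches the (fake) cache backend
    assert [e.url for e in c.backend.written] == ['https://example.org/repo']
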
+ + """ + pass + + @abstractmethod + def open_connection(self): + """Open a connection to the remote system we are supposed to consume + from. + + """ + pass + + @abstractmethod + def close_connection(self): + """Close opened connection to the remote system. + + """ + pass + + def run(self): + """The main entry point to consume events. + + """ + try: + self.open_connection() + while self.has_events(): + for _event in self.consume_events(): + event = self.convert_event(_event) + if not event: + self.log.warn( + 'Incomplete event dropped %s' % _event) + continue + if not self.is_interesting(event): + continue + if self.debug: + self.log.debug('Event: %s' % event) + try: + self.process_event(event) + except Exception: + self.log.exception( + 'Problem when processing event %s' % _event) + continue + except Exception as e: + self.log.error('Error raised during consumption: %s' % e) + raise e + finally: + self.close_connection() + self._flush() diff --git a/swh/scheduler/updater/events.py b/swh/scheduler/updater/events.py new file mode 100644 index 0000000..d70efbe --- /dev/null +++ b/swh/scheduler/updater/events.py @@ -0,0 +1,39 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + + +LISTENED_EVENTS = [ + 'delete', + 'public', + 'push' +] + + +class SWHEvent: + """SWH's interesting event (resulting in an origin update) + + """ + def __init__(self, evt, cnt=1): + self.event = evt + self.type = evt['type'].lower() + self.url = evt['url'] + self.last_seen = evt.get('last_seen') + self.cnt = cnt + self.origin_type = evt.get('origin_type') + + def is_interesting(self): + return self.type in LISTENED_EVENTS + + def get(self): + return { + 'type': self.type, + 'url': self.url, + 'last_seen': self.last_seen, + 'cnt': self.cnt, + 'origin_type': self.origin_type + } + + def __str__(self): + return str(self.get()) diff --git a/swh/scheduler/updater/ghtorrent/__init__.py b/swh/scheduler/updater/ghtorrent/__init__.py new file mode 100644 index 0000000..2b9e5fc --- /dev/null +++ b/swh/scheduler/updater/ghtorrent/__init__.py @@ -0,0 +1,146 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import json + +from kombu import Connection, Exchange, Queue +from kombu.common import collect_replies + +from swh.core.config import SWHConfig +from swh.scheduler.updater.events import SWHEvent +from swh.scheduler.updater.consumer import UpdaterConsumer + + +events = { + # ghtorrent events related to github events (interesting) + 'evt': [ + 'commitcomment', 'create', 'delete', 'deployment', + 'deploymentstatus', 'download', 'follow', 'fork', 'forkapply', + 'gist', 'gollum', 'issuecomment', 'issues', 'member', + 'membership', 'pagebuild', 'public', 'pullrequest', + 'pullrequestreviewcomment', 'push', 'release', 'repository', + 'status', 'teamadd', 'watch' + ], + # ghtorrent events related to mongodb insert (not interesting) + 'ent': [ + 'commit_comments', 'commits', 'followers', 'forks', + 'geo_cache', 'issue_comments', 'issue_events', 'issues', + 'org_members', 'pull_request_comments', 'pull_requests', + 'repo_collaborators', 'repo_labels', 'repos', 'users', 'watchers' + ] +} + + +class RabbitMQConn(SWHConfig): + """RabbitMQ 
Connection class + + """ + CONFIG_BASE_FILENAME = 'backend/ghtorrent' + + DEFAULT_CONFIG = { + 'conn': ('dict', { + 'url': 'amqp://guest:guest@localhost:5672', + 'exchange_name': 'ght-streams', + 'routing_key': 'something', + 'queue_name': 'fake-events' + }) + } + + ADDITIONAL_CONFIG = {} + + def __init__(self, **config): + super().__init__() + if config and set(config.keys()) - {'log_class'} != set(): + self.config = config + else: + self.config = self.parse_config_file( + additional_configs=[self.ADDITIONAL_CONFIG]) + + self.conn_string = self.config['conn']['url'] + self.exchange = Exchange(self.config['conn']['exchange_name'], + 'topic', durable=True) + self.routing_key = self.config['conn']['routing_key'] + self.queue = Queue(self.config['conn']['queue_name'], + exchange=self.exchange, + routing_key=self.routing_key, + auto_delete=True) + + +INTERESTING_EVENT_KEYS = ['type', 'repo', 'created_at'] + + +class GHTorrentConsumer(RabbitMQConn, UpdaterConsumer): + """GHTorrent events consumer + + """ + ADDITIONAL_CONFIG = { + 'debug': ('bool', False), + 'batch_cache_write': ('int', 1000), + 'rabbitmq_prefetch_read': ('int', 100), + } + + def __init__(self, config=None, _connection_class=Connection): + if config is None: + super().__init__( + log_class='swh.scheduler.updater.ghtorrent.GHTorrentConsumer') + else: + self.config = config + self._connection_class = _connection_class + self.debug = self.config['debug'] + self.batch = self.config['batch_cache_write'] + self.prefetch_read = self.config['rabbitmq_prefetch_read'] + + def has_events(self): + """Always has events + + """ + return True + + def convert_event(self, event): + """Given ghtorrent event, convert it to a SWHEvent instance. + + """ + if isinstance(event, str): + event = json.loads(event) + + for k in INTERESTING_EVENT_KEYS: + if k not in event: + if hasattr(self, 'log'): + self.log.warn( + 'Event should have the \'%s\' entry defined' % k) + return None + + _type = event['type'].lower().rstrip('Event') + _repo_name = 'https://github.com/%s' % event['repo']['name'] + + return SWHEvent({ + 'type': _type, + 'url': _repo_name, + 'last_seen': event['created_at'], + 'origin_type': 'git', + }) + + def open_connection(self): + """Open rabbitmq connection + + """ + self.conn = self._connection_class(self.config['conn']['url']) + self.conn.connect() + self.channel = self.conn.channel() + + def close_connection(self): + """Close rabbitmq connection + + """ + self.channel.close() + self.conn.release() + + def consume_events(self): + """Consume and yield queue messages + + """ + yield from collect_replies( + self.conn, self.channel, self.queue, + no_ack=False, limit=self.prefetch_read) diff --git a/swh/scheduler/updater/ghtorrent/cli.py b/swh/scheduler/updater/ghtorrent/cli.py new file mode 100644 index 0000000..db05060 --- /dev/null +++ b/swh/scheduler/updater/ghtorrent/cli.py @@ -0,0 +1,28 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import click +import logging + +from swh.scheduler.updater.ghtorrent import GHTorrentConsumer + + +@click.command() +@click.option('--verbose/--no-verbose', '-v', default=False, + help='Verbose mode') +def main(verbose): + """Consume events from ghtorrent and write them to cache. 
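
A rough, self-contained sketch of the field mapping convert_event performs on one ghtorrent message; the payload and repository name below are invented. The sketch removes the trailing 'event' suffix with an explicit slice, since str.rstrip works on a character set rather than a literal suffix:

    import json

    # Invented ghtorrent-like message carrying the three fields convert_event
    # requires ('type', 'repo', 'created_at').
    raw = json.dumps({
        'type': 'PushEvent',
        'repo': {'name': 'jdoe/hello-world'},
        'created_at': '2018-01-01T10:00:00Z',
    })

    event = json.loads(raw)

    # Normalise 'PushEvent' down to the 'push'/'delete'/'public' vocabulary
    # used by LISTENED_EVENTS.
    _type = event['type'].lower()
    if _type.endswith('event'):
        _type = _type[:-len('event')]

    swh_event_payload = {
        'type': _type,                                           # 'push'
        'url': 'https://github.com/%s' % event['repo']['name'],
        'last_seen': event['created_at'],
        'origin_type': 'git',
    }
    print(swh_event_payload)
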
+ + """ + log = logging.getLogger('swh.scheduler.updater.ghtorrent.cli') + log.addHandler(logging.StreamHandler()) + _loglevel = logging.DEBUG if verbose else logging.INFO + log.setLevel(_loglevel) + + GHTorrentConsumer().run() + + +if __name__ == '__main__': + main() diff --git a/swh/scheduler/updater/ghtorrent/fake.py b/swh/scheduler/updater/ghtorrent/fake.py new file mode 100644 index 0000000..fc5f34f --- /dev/null +++ b/swh/scheduler/updater/ghtorrent/fake.py @@ -0,0 +1,82 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import random +import string + +from arrow import utcnow +from kombu import Connection + +from swh.scheduler.updater.ghtorrent import RabbitMQConn, events + + +class FakeRandomOriginGenerator: + def _random_string(self, length): + """Build a fake string of length length. + + """ + return ''.join([ + random.choice(string.ascii_letters + string.digits) + for n in range(length)]) + + def generate(self, user_range=range(5, 10), repo_range=range(10, 15)): + """Build a fake url + + """ + length_username = random.choice(user_range) + user = self._random_string(length_username) + length_repo = random.choice(repo_range) + repo = self._random_string(length_repo) + return '%s/%s' % (user, repo) + + +class FakeGHTorrentPublisher(RabbitMQConn): + """Fake GHTorrent that randomly publishes fake events. Those events + are published in similar manner as described by ghtorrent's + documentation [2]. + + context: stuck with raw ghtorrent so far [1] + + [1] https://github.com/ghtorrent/ghtorrent.org/issues/397#issuecomment-387052462 # noqa + [2] http://ghtorrent.org/streaming.html + + """ + + ADDITIONAL_CONFIG = { + 'nb_messages': ('int', 100) + } + + def __init__(self, **config): + super().__init__(**config) + self.fake_origin_generator = FakeRandomOriginGenerator() + self.nb_messages = self.config['nb_messages'] + + def _random_event(self): + """Create a fake and random event + + """ + event_type = random.choice(['evt', 'ent']) + sub_event = random.choice(events[event_type]) + return { + 'type': sub_event, + 'repo': { + 'name': self.fake_origin_generator.generate(), + }, + 'created_at': utcnow().isoformat() + + } + + def publish(self, nb_messages=None): + if not nb_messages: + nb_messages = self.nb_messages + + with Connection(self.config['conn']['url']) as conn: + with conn.Producer(serializer='json') as producer: + for n in range(nb_messages): + event = self._random_event() + producer.publish(event, + exchange=self.exchange, + routing_key=self.routing_key, + declare=[self.queue]) diff --git a/swh/scheduler/updater/writer.py b/swh/scheduler/updater/writer.py new file mode 100644 index 0000000..c744360 --- /dev/null +++ b/swh/scheduler/updater/writer.py @@ -0,0 +1,122 @@ +# Copyright (C) 2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import click +import logging +import time + +from arrow import utcnow + +from swh.core.config import SWHConfig +from swh.core import utils +from swh.scheduler import get_scheduler +from swh.scheduler.utils import create_oneshot_task_dict +from swh.scheduler.updater.backend import SchedulerUpdaterBackend + + +class UpdaterWriter(SWHConfig): + """Updater writer in charge 
of updating the scheduler db with latest + prioritized oneshot tasks + + In effect, this: + - reads the events from scheduler updater's db + - converts those events into priority oneshot tasks + - dumps them into the scheduler db + + """ + CONFIG_BASE_FILENAME = 'backend/scheduler-updater-writer' + DEFAULT_CONFIG = { + # access to the scheduler backend + 'scheduler': ('dict', { + 'cls': 'local', + 'args': { + 'scheduling_db': 'dbname=softwareheritage-scheduler-dev', + }, + }), + # access to the scheduler updater cache + 'scheduler_updater': ('dict', { + 'scheduling_updater_db': + 'dbname=softwareheritage-scheduler-updater-dev', + 'cache_read_limit': 1000, + }), + # waiting time between db reads + 'pause': ('int', 10), + # verbose or not + 'verbose': ('bool', False), + } + + def __init__(self, **config): + if config: + self.config = config + else: + self.config = self.parse_config_file() + + self.scheduler_updater_backend = SchedulerUpdaterBackend( + **self.config['scheduler_updater']) + self.scheduler_backend = get_scheduler(**self.config['scheduler']) + self.pause = self.config['pause'] + self.log = logging.getLogger( + 'swh.scheduler.updater.writer.UpdaterWriter') + self.log.setLevel( + logging.DEBUG if self.config['verbose'] else logging.INFO) + + def convert_to_oneshot_task(self, event): + """Given an event, convert it into oneshot task with priority + + Args: + event (dict): The event to convert to task + + """ + if event['origin_type'] == 'git': + return create_oneshot_task_dict( + 'origin-update-git', + event['url'], + priority='normal') + self.log.warn('Type %s is not supported for now, only git' % ( + event['origin_type'], )) + return None + + def write_event_to_scheduler(self, events): + """Write events to the scheduler and yield ids when done""" + # convert events to oneshot tasks + oneshot_tasks = filter(lambda e: e is not None, + map(self.convert_to_oneshot_task, events)) + # write event to scheduler + self.scheduler_backend.create_tasks(oneshot_tasks) + for e in events: + yield e['url'] + + def run(self): + """First retrieve events from cache (including origin_type, cnt), + then convert them into oneshot tasks with priority, then + write them to the scheduler db, at last remove them from + cache. 
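
To make the cache-to-task conversion concrete: for a cached git event, convert_to_oneshot_task boils down to a call to the create_oneshot_task_dict helper, whose updated version appears at the end of this patch. The cache row below is illustrative only:

    from swh.scheduler.utils import create_oneshot_task_dict

    # illustrative row, shaped like what cache_read() returns
    cache_row = {
        'url': 'https://github.com/jdoe/hello-world',
        'origin_type': 'git',
        'cnt': 3,
    }

    task = create_oneshot_task_dict(
        'origin-update-git', cache_row['url'], priority='normal')

    # task is roughly:
    # {'policy': 'oneshot',
    #  'type': 'origin-update-git',
    #  'next_run': <current UTC datetime>,
    #  'arguments': {'args': ('https://github.com/jdoe/hello-world',),
    #                'kwargs': {}},
    #  'priority': 'normal'}

For non-git origins the method returns None, which write_event_to_scheduler filters out before calling create_tasks.
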
+ + """ + while True: + timestamp = utcnow() + events = list(self.scheduler_updater_backend.cache_read(timestamp)) + if not events: + break + for urls in utils.grouper(self.write_event_to_scheduler(events), + n=100): + self.scheduler_updater_backend.cache_remove(urls) + time.sleep(self.pause) + + +@click.command() +@click.option('--verbose/--no-verbose', '-v', default=False, + help='Verbose mode') +def main(verbose): + log = logging.getLogger('swh.scheduler.updater.writer') + log.addHandler(logging.StreamHandler()) + _loglevel = logging.DEBUG if verbose else logging.INFO + log.setLevel(_loglevel) + + UpdaterWriter().run() + + +if __name__ == '__main__': + main() diff --git a/swh/scheduler/utils.py b/swh/scheduler/utils.py index d95e0be..722feba 100644 --- a/swh/scheduler/utils.py +++ b/swh/scheduler/utils.py @@ -1,49 +1,53 @@ -# Copyright (C) 2017 The Software Heritage developers +# Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import datetime +from datetime import datetime, timezone def get_task(task_name): """Retrieve task object in our application instance by its fully qualified python name. Args: task_name (str): task's name (e.g swh.loader.git.tasks.LoadDiskGitRepository) Returns: Instance of task """ from swh.scheduler.celery_backend.config import app for module in app.conf.CELERY_IMPORTS: __import__(module) return app.tasks[task_name] def create_oneshot_task_dict(type, *args, **kwargs): """Create a oneshot task scheduled for as soon as possible. Args: type (str): Type of oneshot task as per swh-scheduler's db table task_type's column (Ex: origin-update-git, swh-deposit-archive-checks) Returns: Expected dictionary for the one-shot task scheduling api (swh.scheduler.backend.create_tasks) """ + priority = None + if 'priority' in kwargs: + priority = kwargs.pop('priority') return { 'policy': 'oneshot', 'type': type, - 'next_run': datetime.datetime.now(tz=datetime.timezone.utc), + 'next_run': datetime.now(tz=timezone.utc), 'arguments': { 'args': args if args else [], 'kwargs': kwargs if kwargs else {}, - } + }, + 'priority': priority, } diff --git a/version.txt b/version.txt index be0983c..f20b482 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.26-0-gffd2dda \ No newline at end of file +v0.0.27-0-gcde2def \ No newline at end of file
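
Taken together, the new pieces form a small pipeline: the fake publisher pushes events to RabbitMQ, the ghtorrent consumer writes the interesting ones into the scheduler-updater cache, and the writer drains that cache into prioritized oneshot tasks. Both the consumer and the writer ship click entry points (swh/scheduler/updater/ghtorrent/cli.py and swh/scheduler/updater/writer.py, runnable with python3 -m). The snippet below is a local smoke-test sketch only: it assumes a reachable RabbitMQ broker and the two postgres databases named in the default configuration, none of which this patch sets up.

    from swh.scheduler.updater.ghtorrent import GHTorrentConsumer
    from swh.scheduler.updater.ghtorrent.fake import FakeGHTorrentPublisher
    from swh.scheduler.updater.writer import UpdaterWriter

    # 1. Publish a handful of invented ghtorrent events to the local broker.
    FakeGHTorrentPublisher().publish(nb_messages=10)

    # 2. Consume them into the scheduler-updater cache. has_events() always
    #    returns True, so this loops until interrupted (Ctrl-C); the finally
    #    clause of run() flushes pending events before returning.
    try:
        GHTorrentConsumer().run()
    except KeyboardInterrupt:
        pass

    # 3. Turn cached git events into 'origin-update-git' oneshot tasks;
    #    run() returns once a cache read comes back empty.
    UpdaterWriter().run()
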