diff --git a/MANIFEST.in b/MANIFEST.in
index c31099e..a7e7d55 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,6 @@
 include README.md
 include Makefile
-include requirements.txt
-include requirements-swh.txt
+include requirements*.txt
 include version.txt
 recursive-include swh/scheduler/sql *.sql
 recursive-include swh py.typed
diff --git a/PKG-INFO b/PKG-INFO
index 8802a82..e88bccd 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,28 +1,28 @@
 Metadata-Version: 2.1
 Name: swh.scheduler
-Version: 0.0.64
+Version: 0.0.65
 Summary: Software Heritage Scheduler
 Home-page: https://forge.softwareheritage.org/diffusion/DSCH/
 Author: Software Heritage developers
 Author-email: swh-devel@inria.fr
 License: UNKNOWN
 Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest
 Project-URL: Funding, https://www.softwareheritage.org/donate
 Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler
 Description: swh-scheduler
        =============
        
        Job scheduler for the Software Heritage project.
        
        Task manager for asynchronous/delayed tasks, used for both recurrent
        (e.g., listing a forge, loading new stuff from a Git repository) and one-off
        activities (e.g., loading a specific version of a source package).
 Platform: UNKNOWN
 Classifier: Programming Language :: Python :: 3
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
 Classifier: Operating System :: OS Independent
 Classifier: Development Status :: 5 - Production/Stable
 Description-Content-Type: text/markdown
 Provides-Extra: testing
diff --git a/debian/changelog b/debian/changelog
index a4adc47..2f1be53 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,597 +1,608 @@
-swh-scheduler (0.0.64-1~swh1~bpo10+1) buster-swh; urgency=medium
+swh-scheduler (0.0.65-1~swh2) unstable-swh; urgency=medium
 
-  * Rebuild for buster-swh
+  * Add pytest-mock build-dependency.
 
- -- Software Heritage autobuilder (on jenkins-debian1) Wed, 20 Nov 2019 13:31:50 +0000
+ -- Nicolas Dandrimont Fri, 13 Dec 2019 11:57:41 +0100
+
+swh-scheduler (0.0.65-1~swh1) unstable-swh; urgency=medium
+
+  * New upstream release 0.0.65 - (tagged by Nicolas Dandrimont
+    on 2019-12-13 11:45:55 +0100)
+  * Upstream changes: - Release swh.scheduler v0.0.65 - Drop the
+    scheduler updater - Add a statsd probe for task execution
+    timestamps - Add listener and runner statsd probes - CLI
+    updates - Python packaging housekeeping
+
+ -- Software Heritage autobuilder (on jenkins-debian1) Fri, 13 Dec 2019 10:54:31 +0000
 
 swh-scheduler (0.0.64-1~swh1) unstable-swh; urgency=medium
 
   * New upstream release 0.0.64 - (tagged by Antoine R.
     Dumont (@ardumont) on 2019-11-20 14:26:00 +0100)
   * Upstream changes: - v0.0.64 - req-swh*: Remove old package loader
     backend names
 
  -- Software Heritage autobuilder (on jenkins-debian1) Wed, 20 Nov 2019 13:29:37 +0000
 
 swh-scheduler (0.0.63-1~swh2) unstable-swh; urgency=medium
 
   * Update build dependency
 
  -- Antoine R. Dumont (@ardumont) Tue, 19 Nov 2019 17:07:40 +0100
 
 swh-scheduler (0.0.63-1~swh1) unstable-swh; urgency=medium
 
   * New upstream release 0.0.63 - (tagged by Antoine R.
Dumont (@ardumont) on 2019-11-19 14:09:12 +0100) * Upstream changes: - v0.0.63 - swh.scheduler.cli: Add `swh scheduler task-type register` cli - Use the shared_task decorator instead of binding to a specific celery app - celery/tests: mostly revert e770eb30 to fix celery app initialization in tests -- Software Heritage autobuilder (on jenkins-debian1) Tue, 19 Nov 2019 13:14:59 +0000 swh-scheduler (0.0.62-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.62 - (tagged by Antoine R. Dumont (@ardumont) on 2019-10-18 13:39:27 +0200) * Upstream changes: - v0.0.62 - celery_backend.config: Make JournalHandler import optional - tests: rewrite tests using pytest fixtures -- Software Heritage autobuilder (on jenkins-debian1) Fri, 18 Oct 2019 11:46:26 +0000 swh-scheduler (0.0.61-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.61 - (tagged by Nicolas Dandrimont on 2019-10-07 16:33:17 +0200) * Upstream changes: - Release swh.scheduler v0.0.61 - Remove bogus dict.get(default=) statement -- Software Heritage autobuilder (on jenkins-debian1) Mon, 07 Oct 2019 14:37:37 +0000 swh-scheduler (0.0.60-1~swh2) unstable-swh; urgency=medium * Force postgresql executable to a pg_ctl that exists when running tests. -- Nicolas Dandrimont Tue, 01 Oct 2019 18:14:39 +0200 swh-scheduler (0.0.60-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.60 - (tagged by Stefano Zacchiroli on 2019-10-01 13:13:13 +0200) * Upstream changes: - v0.0.60 - * tox: anticipate mypy run to just after flake8 - * init.py: switch to documented way of extending path - * tox.ini: add mypy section - * typing: minimal changes to make a no-op mypy run pass - * fix typo in docstring and sample file name - * admin CLI: drop obsolete backward compatibility aliases - * click "required" param wants bool, not int -- Software Heritage autobuilder (on jenkins-debian1) Tue, 01 Oct 2019 11:22:43 +0000 swh-scheduler (0.0.59-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.59 - (tagged by David Douard on 2019-09-04 16:08:27 +0200) * Upstream changes: - v0.0.59 -- Software Heritage autobuilder (on jenkins-debian1) Wed, 04 Sep 2019 14:11:48 +0000 swh-scheduler (0.0.58-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.58 - (tagged by Antoine R. Dumont (@ardumont) on 2019-09-03 10:19:34 +0200) * Upstream changes: - v0.0.58 - celery: auto add tasks declared in the swh.workers entry point in task_modules - api/client: use RPCClient instead of deprecated SWHRemoteAPI - Make schedule_origins use origin urls instead of ids in task arguments. - docs: add code of conduct document - docs: very beginning of a practical documentation on the scheduler - config: Add a pre-commit config file - data: Insert new cgit instance lister task - data: Insert load-tar task-type -- Software Heritage autobuilder (on jenkins-debian1) Tue, 03 Sep 2019 08:28:19 +0000 swh-scheduler (0.0.57-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.57 - (tagged by David Douard on 2019-06-26 14:56:32 +0200) * Upstream changes: - v0.0.57 -- Software Heritage autobuilder (on jenkins-debian1) Wed, 26 Jun 2019 13:05:20 +0000 swh-scheduler (0.0.56-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.56 - (tagged by Nicolas Dandrimont on 2019-05-07 18:16:20 +0200) * Upstream changes: - listener: Release the db object after using it - This is the contract that get_db/put_db is supposed to conform to. 
-- Software Heritage autobuilder (on jenkins-debian1) Tue, 14 May 2019 12:40:09 +0000 swh-scheduler (0.0.55-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.55 - (tagged by Antoine Lambert on 2019-05-06 11:47:43 +0200) * Upstream changes: - version 0.0.55 -- Software Heritage autobuilder (on jenkins-debian1) Mon, 06 May 2019 09:54:51 +0000 swh-scheduler (0.0.54-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.54 - (tagged by Antoine R. Dumont (@ardumont) on 2019-04-11 11:33:40 +0200) * Upstream changes: - v0.0.54 - cli_utils: Use yaml.safe_load instead of yaml.load - Fix support of latest versions of swh- core and psycopg2 - sql/data: Add npm related task types -- Software Heritage autobuilder (on jenkins-debian1) Thu, 11 Apr 2019 09:40:14 +0000 swh-scheduler (0.0.53-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.53 - (tagged by Antoine Lambert on 2019-04-04 16:45:56 +0200) * Upstream changes: - version 0.0.53 -- Software Heritage autobuilder (on jenkins-debian1) Thu, 04 Apr 2019 14:55:20 +0000 swh-scheduler (0.0.52-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.52 - (tagged by Nicolas Dandrimont on 2019-04-03 10:54:06 +0200) * Upstream changes: - Release swh.scheduler v0.0.52 - Move to result_serializer = json to work around celery 4.3 bug - Fix db initialization -- Software Heritage autobuilder (on jenkins-debian1) Wed, 03 Apr 2019 08:59:00 +0000 swh-scheduler (0.0.51-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.51 - (tagged by Antoine R. Dumont (@ardumont) on 2019-03-22 12:09:22 +0100) * Upstream changes: - v0.0.51 - requirements.txt: Remove kombu dependency -- Software Heritage autobuilder (on jenkins-debian1) Fri, 22 Mar 2019 11:16:06 +0000 swh-scheduler (0.0.50-1~swh2) unstable-swh; urgency=medium * Update build- and runtime dependencies -- Nicolas Dandrimont Fri, 15 Mar 2019 18:24:11 +0100 swh-scheduler (0.0.50-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.50 - (tagged by Nicolas Dandrimont on 2019-03-15 18:07:24 +0100) * Upstream changes: - Release swh.scheduler v0.0.50 - Add an explicit log target for stdout and/or journald - Avoid useless log lines - Improve test coverage - Add support for non- string options in the CLI -- Software Heritage autobuilder (on jenkins-debian1) Fri, 15 Mar 2019 17:16:03 +0000 swh-scheduler (0.0.49-1~swh2) unstable-swh; urgency=medium * Export LC_ALL=C.UTF-8 -- Nicolas Dandrimont Thu, 14 Mar 2019 13:42:24 +0100 swh-scheduler (0.0.49-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.49 - (tagged by Nicolas Dandrimont on 2019-03-03 08:48:04 +0100) * Upstream changes: - Release swh.scheduler v0.0.49 - various fixes around celery behavior - move wsgi endpoint to a separate module - add tests for the CLI -- Software Heritage autobuilder (on jenkins-debian1) Sun, 03 Mar 2019 07:55:41 +0000 swh-scheduler (0.0.48-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.48 - (tagged by Antoine R. 
Dumont (@ardumont) on 2019-02-22 16:11:51 +0100) * Upstream changes: - v0.0.48 - Fix comment on main scheduler schema -- Software Heritage autobuilder (on jenkins-debian1) Fri, 22 Feb 2019 15:17:20 +0000 swh-scheduler (0.0.47-1~swh2) unstable-swh; urgency=low * Upstream release to fix build dependencies issue -- Antoine Romain Dumont (@ardumont) Thu, 21 Feb 2019 15:41:24 +0100 swh-scheduler (0.0.47-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.47 - (tagged by Valentin Lorentz on 2019-02-20 16:53:20 +0100) * Upstream changes: - Fix crash of SchedulerBackend.search_tasks when no argument is given. -- Software Heritage autobuilder (on jenkins-debian1) Thu, 21 Feb 2019 09:13:07 +0000 swh-scheduler (0.0.46-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.46 - (tagged by Antoine R. Dumont (@ardumont) on 2019-02-15 15:05:47 +0100) * Upstream changes: - v0.0.46 - scheduler.task: Remove no longer used Task class -- Software Heritage autobuilder (on jenkins-debian1) Fri, 15 Feb 2019 14:15:26 +0000 swh-scheduler (0.0.45-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.45 - (tagged by Antoine R. Dumont (@ardumont) on 2019-02-15 10:43:07 +0100) * Upstream changes: - v0.0.45 - celery_backend/config: Fix loglevel for amqp module -- Software Heritage autobuilder (on jenkins-debian1) Fri, 15 Feb 2019 09:48:25 +0000 swh-scheduler (0.0.44-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.44 - (tagged by Antoine R. Dumont (@ardumont) on 2019-02-13 16:29:05 +0100) * Upstream changes: - v0.0.44 - swh-scheduler-api: Fix configuration read too many times -- Software Heritage autobuilder (on jenkins-debian1) Wed, 13 Feb 2019 15:34:34 +0000 swh-scheduler (0.0.43-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.43 - (tagged by David Douard on 2019-02-13 15:27:27 +0100) * Upstream changes: - v0.0.43 -- Software Heritage autobuilder (on jenkins-debian1) Wed, 13 Feb 2019 14:46:59 +0000 swh-scheduler (0.0.42-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.42 - (tagged by Antoine R. Dumont (@ardumont) on 2019-02-11 14:28:10 +0100) * Upstream changes: - v0.0.42 - Fix dependency requirements for hypothesis -- Software Heritage autobuilder (on jenkins-debian1) Mon, 11 Feb 2019 13:33:48 +0000 swh-scheduler (0.0.41-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.41 - (tagged by David Douard on 2019-02-06 15:25:56 +0100) * Upstream changes: - v0.0.41 -- Software Heritage autobuilder (on jenkins-debian1) Wed, 06 Feb 2019 15:33:04 +0000 swh-scheduler (0.0.40-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.40 - (tagged by Antoine R. 
Dumont (@ardumont) on 2019-01-28 16:24:04 +0100) * Upstream changes: - v0.0.40 - swh.scheduler.tests: Mark db tests as such - Force tox environment to C.UTF-8 locale - Add debug logging in the SWHTask class -- Software Heritage autobuilder (on jenkins-debian1) Mon, 28 Jan 2019 15:30:41 +0000 swh-scheduler (0.0.39-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.39 - (tagged by David Douard on 2019-01-16 13:37:58 +0100) * Upstream changes: - v0.0.39 -- Software Heritage autobuilder (on jenkins-debian1) Wed, 16 Jan 2019 12:42:37 +0000 swh-scheduler (0.0.38-1~swh1) unstable-swh; urgency=medium * New upstream release 0.0.38 - (tagged by David Douard on 2018-12-20 14:39:59 +0100) * Upstream changes: - v0.0.38 -- Software Heritage autobuilder (on jenkins-debian1) Wed, 09 Jan 2019 18:32:14 +0000 swh-scheduler (0.0.35-1~swh1) unstable-swh; urgency=medium * v0.0.35 * tests: Add SchedulerTestFixture * swh.scheduler.utils: Allow to add more task information * sql/40-swh-data: Update new indexer task types for local db -- Antoine R. Dumont (@ardumont) Mon, 29 Oct 2018 10:07:08 +0100 swh-scheduler (0.0.34-1~swh1) unstable-swh; urgency=medium * v0.0.34 * Finalize pytest migration -- Antoine R. Dumont (@ardumont) Thu, 25 Oct 2018 17:52:03 +0200 swh-scheduler (0.0.33-1~swh1) unstable-swh; urgency=medium * v0.0.33 -- David Douard Thu, 25 Oct 2018 16:03:16 +0200 swh-scheduler (0.0.32-1~swh1) unstable-swh; urgency=medium * v0.0.32 * tests: Add celery fixture to ease tests * tests: make tests use sql/ files from the package * tests: Starting migration towards pytest * listener: Make the listener code compatible with new celery (debian buster) * Make swh_scheduler_create_tasks_from_temp use indexes * setup: prepare for pypi upload * docs: add a simple README file -- Antoine R. Dumont (@ardumont) Mon, 22 Oct 2018 15:37:51 +0200 swh-scheduler (0.0.31-1~swh1) unstable-swh; urgency=medium * v0.0.31 * sql/swh-scheduler: Make the create_tasks call idempotent * swh.scheduler.utils: Open create_task_dict function * sql/scheduler-data: Add lister gitlab task types * sql/scheduler-data: Reference the existing production lister data * swh.scheduler.backend_es: Open sniffing options -- Antoine R. Dumont (@ardumont) Tue, 31 Jul 2018 06:55:39 +0200 swh-scheduler (0.0.30-1~swh1) unstable-swh; urgency=medium * v0.0.30 * swh-scheduler-schema.sql: Archive disabled oneshot tasks as well * swh.scheduler.cli: Add policy to pretty printing task routine * swh.scheduler.cli: Fix broken cli list-pending since api change -- Antoine R. Dumont (@ardumont) Fri, 22 Jun 2018 18:07:02 +0200 swh-scheduler (0.0.29-1~swh1) unstable-swh; urgency=medium * v0.0.29 * swh.scheduler.cli: Change archival period to rolling month - 1 week * swh.scheduler.updater.writer: Force filter resolution to list * swh.scheduler.cli: Change default archival period to current month * swh.scheduler.cli: Improve logging message * swh.scheduler.updater.backend: Adapt configuration path accordingly -- Antoine R. Dumont (@ardumont) Thu, 31 May 2018 11:42:51 +0200 swh-scheduler (0.0.28-1~swh1) unstable-swh; urgency=medium * v0.0.28 * Fix wrong runtime dependencies -- Antoine R. Dumont (@ardumont) Tue, 29 May 2018 14:12:15 +0200 swh-scheduler (0.0.27-1~swh1) unstable-swh; urgency=medium * v0.0.27 * scheduler: Deal with priority in tasks * scheduler-update: new package python3-swh.scheduler.updater * Contains tools in charge of consuming events from arbitrary sources * and update the scheduler db -- Antoine R. 
Dumont (@ardumont) Tue, 29 May 2018 12:27:34 +0200 swh-scheduler (0.0.26-1~swh1) unstable-swh; urgency=medium * v0.0.26 * swh.scheduler: Fix package build * swh.scheduler.tests: Test remote scheduler api as well * swh.scheduler: Add tests around removing archivable tasks * swh.scheduler: Add tests around filtering archivable tasks * swh-scheduler-schema: Fix unneeded drop instructions * swh.scheduler.cli: Improve docstring * swh.scheduler.cli: Permit to specify the backend to use in cli * swh.scheduler.api: Bootstrap scheduler's remote api * swh.scheduler: Use `get_scheduler` api to instantiate a scheduler * swh.scheduler.backend: Fix docstring -- Antoine R. Dumont (@ardumont) Thu, 26 Apr 2018 17:34:07 +0200 swh-scheduler (0.0.25-1~swh1) unstable-swh; urgency=medium * v0.0.25 * swh.scheduler.cli.archive: Index arguments.kwargs as text -- Antoine R. Dumont (@ardumont) Wed, 18 Apr 2018 12:34:43 +0200 swh-scheduler (0.0.24-1~swh1) unstable-swh; urgency=medium * v0.0.24 * data/template: Do not index the arguments field (it's in _source) * data/README: Add a small readme to explain es install step * swh.scheduler.cli: Add a bulk index flag to separate read from index -- Antoine R. Dumont (@ardumont) Fri, 13 Apr 2018 14:55:32 +0200 swh-scheduler (0.0.23-1~swh1) unstable-swh; urgency=medium * swh.scheduler.cli.archive: Delete only completely indexed tasks * Prior to this commit, it could happen that we removed tasks even * though we did not yet index associated task_run. * Related T986 -- Antoine R. Dumont (@ardumont) Tue, 10 Apr 2018 17:43:07 +0200 swh-scheduler (0.0.22-1~swh1) unstable-swh; urgency=medium * v0.0.22 * Update to a more recent python3-elasticsearch client -- Antoine R. Dumont (@ardumont) Mon, 09 Apr 2018 16:09:16 +0200 swh-scheduler (0.0.21-1~swh1) unstable-swh; urgency=medium * v0.0.21 * Adapt default configuration * Fix typo in configuration variable name -- Antoine R. Dumont (@ardumont) Fri, 30 Mar 2018 15:02:55 +0200 swh-scheduler (0.0.20-1~swh1) unstable-swh; urgency=medium * v0.0.20 * swh.scheduler.cli.archive: Open completed oneshot or disabled * recurring tasks archival endpoint * swh.core.serializer: Move to msgpack serialization format * swh.scheduler.cli: Unify pretty print output * sql/data: Add new task type for loading mercurial dump * swh.scheduler.cli: Add sample use case for the scheduling cli * swh.scheduler.cli: Open policy column to the scheduling cli * swh.scheduler.cli: Open the delimiter option as cli argument * Fix issue when updating task-type without any retry delay defined * swh-scheduler/data: Add new oneshot scheduling load-mercurial task * backend: fix default scheduling_db value for consistency * backend: doc: fix return value of create_tasks -- Antoine R. Dumont (@ardumont) Fri, 30 Mar 2018 11:44:18 +0200 swh-scheduler (0.0.19-1~swh1) unstable-swh; urgency=medium * v0.0.19 * swh.scheduler.utils: Open utility function to create oneshot task -- Antoine R. 
Dumont (@ardumont) Wed, 29 Nov 2017 12:51:15 +0100 swh-scheduler (0.0.18-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.18 * Celery 4 compatibility -- Nicolas Dandrimont Wed, 08 Nov 2017 17:06:22 +0100 swh-scheduler (0.0.17-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler version 0.0.17 * Update packaging runes -- Nicolas Dandrimont Thu, 12 Oct 2017 18:49:02 +0200 swh-scheduler (0.0.16-1~swh1) unstable-swh; urgency=medium * Release swh-scheduler v0.0.16 * add some tests * implement one-shot tasks * implement retry on temporary failure -- Nicolas Dandrimont Mon, 07 Aug 2017 18:44:03 +0200 swh-scheduler (0.0.15-1~swh1) unstable-swh; urgency=medium * Release swh-scheduler v0.0.15 * Add some methods to get the length of task queues * worker: Show logs on stdout if loglevel = debug -- Nicolas Dandrimont Mon, 19 Jun 2017 19:44:56 +0200 swh-scheduler (0.0.14-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler 0.0.14 * Make the return value of tasks available in the listener -- Nicolas Dandrimont Mon, 12 Jun 2017 17:50:32 +0200 swh-scheduler (0.0.13-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.13 * Use systemd for logging rather than PostgreSQL -- Nicolas Dandrimont Fri, 07 Apr 2017 11:57:50 +0200 swh-scheduler (0.0.12-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.12 * Only log to database if the configuration is present -- Nicolas Dandrimont Thu, 09 Mar 2017 11:12:45 +0100 swh-scheduler (0.0.11-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.11 * add utils.get_task -- Nicolas Dandrimont Tue, 14 Feb 2017 19:49:34 +0100 swh-scheduler (0.0.10-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.10 * Allow disabling tasks -- Nicolas Dandrimont Thu, 20 Oct 2016 17:20:17 +0200 swh-scheduler (0.0.9-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.9 * Revert management of one shot tasks * Add possibility of launching several worker instances -- Nicolas Dandrimont Fri, 02 Sep 2016 17:09:18 +0200 swh-scheduler (0.0.7-1~swh1) unstable-swh; urgency=medium * v0.0.7 * Add oneshot task -- Antoine R. 
Dumont (@ardumont) Fri, 01 Jul 2016 16:42:45 +0200 swh-scheduler (0.0.6-1~swh1) unstable-swh; urgency=medium * Release swh-scheduler v0.0.6 * More reliability and efficiency when scheduling a lot ot tasks -- Nicolas Dandrimont Wed, 24 Feb 2016 18:46:57 +0100 swh-scheduler (0.0.5-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.5 * Use copy for task mass-scheduling -- Nicolas Dandrimont Wed, 24 Feb 2016 12:13:38 +0100 swh-scheduler (0.0.4-1~swh1) unstable-swh; urgency=medium * Release swh-scheduler v0.0.4 * general cleanup of the backend * use arrow instead of dateutil * add new cli program -- Nicolas Dandrimont Tue, 23 Feb 2016 17:46:04 +0100 swh-scheduler (0.0.3-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler version 0.0.3 * Implement the timestamp arguments to the task_run functions * Make the celery event listener use a reliable queue -- Nicolas Dandrimont Mon, 22 Feb 2016 15:14:28 +0100 swh-scheduler (0.0.2-1~swh1) unstable-swh; urgency=medium * Release swh.scheduler v0.0.2 * Multiple schema changes * Initial releases for the celery job runner and the event listener -- Nicolas Dandrimont Fri, 19 Feb 2016 18:50:47 +0100 swh-scheduler (0.0.1-1~swh1) unstable-swh; urgency=medium * Initial release * Release swh.scheduler v0.0.1 * Move swh.core.scheduling and swh.core.worker to swh.scheduler -- Nicolas Dandrimont Mon, 15 Feb 2016 11:07:30 +0100 diff --git a/debian/control b/debian/control index f494ef6..6b9620c 100644 --- a/debian/control +++ b/debian/control @@ -1,36 +1,31 @@ Source: swh-scheduler Maintainer: Software Heritage developers Section: python Priority: optional Build-Depends: debhelper (>= 9), dh-python (>= 2), python3-all, python3-arrow, python3-celery, python3-click, python3-elasticsearch (>= 5.4.0), python3-flask, python3-hypothesis (>= 3.11.0~), python3-kombu, python3-pytest, + python3-pytest-mock, python3-psycopg2, python3-setuptools, python3-swh.core (>= 0.0.65~), python3-swh.storage (>= 0.0.129~), python3-swh.lister, python3-vcversioner, python3-pytest-postgresql, postgresql-contrib, Standards-Version: 3.9.6 Homepage: https://forge.softwareheritage.org/diffusion/DSCH/ Package: python3-swh.scheduler Architecture: all Depends: python3-swh.core (>= 0.0.65~), python3-swh.storage (>= 0.0.129~), ${misc:Depends}, ${python3:Depends} Description: Software Heritage Scheduler - -Package: python3-swh.scheduler.updater -Architecture: all -Depends: python3-swh.scheduler (= ${binary:Version}), - ${misc:Depends}, ${python3:Depends} -Description: Software Heritage Scheduler Updater diff --git a/debian/rules b/debian/rules index a57b34f..dac39fb 100755 --- a/debian/rules +++ b/debian/rules @@ -1,24 +1,15 @@ #!/usr/bin/make -f export LC_ALL=C.UTF-8 export PG_CTL := $(lastword $(sort $(wildcard /usr/lib/postgresql/*/bin/pg_ctl))) export PYBUILD_NAME=swh.scheduler export PYBUILD_TEST_ARGS= --postgresql-exec $(PG_CTL) -m 'not db and not fs' %: dh $@ --with python3 --buildsystem=pybuild override_dh_install: dh_install rm -v $(CURDIR)/debian/python3-*/usr/lib/python*/dist-packages/swh/__init__.py rm -rf $(CURDIR)/debian/python3-*/usr/lib/python*/dist-packages/.hypothesis - - for pyvers in $(shell py3versions -vr); do \ - mkdir -p $(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/ ; \ - mv $(CURDIR)/debian/python3-swh.scheduler/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/ \ - $(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/ ; \ - mkdir -p 
$(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/tests/ ; \ - mv $(CURDIR)/debian/python3-swh.scheduler/usr/lib/python$$pyvers/dist-packages/swh/scheduler/tests/updater/ \ - $(CURDIR)/debian/python3-swh.scheduler.updater/usr/lib/python$$pyvers/dist-packages/swh/scheduler/updater/tests/ ; \ - done diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..80828c5 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,6 @@ +pytest +pytest-mock +pytest-postgresql >= 2.1.0 +celery >= 4.3 +hypothesis >= 3.11.0 +swh.lister diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 8802a82..e88bccd 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,28 +1,28 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.0.64 +Version: 0.0.65 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Description: swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package). Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index 9afe090..494b64d 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,65 +1,49 @@ MANIFEST.in Makefile README.md requirements-swh.txt +requirements-test.txt requirements.txt setup.py version.txt bin/swh-worker-control swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt swh/scheduler/__init__.py swh/scheduler/backend.py swh/scheduler/backend_es.py swh/scheduler/cli_utils.py swh/scheduler/py.typed swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/api/__init__.py swh/scheduler/api/client.py swh/scheduler/api/server.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/listener.py swh/scheduler/celery_backend/runner.py swh/scheduler/cli/__init__.py swh/scheduler/cli/admin.py swh/scheduler/cli/task.py swh/scheduler/cli/task_type.py swh/scheduler/cli/utils.py swh/scheduler/sql/30-swh-schema.sql swh/scheduler/sql/40-swh-func.sql swh/scheduler/sql/50-swh-data.sql swh/scheduler/sql/60-swh-indexes.sql -swh/scheduler/sql/updater/10-swh-init.sql -swh/scheduler/sql/updater/30-swh-schema.sql -swh/scheduler/sql/updater/40-swh-func.sql swh/scheduler/tests/__init__.py swh/scheduler/tests/conftest.py swh/scheduler/tests/tasks.py swh/scheduler/tests/test_api_client.py swh/scheduler/tests/test_celery_tasks.py 
swh/scheduler/tests/test_cli.py swh/scheduler/tests/test_cli_task_type.py swh/scheduler/tests/test_scheduler.py swh/scheduler/tests/test_server.py -swh/scheduler/tests/test_utils.py -swh/scheduler/tests/updater/__init__.py -swh/scheduler/tests/updater/conftest.py -swh/scheduler/tests/updater/test_backend.py -swh/scheduler/tests/updater/test_consumer.py -swh/scheduler/tests/updater/test_events.py -swh/scheduler/tests/updater/test_ghtorrent.py -swh/scheduler/tests/updater/test_writer.py -swh/scheduler/updater/__init__.py -swh/scheduler/updater/backend.py -swh/scheduler/updater/consumer.py -swh/scheduler/updater/events.py -swh/scheduler/updater/writer.py -swh/scheduler/updater/ghtorrent/__init__.py -swh/scheduler/updater/ghtorrent/cli.py \ No newline at end of file +swh/scheduler/tests/test_utils.py \ No newline at end of file diff --git a/swh.scheduler.egg-info/requires.txt b/swh.scheduler.egg-info/requires.txt index 774c8c1..97dad8a 100644 --- a/swh.scheduler.egg-info/requires.txt +++ b/swh.scheduler.egg-info/requires.txt @@ -1,18 +1,19 @@ arrow celery>=4 Click elasticsearch>5.4 flask psycopg2 pyyaml vcversioner setuptools swh.core[db,http]>=0.0.65 swh.storage>=0.0.129 [testing] -pytest<4 +pytest +pytest-mock pytest-postgresql>=2.1.0 -celery>=4 +celery>=4.3 hypothesis>=3.11.0 swh.lister diff --git a/swh/scheduler/celery_backend/listener.py b/swh/scheduler/celery_backend/listener.py index 9a4f453..cc419d8 100644 --- a/swh/scheduler/celery_backend/listener.py +++ b/swh/scheduler/celery_backend/listener.py @@ -1,205 +1,210 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import logging import time import sys import click from arrow import utcnow from kombu import Queue import celery from celery.events import EventReceiver +from swh.core.statsd import statsd + class ReliableEventReceiver(EventReceiver): def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix='celeryev', accept=None): super(ReliableEventReceiver, self).__init__( channel, handlers, routing_key, node_id, app, queue_prefix, accept) self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), exchange=self.exchange, routing_key=self.routing_key, auto_delete=False, durable=True) def get_consumers(self, consumer, channel): return [consumer(queues=[self.queue], callbacks=[self._receive], no_ack=False, accept=self.accept)] def _receive(self, bodies, message): if not isinstance(bodies, list): # celery<4 returned body as element bodies = [bodies] for body in bodies: type, body = self.event_from_message(body) self.process(type, body, message) def process(self, type, event, message): """Process the received event by dispatching it to the appropriate handler.""" handler = self.handlers.get(type) or self.handlers.get('*') - handler and handler(event, message) + if handler: + handler(event, message) + statsd.increment('swh_scheduler_listener_handled_event_total', + tags={'event_type': type}) ACTION_SEND_DELAY = datetime.timedelta(seconds=1.0) ACTION_QUEUE_MAX_LENGTH = 1000 def event_monitor(app, backend): logger = logging.getLogger('swh.scheduler.listener') actions = { 'last_send': utcnow() - 2*ACTION_SEND_DELAY, 'queue': [], } def try_perform_actions(actions=actions): logger.debug('Try perform pending actions') if actions['queue'] and ( len(actions['queue']) > ACTION_QUEUE_MAX_LENGTH 
or utcnow() - actions['last_send'] > ACTION_SEND_DELAY): perform_actions(actions) def perform_actions(actions, backend=backend): logger.info('Perform %s pending actions' % len(actions['queue'])) action_map = { 'start_task_run': backend.start_task_run, 'end_task_run': backend.end_task_run, } messages = [] db = backend.get_db() try: cursor = db.cursor(None) for action in actions['queue']: messages.append(action['message']) function = action_map[action['action']] args = action.get('args', ()) kwargs = action.get('kwargs', {}) kwargs['cur'] = cursor function(*args, **kwargs) except Exception: db.conn.rollback() else: db.conn.commit() finally: backend.put_db(db) for message in messages: if not message.acknowledged: message.ack() actions['queue'] = [] actions['last_send'] = utcnow() def queue_action(action, actions=actions): actions['queue'].append(action) try_perform_actions() def catchall_event(event, message): logger.debug('event: %s %s', event['type'], event.get('name', 'N/A')) if not message.acknowledged: message.ack() try_perform_actions() def task_started(event, message): logger.debug('task_started: %s %s', event['type'], event.get('name', 'N/A')) queue_action({ 'action': 'start_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'metadata': { 'worker': event['hostname'], }, }, 'message': message, }) def task_succeeded(event, message): logger.debug('task_succeeded: event: %s' % event) logger.debug(' message: %s' % message) result = event['result'] logger.debug('task_succeeded: result: %s' % result) try: status = result.get('status') if status == 'success': status = 'eventful' if result.get('eventful') else 'uneventful' except Exception: status = 'eventful' if result else 'uneventful' queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': status, 'result': result, }, 'message': message, }) def task_failed(event, message): logger.debug('task_failed: event: %s' % event) logger.debug(' message: %s' % message) queue_action({ 'action': 'end_task_run', 'args': [event['uuid']], 'kwargs': { 'timestamp': utcnow(), 'status': 'failed', }, 'message': message, }) recv = ReliableEventReceiver( celery.current_app.connection(), app=celery.current_app, handlers={ 'task-started': task_started, 'task-result': task_succeeded, 'task-failed': task_failed, '*': catchall_event, }, node_id='listener', ) errors = 0 while True: try: recv.capture(limit=None, timeout=None, wakeup=True) errors = 0 except KeyboardInterrupt: logger.exception('Keyboard interrupt, exiting') break except Exception: logger.exception('Unexpected exception') if errors < 5: time.sleep(errors) errors += 1 else: logger.error('Too many consecutive errors, exiting') sys.exit(1) @click.command() @click.pass_context def main(ctx): click.echo("Deprecated! 
Use 'swh-scheduler listener' instead.", err=True) ctx.exit(1) if __name__ == '__main__': main() diff --git a/swh/scheduler/celery_backend/runner.py b/swh/scheduler/celery_backend/runner.py index 9e4dc5f..c9250ee 100644 --- a/swh/scheduler/celery_backend/runner.py +++ b/swh/scheduler/celery_backend/runner.py @@ -1,121 +1,125 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import arrow import logging from kombu.utils.uuid import uuid +from swh.core.statsd import statsd from swh.scheduler import get_scheduler, compute_nb_tasks_from logger = logging.getLogger(__name__) # Max batch size for tasks MAX_NUM_TASKS = 10000 def run_ready_tasks(backend, app): """Run tasks that are ready Args: backend (Scheduler): backend to read tasks to schedule app (App): Celery application to send tasks to Returns: A list of dictionaries:: { 'task': the scheduler's task id, 'backend_id': Celery's task id, 'scheduler': arrow.utcnow() } The result can be used to block-wait for the tasks' results:: backend_tasks = run_ready_tasks(self.scheduler, app) for task in backend_tasks: AsyncResult(id=task['backend_id']).get() """ all_backend_tasks = [] while True: task_types = {} pending_tasks = [] for task_type in backend.get_task_types(): task_type_name = task_type['type'] task_types[task_type_name] = task_type max_queue_length = task_type['max_queue_length'] backend_name = task_type['backend_name'] if max_queue_length: try: queue_length = app.get_queue_length(backend_name) except ValueError: queue_length = None if queue_length is None: # Running without RabbitMQ (probably a test env). 
num_tasks = MAX_NUM_TASKS else: num_tasks = min(max_queue_length - queue_length, MAX_NUM_TASKS) else: num_tasks = MAX_NUM_TASKS if num_tasks > 0: num_tasks, num_tasks_priority = compute_nb_tasks_from( num_tasks) grabbed_tasks = backend.grab_ready_tasks( task_type_name, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) if grabbed_tasks: pending_tasks.extend(grabbed_tasks) logger.info('Grabbed %s tasks %s', len(grabbed_tasks), task_type_name) - + statsd.increment( + 'swh_scheduler_runner_scheduled_task_total', + len(grabbed_tasks), + tags={'task_type': task_type_name}) if not pending_tasks: return all_backend_tasks backend_tasks = [] celery_tasks = [] for task in pending_tasks: args = task['arguments']['args'] kwargs = task['arguments']['kwargs'] backend_name = task_types[task['type']]['backend_name'] backend_id = uuid() celery_tasks.append((backend_name, backend_id, args, kwargs)) data = { 'task': task['id'], 'backend_id': backend_id, 'scheduled': arrow.utcnow(), } backend_tasks.append(data) logger.debug('Sent %s celery tasks', len(backend_tasks)) backend.mass_schedule_task_runs(backend_tasks) for backend_name, backend_id, args, kwargs in celery_tasks: app.send_task( backend_name, task_id=backend_id, args=args, kwargs=kwargs, ) all_backend_tasks.extend(backend_tasks) def main(): from .config import app as main_app for module in main_app.conf.CELERY_IMPORTS: __import__(module) main_backend = get_scheduler('local') try: run_ready_tasks(main_backend, main_app) except Exception: main_backend.rollback() raise if __name__ == '__main__': main() diff --git a/swh/scheduler/cli/admin.py b/swh/scheduler/cli/admin.py index 276014d..3365c8f 100644 --- a/swh/scheduler/cli/admin.py +++ b/swh/scheduler/cli/admin.py @@ -1,124 +1,90 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import time import click from . import cli @cli.command('start-runner') @click.option('--period', '-p', default=0, help=('Period (in s) at witch pending tasks are checked and ' 'executed. Set to 0 (default) for a one shot.')) @click.pass_context def runner(ctx, period): """Starts a swh-scheduler runner service. This process is responsible for checking for ready-to-run tasks and schedule them.""" from swh.scheduler.celery_backend.runner import run_ready_tasks from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() logger = logging.getLogger(__name__ + '.runner') scheduler = ctx.obj['scheduler'] logger.debug('Scheduler %s' % scheduler) try: while True: logger.debug('Run ready tasks') try: ntasks = len(run_ready_tasks(scheduler, app)) if ntasks: logger.info('Scheduled %s tasks', ntasks) except Exception: logger.exception('Unexpected error in run_ready_tasks()') if not period: break time.sleep(period) except KeyboardInterrupt: ctx.exit(0) @cli.command('start-listener') @click.pass_context def listener(ctx): """Starts a swh-scheduler listener service. 
This service is responsible for listening at task lifecycle events and handle their workflow status in the database.""" scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') from swh.scheduler.celery_backend.config import build_app app = build_app(ctx.obj['config'].get('celery')) app.set_current() from swh.scheduler.celery_backend.listener import event_monitor event_monitor(app, backend=scheduler) @cli.command('rpc-serve') @click.option('--host', default='0.0.0.0', help="Host to run the scheduler server api") @click.option('--port', default=5008, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=None, help=("Indicates if the server should run in debug mode. " "Defaults to True if log-level is DEBUG, False otherwise.") ) @click.pass_context def rpc_server(ctx, host, port, debug): """Starts a swh-scheduler API HTTP server. """ if ctx.obj['config']['scheduler']['cls'] == 'remote': click.echo("The API server can only be started with a 'local' " "configuration", err=True) ctx.exit(1) from swh.scheduler.api import server server.app.config.update(ctx.obj['config']) if debug is None: debug = ctx.obj['log_level'] <= logging.DEBUG server.app.run(host, port=port, debug=bool(debug)) - - -@cli.command('start-updater') -@click.option('--verbose/--no-verbose', '-v', default=False, - help='Verbose mode') -@click.pass_context -def updater(ctx, verbose): - """Starts a scheduler-updater service. - - Insert tasks in the scheduler from the scheduler-updater's events read from - the db cache (filled e.g. by the ghtorrent consumer service) . - - """ - from swh.scheduler.updater.writer import UpdaterWriter - UpdaterWriter(**ctx.obj['config']).run() - - -@cli.command('start-ghtorrent') -@click.option('--verbose/--no-verbose', '-v', default=False, - help='Verbose mode') -@click.pass_context -def ghtorrent(ctx, verbose): - """Starts a ghtorrent consumer service. - - Consumes events from ghtorrent and write them to a cache. - - """ - from swh.scheduler.updater.ghtorrent import GHTorrentConsumer - from swh.scheduler.updater.backend import SchedulerUpdaterBackend - - ght_config = ctx.obj['config'].get('ghtorrent', {}) - back_config = ctx.obj['config'].get('scheduler_updater', {}) - backend = SchedulerUpdaterBackend(**back_config) - GHTorrentConsumer(backend, **ght_config).run() diff --git a/swh/scheduler/cli/task.py b/swh/scheduler/cli/task.py index e6d0cdf..24ff757 100644 --- a/swh/scheduler/cli/task.py +++ b/swh/scheduler/cli/task.py @@ -1,564 +1,567 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import json import itertools import locale import logging import arrow import csv import click from . 
import cli locale.setlocale(locale.LC_ALL, '') ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0] class DateTimeType(click.ParamType): name = 'time and date' def convert(self, value, param, ctx): if not isinstance(value, arrow.Arrow): value = arrow.get(value) return value DATETIME = DateTimeType() CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def format_dict(d): ret = {} for k, v in d.items(): if isinstance(v, (arrow.Arrow, datetime.date, datetime.datetime)): v = arrow.get(v).format() elif isinstance(v, dict): v = format_dict(v) ret[k] = v return ret def pretty_print_list(list, indent=0): """Pretty-print a list""" return ''.join('%s%r\n' % (' ' * indent, item) for item in list) def pretty_print_dict(dict, indent=0): """Pretty-print a list""" return ''.join('%s%s: %r\n' % (' ' * indent, click.style(key, bold=True), value) for key, value in sorted(dict.items())) def pretty_print_run(run, indent=4): fmt = ('{indent}{backend_id} [{status}]\n' '{indent} scheduled: {scheduled} [{started}:{ended}]') return fmt.format(indent=' '*indent, **format_dict(run)) def pretty_print_task(task, full=False): """Pretty-print a task If 'full' is True, also print the status and priority fields. >>> task = { ... 'id': 1234, ... 'arguments': { ... 'args': ['foo', 'bar', True], ... 'kwargs': {'key': 'value', 'key2': 42}, ... }, ... 'current_interval': datetime.timedelta(hours=1), ... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818), ... 'policy': 'oneshot', ... 'priority': None, ... 'status': 'next_run_not_scheduled', ... 'type': 'test_task', ... } >>> print(click.unstyle(pretty_print_task(task))) Task 1234 Next run: ... (2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 >>> print(click.unstyle(pretty_print_task(task, full=True))) Task 1234 Next run: ... (2019-02-21 13:52:35+00:00) Interval: 1:00:00 Type: test_task Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 """ next_run = arrow.get(task['next_run']) lines = [ '%s %s\n' % (click.style('Task', bold=True), task['id']), click.style(' Next run: ', bold=True), "%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()), '\n', click.style(' Interval: ', bold=True), str(task['current_interval']), '\n', click.style(' Type: ', bold=True), task['type'] or '', '\n', click.style(' Policy: ', bold=True), task['policy'] or '', '\n', ] if full: lines += [ click.style(' Status: ', bold=True), task['status'] or '', '\n', click.style(' Priority: ', bold=True), task['priority'] or '', '\n', ] lines += [ click.style(' Args:\n', bold=True), pretty_print_list(task['arguments']['args'], indent=4), click.style(' Keyword args:\n', bold=True), pretty_print_dict(task['arguments']['kwargs'], indent=4), ] return ''.join(lines) @cli.group('task') @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command('schedule') @click.option('--columns', '-c', multiple=True, default=['type', 'args', 'kwargs', 'next_run'], type=click.Choice([ 'type', 'args', 'kwargs', 'policy', 'next_run']), help='columns present in the CSV file') @click.option('--delimiter', '-d', default=',') @click.argument('file', type=click.File(encoding='utf-8')) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. 
The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). Use sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ tasks = [] now = arrow.utcnow() scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop('args', '[]')) kwargs = json.loads(task.pop('kwargs', '{}')) task['arguments'] = { 'args': args, 'kwargs': kwargs, } task['next_run'] = DATETIME.convert(task.get('next_run', now), None, None) tasks.append(task) created = scheduler.create_tasks(tasks) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager('\n'.join(output)) @task.command('add') @click.argument('type', nargs=1, required=True) @click.argument('options', nargs=-1) @click.option('--policy', '-p', default='recurring', type=click.Choice(['recurring', 'oneshot'])) @click.option('--priority', '-P', default=None, type=click.Choice(['low', 'normal', 'high'])) @click.option('--next-run', '-n', default=None) @click.pass_context def schedule_task(ctx, type, options, policy, priority, next_run): """Schedule one task from arguments. The first argument is the name of the task type, further ones are positional and keyword argument(s) of the task, in YAML format. Keyword args are of the form key=value. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task add list-pypi swh-scheduler --database 'service=swh-scheduler' \ task add list-debian-distribution --policy=oneshot distribution=stretch Note: if the priority is not given, the task won't have the priority set, which is considered as the lowest priority level. 
""" from .utils import parse_options scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') now = arrow.utcnow() (args, kw) = parse_options(options) task = {'type': type, 'policy': policy, 'priority': priority, 'arguments': { 'args': args, 'kwargs': kw, }, 'next_run': DATETIME.convert(next_run or now, None, None), } created = scheduler.create_tasks([task]) output = [ 'Created %d tasks\n' % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('schedule_origins') @click.argument('type', nargs=1, required=True) @click.argument('options', nargs=-1) @click.option('--batch-size', '-b', 'origin_batch_size', default=10, show_default=True, type=int, help="Number of origins per task") @click.option('--min-id', default=0, show_default=True, type=int, help="Only schedule tasks for origins whose ID is greater") @click.option('--max-id', default=None, type=int, help="Only schedule tasks for origins whose ID is lower") @click.option('--storage-url', '-g', help="URL of the (graph) storage API") @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='List only what would be scheduled.') @click.pass_context def schedule_origin_metadata_index( ctx, type, options, storage_url, origin_batch_size, min_id, max_id, dry_run): """Schedules tasks for origins that are already known. The first argument is the name of the task type, further ones are keyword argument(s) of the task in the form key=value, where value is in YAML format. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task schedule_origins index-origin-metadata """ from swh.storage import get_storage from swh.storage.algos.origin import iter_origins from .utils import parse_options, schedule_origin_batches scheduler = ctx.obj['scheduler'] storage = get_storage('remote', {'url': storage_url}) if dry_run: scheduler = None (args, kw) = parse_options(options) if args: raise click.ClickException('Only keywords arguments are allowed.') origins = iter_origins(storage, origin_from=min_id, origin_to=max_id) origin_urls = (origin['url'] for origin in origins) schedule_origin_batches( scheduler, type, origin_urls, origin_batch_size, kw) @task.command('list-pending') @click.argument('task-types', required=True, nargs=-1) @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch') @click.option('--before', '-b', required=False, type=DATETIME, help='List all jobs supposed to run before the given date') @click.pass_context def list_pending_tasks(ctx, task_types, limit, before): """List the tasks that are going to be run. 
You can override the number of tasks to fetch """ from swh.scheduler import compute_nb_tasks_from scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') num_tasks, num_tasks_priority = compute_nb_tasks_from(limit) output = [] for task_type in task_types: pending = scheduler.peek_ready_tasks( task_type, timestamp=before, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority) output.append('Found %d %s tasks\n' % ( len(pending), task_type)) for task in pending: output.append(pretty_print_task(task)) click.echo('\n'.join(output)) @task.command('list') @click.option('--task-id', '-i', default=None, multiple=True, metavar='ID', help='List only tasks whose id is ID.') @click.option('--task-type', '-t', default=None, multiple=True, metavar='TYPE', help='List only tasks of type TYPE') @click.option('--limit', '-l', required=False, type=click.INT, help='The maximum number of tasks to fetch.') @click.option('--status', '-s', multiple=True, metavar='STATUS', + type=click.Choice( + ('next_run_not_scheduled', 'next_run_scheduled', + 'completed', 'disabled')), default=None, help='List tasks whose status is STATUS.') @click.option('--policy', '-p', default=None, type=click.Choice(['recurring', 'oneshot']), help='List tasks whose policy is POLICY.') @click.option('--priority', '-P', default=None, multiple=True, type=click.Choice(['all', 'low', 'normal', 'high']), help='List tasks whose priority is PRIORITY.') @click.option('--before', '-b', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run before the given date.') @click.option('--after', '-a', required=False, type=DATETIME, metavar='DATETIME', help='Limit to tasks supposed to run after the given date.') @click.option('--list-runs', '-r', is_flag=True, default=False, help='Also list past executions of each task.') @click.pass_context def list_tasks(ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs): """List tasks. """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if not task_type: task_type = [x['type'] for x in scheduler.get_task_types()] # if task_id is not given, default value for status is # 'next_run_not_scheduled' # if task_id is given, default status is 'all' if task_id is None and status is None: status = ['next_run_not_scheduled'] if status and 'all' in status: status = None if priority and 'all' in priority: priority = None output = [] tasks = scheduler.search_tasks( task_id=task_id, task_type=task_type, status=status, priority=priority, policy=policy, before=before, after=after, limit=limit) if list_runs: runs = {t['id']: [] for t in tasks} for r in scheduler.get_task_runs([task['id'] for task in tasks]): runs[r['task']].append(r) else: runs = {} output.append('Found %d tasks\n' % ( len(tasks))) for task in tasks: output.append(pretty_print_task(task, full=True)) if runs.get(task['id']): output.append(click.style(' Executions:', bold=True)) for run in runs[task['id']]: output.append(pretty_print_run(run, indent=4)) click.echo('\n'.join(output)) @task.command('respawn') @click.argument('task-ids', required=True, nargs=-1) @click.option('--next-run', '-n', required=False, type=DATETIME, metavar='DATETIME', default=None, help='Re spawn the selected tasks at this date') @click.pass_context def respawn_tasks(ctx, task_ids, next_run): """Respawn tasks. 
Respawn tasks given by their ids (see the 'task list' command to find task ids) at the given date (immediately by default). Eg. swh-scheduler task respawn 1 3 12 """ scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') if next_run is None: next_run = arrow.utcnow() output = [] scheduler.set_status_tasks( task_ids, status='next_run_not_scheduled', next_run=next_run) output.append('Respawn tasks %s\n' % (task_ids,)) click.echo('\n'.join(output)) @task.command('archive') @click.option('--before', '-b', default=None, help='''Task whose ended date is anterior will be archived. Default to current month's first day.''') @click.option('--after', '-a', default=None, help='''Task whose ended date is after the specified date will be archived. Default to prior month's first day.''') @click.option('--batch-index', default=1000, type=click.INT, help='Batch size of tasks to read from db to archive') @click.option('--bulk-index', default=200, type=click.INT, help='Batch size of tasks to bulk index') @click.option('--batch-clean', default=1000, type=click.INT, help='Batch size of task to clean after archival') @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='Default to list only what would be archived.') @click.option('--verbose', is_flag=True, default=False, help='Verbose mode') @click.option('--cleanup/--no-cleanup', is_flag=True, default=True, help='Clean up archived tasks (default)') @click.option('--start-from', type=click.INT, default=-1, help='(Optional) default task id to start from. Default is -1.') @click.pass_context def archive_tasks(ctx, before, after, batch_index, bulk_index, batch_clean, dry_run, verbose, cleanup, start_from): """Archive task/task_run whose (task_type is 'oneshot' and task_status is 'completed') or (task_type is 'recurring' and task_status is 'disabled'). With --dry-run flag set (default), only list those. """ from swh.core.utils import grouper from swh.scheduler.backend_es import SWHElasticSearchClient scheduler = ctx.obj['scheduler'] if not scheduler: raise ValueError('Scheduler class (local/remote) must be instantiated') es_client = SWHElasticSearchClient() logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) log = logging.getLogger('swh.scheduler.cli.archive') logging.getLogger('urllib3').setLevel(logging.WARN) logging.getLogger('elasticsearch').setLevel(logging.WARN) if dry_run: log.info('**DRY-RUN** (only reading db)') if not cleanup: log.info('**NO CLEANUP**') now = arrow.utcnow() # Default to archive tasks from a rolling month starting the week # prior to the current one if not before: before = now.shift(weeks=-1).format('YYYY-MM-DD') if not after: after = now.shift(weeks=-1).shift(months=-1).format('YYYY-MM-DD') log.debug('index: %s; cleanup: %s; period: [%s ; %s]' % ( not dry_run, not dry_run and cleanup, after, before)) def group_by_index_name(data, es_client=es_client): """Given a data record, determine the index's name through its ending date. This varies greatly depending on the task_run's status. 
""" date = data.get('started') if not date: date = data['scheduled'] return es_client.compute_index_name(date.year, date.month) def index_data(before, last_id, batch_index): tasks_in = scheduler.filter_task_to_archive( after, before, last_id=last_id, limit=batch_index) for index_name, tasks_group in itertools.groupby( tasks_in, key=group_by_index_name): log.debug('Index tasks to %s' % index_name) if dry_run: for task in tasks_group: yield task continue yield from es_client.streaming_bulk( index_name, tasks_group, source=['task_id', 'task_run_id'], chunk_size=bulk_index, log=log) gen = index_data(before, last_id=start_from, batch_index=batch_index) if cleanup: for task_ids in grouper(gen, n=batch_clean): task_ids = list(task_ids) log.info('Clean up %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) if dry_run: # no clean up continue ctx.obj['scheduler'].delete_archived_tasks(task_ids) else: for task_ids in grouper(gen, n=batch_index): task_ids = list(task_ids) log.info('Indexed %s tasks: [%s, ...]' % ( len(task_ids), task_ids[0])) diff --git a/swh/scheduler/sql/30-swh-schema.sql b/swh/scheduler/sql/30-swh-schema.sql index 642ce4f..c4c8a4c 100644 --- a/swh/scheduler/sql/30-swh-schema.sql +++ b/swh/scheduler/sql/30-swh-schema.sql @@ -1,113 +1,113 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; comment on column dbversion.version is 'SQL schema version'; comment on column dbversion.release is 'Version deployment timestamp'; comment on column dbversion.description is 'Version description'; insert into dbversion (version, release, description) - values (13, now(), 'Work In Progress'); + values (14, now(), 'Work In Progress'); create table task_type ( type text primary key, description text not null, backend_name text not null, default_interval interval, min_interval interval, max_interval interval, backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; create type task_priority as enum('high', 'normal', 'low'); comment on type task_priority is 'Priority of the given task'; create table priority_ratio( id task_priority primary key, ratio float not null ); comment on table priority_ratio is 'Oneshot task''s reading ratio 
per priority'; comment on column priority_ratio.id is 'Task priority id'; comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority'; insert into priority_ratio (id, ratio) values ('high', 0.5); insert into priority_ratio (id, ratio) values ('normal', 0.3); insert into priority_ratio (id, ratio) values ('low', 0.2); create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, current_interval interval, status task_status not null, policy task_policy not null default 'recurring', retries_left bigint not null default 0, priority task_priority references priority_ratio(id), check (policy <> 'recurring' or current_interval is not null) ); comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. ' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; comment on column task.priority is 'Policy of the given task'; comment on column task.id is 'Task Identifier'; comment on column task.type is 'References task_type table'; comment on column task.status is 'Task status (''next_run_not_scheduled'', ''next_run_scheduled'', ''completed'', ''disabled'')'; create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. 
' 'For instance, the worker that took on the job, ' 'or the logs for the run.'; comment on column task_run.id is 'Task run identifier'; comment on column task_run.task is 'References task table'; comment on column task_run.scheduled is 'Scheduled run time for task'; comment on column task_run.started is 'Task starting time'; comment on column task_run.ended is 'Task ending time'; diff --git a/swh/scheduler/sql/40-swh-func.sql b/swh/scheduler/sql/40-swh-func.sql index 487e508..e01aa93 100644 --- a/swh/scheduler/sql/40-swh-func.sql +++ b/swh/scheduler/sql/40-swh-func.sql @@ -1,408 +1,408 @@ create or replace function swh_scheduler_mktemp_task () returns void language sql as $$ create temporary table tmp_task ( like task excluding indexes ) on commit drop; alter table tmp_task alter column retries_left drop not null, drop column id; $$; comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation'; create or replace function swh_scheduler_create_tasks_from_temp () returns setof task language plpgsql as $$ begin -- update the default values in one go -- this is separated from the insert/select to avoid too much -- juggling update tmp_task t set current_interval = tt.default_interval, retries_left = coalesce(retries_left, tt.num_retries, 0) from task_type tt where tt.type=t.type; insert into task (type, arguments, next_run, status, current_interval, policy, retries_left, priority) select type, arguments, next_run, status, current_interval, policy, retries_left, priority from tmp_task t where not exists(select 1 from task where type = t.type and - arguments->'args' = t.arguments->'args' and - arguments->'kwargs' = t.arguments->'kwargs' and + md5(arguments::text) = md5(t.arguments::text) and + arguments = t.arguments and policy = t.policy and priority is not distinct from t.priority and status = t.status); return query select distinct t.* from tmp_task tt inner join task t on ( tt.type = t.type and - tt.arguments->'args' = t.arguments->'args' and - tt.arguments->'kwargs' = t.arguments->'kwargs' and + md5(tt.arguments::text) = md5(t.arguments::text) and + tt.arguments = t.arguments and tt.policy = t.policy and tt.priority is not distinct from t.priority and tt.status = t.status ); end; $$; comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk from the temporary table'; create or replace function swh_scheduler_peek_no_priority_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql stable as $$ select * from task where next_run <= ts and type = task_type and status = 'next_run_not_scheduled' and priority is null order by next_run limit num_tasks for update skip locked; $$; comment on function swh_scheduler_peek_no_priority_tasks (text, timestamptz, bigint) is 'Retrieve tasks without priority'; create or replace function swh_scheduler_nb_priority_tasks(num_tasks_priority bigint, task_priority task_priority) returns numeric language sql stable as $$ select ceil(num_tasks_priority * (select ratio from priority_ratio where id = task_priority)) :: numeric $$; comment on function swh_scheduler_nb_priority_tasks (bigint, task_priority) is 'Given a priority task and a total number, compute the number of tasks to read'; create or replace function swh_scheduler_peek_tasks_with_priority (task_type text, ts timestamptz default now(), num_tasks_priority bigint default NULL, task_priority task_priority default 'normal') returns setof task language sql stable as $$ select * from task 
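
The duplicate check in swh_scheduler_create_tasks_from_temp now compares an md5 of the whole `arguments` column first (cheap, and served by the new btree index added in 60-swh-indexes.sql below), then falls back to full equality to rule out hash collisions. A rough Python analogue of that two-step comparison, assuming `json.dumps(..., sort_keys=True)` as a stand-in for Postgres's jsonb text form (the two serializations are not byte-identical):

import hashlib
import json

def same_task_arguments(a: dict, b: dict) -> bool:
    # cheap, index-friendly comparison first ...
    a_txt = json.dumps(a, sort_keys=True).encode()
    b_txt = json.dumps(b, sort_keys=True).encode()
    if hashlib.md5(a_txt).hexdigest() != hashlib.md5(b_txt).hexdigest():
        return False
    # ... then full equality to guard against md5 collisions
    return a == b
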
t where t.next_run <= ts and t.type = task_type and t.status = 'next_run_not_scheduled' and t.priority = task_priority order by t.next_run limit num_tasks_priority for update skip locked; $$; comment on function swh_scheduler_peek_tasks_with_priority(text, timestamptz, bigint, task_priority) is 'Retrieve tasks with a given priority'; create or replace function swh_scheduler_peek_priority_tasks (task_type text, ts timestamptz default now(), num_tasks_priority bigint default NULL) returns setof task language plpgsql as $$ declare r record; count_row bigint; nb_diff bigint; nb_high bigint; nb_normal bigint; nb_low bigint; begin -- expected values to fetch select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'high') into nb_high; select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'normal') into nb_normal; select swh_scheduler_nb_priority_tasks(num_tasks_priority, 'low') into nb_low; nb_diff := 0; count_row := 0; for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_high, 'high') loop count_row := count_row + 1; return next r; end loop; if count_row < nb_high then nb_normal := nb_normal + nb_high - count_row; end if; count_row := 0; for r in select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_normal, 'normal') loop count_row := count_row + 1; return next r; end loop; if count_row < nb_normal then nb_low := nb_low + nb_normal - count_row; end if; return query select * from swh_scheduler_peek_tasks_with_priority(task_type, ts, nb_low, 'low'); end $$; comment on function swh_scheduler_peek_priority_tasks(text, timestamptz, bigint) is 'Retrieve priority tasks'; create or replace function swh_scheduler_peek_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL, num_tasks_priority bigint default NULL) returns setof task language plpgsql as $$ declare r record; count_row bigint; nb_diff bigint; nb_tasks bigint; begin count_row := 0; for r in select * from swh_scheduler_peek_priority_tasks(task_type, ts, num_tasks_priority) order by priority, next_run loop count_row := count_row + 1; return next r; end loop; if count_row < num_tasks_priority then nb_tasks := num_tasks + num_tasks_priority - count_row; else nb_tasks := num_tasks; end if; for r in select * from swh_scheduler_peek_no_priority_tasks(task_type, ts, nb_tasks) order by priority, next_run loop return next r; end loop; return; end $$; comment on function swh_scheduler_peek_ready_tasks(text, timestamptz, bigint, bigint) is 'Retrieve tasks with/without priority in order'; create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL, num_tasks_priority bigint default NULL) returns setof task language sql as $$ update task set status='next_run_scheduled' from ( select id from swh_scheduler_peek_ready_tasks(task_type, ts, num_tasks, num_tasks_priority) ) next_tasks where task.id = next_tasks.id returning task.*; $$; comment on function swh_scheduler_grab_ready_tasks (text, timestamptz, bigint, bigint) is 'Grab tasks ready for scheduling and change their status'; create or replace function swh_scheduler_schedule_task_run (task_id bigint, backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ insert into task_run (task, backend_id, metadata, scheduled, status) values (task_id, backend_id, metadata, ts, 'scheduled') returning *; $$; create or replace function swh_scheduler_mktemp_task_run () returns void language sql as $$ 
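
The peek functions above split the priority budget according to priority_ratio and let unused quota cascade down (high to normal, normal to low); swh_scheduler_peek_ready_tasks then adds any overall shortfall against num_tasks_priority to the plain, no-priority budget. A sketch of the cascade arithmetic, assuming the ratios inserted in 30-swh-schema.sql (high 0.5, normal 0.3, low 0.2):

import math

RATIOS = {'high': 0.5, 'normal': 0.3, 'low': 0.2}   # from priority_ratio

def priority_quotas(num_tasks_priority, available):
    """available maps priority -> number of ready tasks of that priority."""
    picked = {}
    carry = 0
    for prio in ('high', 'normal', 'low'):
        quota = math.ceil(num_tasks_priority * RATIOS[prio]) + carry
        picked[prio] = min(quota, available.get(prio, 0))
        carry = quota - picked[prio]          # unused quota cascades down
    return picked

# e.g. a budget of 10 with only 2 high-priority tasks ready: high uses 2 of
# its 5 slots, the 3 unused slots are added to normal's 3, and so on.
print(priority_quotas(10, {'high': 2, 'normal': 6, 'low': 4}))
# -> {'high': 2, 'normal': 6, 'low': 2}
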
create temporary table tmp_task_run ( like task_run excluding indexes ) on commit drop; alter table tmp_task_run drop column id, drop column status; $$; comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling'; create or replace function swh_scheduler_schedule_task_run_from_temp () returns void language plpgsql as $$ begin insert into task_run (task, backend_id, metadata, scheduled, status) select task, backend_id, metadata, scheduled, 'scheduled' from tmp_task_run; return; end; $$; create or replace function swh_scheduler_start_task_run (backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set started = ts, status = 'started', metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata where task_run.backend_id = swh_scheduler_start_task_run.backend_id returning *; $$; create or replace function swh_scheduler_end_task_run (backend_id text, status task_run_status, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set ended = ts, status = swh_scheduler_end_task_run.status, metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata where task_run.backend_id = swh_scheduler_end_task_run.backend_id returning *; $$; create type task_record as ( task_id bigint, task_policy task_policy, task_status task_status, task_run_id bigint, arguments jsonb, type text, backend_id text, metadata jsonb, scheduled timestamptz, started timestamptz, ended timestamptz, task_run_status task_run_status ); create or replace function swh_scheduler_task_to_archive( ts_after timestamptz, ts_before timestamptz, last_id bigint default -1, lim bigint default 10) returns setof task_record language sql stable as $$ select t.id as task_id, t.policy as task_policy, t.status as task_status, tr.id as task_run_id, t.arguments, t.type, tr.backend_id, tr.metadata, tr.scheduled, tr.started, tr.ended, tr.status as task_run_status from task_run tr inner join task t on tr.task=t.id where ((t.policy = 'oneshot' and t.status in ('completed', 'disabled')) or (t.policy = 'recurring' and t.status = 'disabled')) and ((ts_after <= tr.started and tr.started < ts_before) or tr.started is null) and t.id > last_id order by tr.task, tr.started limit lim; $$; comment on function swh_scheduler_task_to_archive (timestamptz, timestamptz, bigint, bigint) is 'Read archivable tasks function'; create or replace function swh_scheduler_delete_archived_tasks( task_ids bigint[], task_run_ids bigint[]) returns void language sql as $$ -- clean up task_run_ids delete from task_run where id in (select * from unnest(task_run_ids)); -- clean up only tasks whose associated task_run are all cleaned up. 
-- Remaining tasks will stay there and will be cleaned up when -- remaining data have been indexed delete from task where id in (select t.id from task t left outer join task_run tr on t.id=tr.task where t.id in (select * from unnest(task_ids)) and tr.task is null); $$; comment on function swh_scheduler_delete_archived_tasks(bigint[], bigint[]) is 'Clean up archived tasks function'; create or replace function swh_scheduler_update_task_on_task_end () returns trigger language plpgsql as $$ declare cur_task task%rowtype; cur_task_type task_type%rowtype; adjustment_factor float; new_interval interval; begin select * from task where id = new.task into cur_task; select * from task_type where type = cur_task.type into cur_task_type; case when new.status = 'permfailed' then update task set status = 'disabled' where id = cur_task.id; when new.status in ('eventful', 'uneventful') then case when cur_task.policy = 'oneshot' then update task set status = 'completed' where id = cur_task.id; when cur_task.policy = 'recurring' then if new.status = 'uneventful' then adjustment_factor := 1/cur_task_type.backoff_factor; else adjustment_factor := 1/cur_task_type.backoff_factor; end if; new_interval := greatest( cur_task_type.min_interval, least( cur_task_type.max_interval, adjustment_factor * cur_task.current_interval)); update task set status = 'next_run_not_scheduled', next_run = now() + new_interval, current_interval = new_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; else -- new.status in 'failed', 'lost' if cur_task.retries_left > 0 then update task set status = 'next_run_not_scheduled', next_run = now() + coalesce(cur_task_type.retry_delay, interval '1 hour'), retries_left = cur_task.retries_left - 1 where id = cur_task.id; else -- no retries left case when cur_task.policy = 'oneshot' then update task set status = 'disabled' where id = cur_task.id; when cur_task.policy = 'recurring' then update task set status = 'next_run_not_scheduled', next_run = now() + cur_task.current_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; end if; -- retries end case; return null; end; $$; create trigger update_task_on_task_end after update of status on task_run for each row when (new.status NOT IN ('scheduled', 'started')) execute procedure swh_scheduler_update_task_on_task_end (); diff --git a/swh/scheduler/sql/50-swh-data.sql b/swh/scheduler/sql/50-swh-data.sql index 2c801ef..0fab9f0 100644 --- a/swh/scheduler/sql/50-swh-data.sql +++ b/swh/scheduler/sql/50-swh-data.sql @@ -1,195 +1,182 @@ insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-svn-from-archive', 'Loading svn repositories from svn dump', 'swh.loader.svn.tasks.MountAndLoadSvnRepository', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-svn', 'Create dump of a remote svn repository, mount it and load it', 'swh.loader.svn.tasks.DumpMountAndLoadSvnRepository', '1 day', '1 day', '1 day', 1, 1000); -insert into task_type( - type, - description, - backend_name, - default_interval, min_interval, max_interval, backoff_factor, - num_retries, - max_queue_length) -values ( - 'load-deposit', - 'Loading deposit archive into swh through swh-loader-tar', - 'swh.deposit.loader.tasks.LoadDepositArchiveTsk', - '1 day', '1 day', '1 day', 1, 3, 
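
For recurring tasks, the update_task_on_task_end trigger above reschedules the next run by scaling the current interval and clamping it between the task type's min and max intervals (note that, as written, both the eventful and uneventful outcomes scale by 1/backoff_factor). A sketch of the clamping with plain timedeltas, using the load-git task-type values from 50-swh-data.sql as the example:

from datetime import timedelta

def next_interval(current, min_interval, max_interval, backoff_factor, eventful):
    # `eventful` is ignored here, mirroring the trigger, where both branches
    # currently use the same adjustment factor
    adjustment_factor = 1 / backoff_factor
    candidate = current * adjustment_factor
    return max(min_interval, min(max_interval, candidate))

# e.g. a git origin: 64-day current interval, factor 2, bounds [12h, 64d]
print(next_interval(timedelta(days=64), timedelta(hours=12),
                    timedelta(days=64), 2, eventful=True))   # -> 32 days
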
1000); - insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, num_retries, max_queue_length) values ( 'check-deposit', 'Pre-checking deposit step before loading into swh archive', 'swh.deposit.loader.tasks.ChecksDepositTsk', '1 day', '1 day', '1 day', 1, 3, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'cook-vault-bundle', 'Cook a Vault bundle', 'swh.vault.cooking_tasks.SWHCookingTask', '1 day', '1 day', '1 day', 1, 10000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-hg', 'Loading mercurial repository swh-loader-mercurial', 'swh.loader.mercurial.tasks.LoadMercurial', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-hg-from-archive', 'Loading archive mercurial repository swh-loader-mercurial', 'swh.loader.mercurial.tasks.LoadArchiveMercurial', '1 day', '1 day', '1 day', 1, 1000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'load-git', 'Update an origin of type git', 'swh.loader.git.tasks.UpdateGitRepository', '64 days', '12:00:00', '64 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-mimetype', 'Mimetype indexer task', 'swh.indexer.tasks.ContentMimetype', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-mimetype-for-range', 'Mimetype Range indexer task', 'swh.indexer.tasks.ContentRangeMimetype', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-fossology-license', 'Fossology license indexer task', 'swh.indexer.tasks.ContentFossologyLicense', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-fossology-license-for-range', 'Fossology license range indexer task', 'swh.indexer.tasks.ContentRangeFossologyLicense', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-origin-head', 'Origin Head indexer task', 'swh.indexer.tasks.OriginHead', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-revision-metadata', 'Revision Metadata indexer task', 'swh.indexer.tasks.RevisionMetadata', '1 day', '12:00:00', '1 days', 2, 5000); insert into task_type( type, description, backend_name, default_interval, min_interval, max_interval, backoff_factor, max_queue_length) values ( 'index-origin-metadata', 'Origin Metadata indexer task', 'swh.indexer.tasks.OriginMetadata', '1 day', '12:00:00', '1 days', 2, 20000); diff --git a/swh/scheduler/sql/60-swh-indexes.sql 
b/swh/scheduler/sql/60-swh-indexes.sql index efb58a9..7da9519 100644 --- a/swh/scheduler/sql/60-swh-indexes.sql +++ b/swh/scheduler/sql/60-swh-indexes.sql @@ -1,11 +1,13 @@ create index on task(type); create index on task(next_run); -create index task_args on task using btree ((arguments -> 'args')); -create index task_kwargs on task using gin ((arguments -> 'kwargs')); + +-- used for quick equality checking +create index on task using btree(type, md5(arguments::text)); + create index on task(priority); create index on task_run(task); create index on task_run(backend_id); create index task_run_id_asc_idx on task_run(task asc, started asc); diff --git a/swh/scheduler/sql/updater/10-swh-init.sql b/swh/scheduler/sql/updater/10-swh-init.sql deleted file mode 100644 index 43774e3..0000000 --- a/swh/scheduler/sql/updater/10-swh-init.sql +++ /dev/null @@ -1,4 +0,0 @@ -create extension if not exists btree_gist; -create extension if not exists pgcrypto; - -create or replace language plpgsql; diff --git a/swh/scheduler/sql/updater/30-swh-schema.sql b/swh/scheduler/sql/updater/30-swh-schema.sql deleted file mode 100644 index 5833df8..0000000 --- a/swh/scheduler/sql/updater/30-swh-schema.sql +++ /dev/null @@ -1,29 +0,0 @@ -create table dbversion -( - version int primary key, - release timestamptz not null, - description text not null -); - -comment on table dbversion is 'Schema update tracking'; - --- a SHA1 checksum (not necessarily originating from Git) -create domain sha1 as bytea check (length(value) = 20); - -insert into dbversion (version, release, description) - values (1, now(), 'Work In Progress'); - -create type origin_type as enum ('git', 'svn', 'hg', 'deb'); -comment on type origin_type is 'Url''s repository type'; - -create table cache ( - id sha1 primary key, - url text not null, - origin_type origin_type not null, - cnt int default 1, - first_seen timestamptz not null default now(), - last_seen timestamptz not null - ); - -create index on cache(url); -create index on cache(last_seen); diff --git a/swh/scheduler/sql/updater/40-swh-func.sql b/swh/scheduler/sql/updater/40-swh-func.sql deleted file mode 100644 index 786dee1..0000000 --- a/swh/scheduler/sql/updater/40-swh-func.sql +++ /dev/null @@ -1,48 +0,0 @@ --- Postgresql index helper function -create or replace function hash_sha1(text) - returns sha1 -as $$ - select public.digest($1, 'sha1') :: sha1 -$$ language sql strict immutable; - -comment on function hash_sha1(text) is 'Compute sha1 hash as text'; - --- create a temporary table for cache tmp_cache, -create or replace function swh_mktemp_cache() - returns void - language sql -as $$ - create temporary table tmp_cache ( - like cache including defaults - ) on commit drop; - alter table tmp_cache drop column id; -$$; - -create or replace function swh_cache_put() - returns void - language plpgsql -as $$ -begin - insert into cache (id, url, origin_type, cnt, last_seen) - select hash_sha1(url), url, origin_type, cnt, last_seen - from tmp_cache t - on conflict(id) - do update set cnt = (select cnt from cache where id=excluded.id) + excluded.cnt, - last_seen = excluded.last_seen; - return; -end -$$; - -comment on function swh_cache_put() is 'Write to cache temporary events'; - -create or replace function swh_cache_read(ts timestamptz, lim integer) - returns setof cache - language sql stable -as $$ - select id, url, origin_type, cnt, first_seen, last_seen - from cache - where last_seen <= ts - limit lim; -$$; - -comment on function swh_cache_read(timestamptz, integer) is 'Read cache 
entries'; diff --git a/swh/scheduler/task.py b/swh/scheduler/task.py index 3e2e47a..83d3c0e 100644 --- a/swh/scheduler/task.py +++ b/swh/scheduler/task.py @@ -1,66 +1,86 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +from datetime import datetime + from celery import current_app import celery.app.task from celery.utils.log import get_task_logger from swh.core.statsd import Statsd +def ts(): + return int(datetime.utcnow().timestamp()) + + class SWHTask(celery.app.task.Task): """a schedulable task (abstract class) Current implementation is based on Celery. See http://docs.celeryproject.org/en/latest/reference/celery.app.task.html for how to use tasks once instantiated """ _statsd = None _log = None @property def statsd(self): if self._statsd: return self._statsd worker_name = current_app.conf.get('worker_name') if worker_name: self._statsd = Statsd(constant_tags={ 'task': self.name, 'worker': worker_name, }) return self._statsd else: - return Statsd(constant_tags={ + statsd = Statsd(constant_tags={ 'task': self.name, 'worker': 'unknown worker', }) + return statsd def __call__(self, *args, **kwargs): self.statsd.increment('swh_task_called_count') + self.statsd.gauge('swh_task_start_ts', ts()) with self.statsd.timed('swh_task_duration_seconds'): - return super().__call__(*args, **kwargs) + result = super().__call__(*args, **kwargs) + try: + status = result['status'] + if status == 'success': + status = 'eventful' if result.get('eventful') \ + else 'uneventful' + except Exception: + status = 'eventful' if result else 'uneventful' + + self.statsd.gauge( + 'swh_task_end_ts', ts(), + tags={'status': status}) + return result def on_failure(self, exc, task_id, args, kwargs, einfo): self.statsd.increment('swh_task_failure_count') def on_success(self, retval, task_id, args, kwargs): self.statsd.increment('swh_task_success_count') # this is a swh specific event. 
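
The new probes in SWHTask give each task a called/success/failure counter, a duration timer, and start/end timestamp gauges tagged with a derived status. A condensed sketch of the status derivation applied to a task's return value, following the same rules as `__call__` above:

def end_status(result):
    # dict results carrying a 'status' key are trusted; a 'success' status is
    # refined into eventful/uneventful via the 'eventful' flag; anything else
    # falls back to the truthiness of the result
    try:
        status = result['status']
        if status == 'success':
            status = 'eventful' if result.get('eventful') else 'uneventful'
    except Exception:
        status = 'eventful' if result else 'uneventful'
    return status

assert end_status({'status': 'success', 'eventful': True}) == 'eventful'
assert end_status({}) == 'uneventful'
assert end_status('OK') == 'eventful'
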
Used to attach the retval to the # task_run self.send_event('task-result', result=retval) @property def log(self): if self._log is None: self._log = get_task_logger(self.name) return self._log def run(self, *args, **kwargs): self.log.debug('%s: args=%s, kwargs=%s', self.name, args, kwargs) ret = super().run(*args, **kwargs) self.log.debug('%s: OK => %s', self.name, ret) return ret diff --git a/swh/scheduler/tests/conftest.py b/swh/scheduler/tests/conftest.py index e96f15f..6534132 100644 --- a/swh/scheduler/tests/conftest.py +++ b/swh/scheduler/tests/conftest.py @@ -1,109 +1,109 @@ # Copyright (C) 2016-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import pytest import glob from datetime import timedelta import pkg_resources from swh.core.utils import numfile_sortkey as sortkey from swh.scheduler import get_scheduler from swh.scheduler.tests import SQL_DIR # make sure we are not fooled by CELERY_ config environment vars for var in [x for x in os.environ.keys() if x.startswith('CELERY')]: os.environ.pop(var) # test_cli tests depends on a en/C locale, so ensure it os.environ['LC_ALL'] = 'C.UTF-8' DUMP_FILES = os.path.join(SQL_DIR, '*.sql') # celery tasks for testing purpose; tasks themselves should be # in swh/scheduler/tests/tasks.py -TASK_NAMES = ['ping', 'multiping', 'add', 'error'] +TASK_NAMES = ['ping', 'multiping', 'add', 'error', 'echo'] @pytest.fixture(scope='session') def celery_enable_logging(): return True @pytest.fixture(scope='session') def celery_includes(): task_modules = [ 'swh.scheduler.tests.tasks', ] for entrypoint in pkg_resources.iter_entry_points('swh.workers'): task_modules.extend(entrypoint.load()().get('task_modules', [])) return task_modules @pytest.fixture(scope='session') def celery_parameters(): return { 'task_cls': 'swh.scheduler.task:SWHTask', } @pytest.fixture(scope='session') def celery_config(): return { 'accept_content': ['application/x-msgpack', 'application/json'], 'task_serializer': 'msgpack', 'result_serializer': 'json', } # use the celery_session_app fixture to monkeypatch the 'main' # swh.scheduler.celery_backend.config.app Celery application # with the test application @pytest.fixture(scope='session') def swh_app(celery_session_app): from swh.scheduler.celery_backend import config config.app = celery_session_app yield celery_session_app @pytest.fixture def swh_scheduler_config(request, postgresql): scheduler_config = { 'db': postgresql.dsn, } all_dump_files = sorted(glob.glob(DUMP_FILES), key=sortkey) cursor = postgresql.cursor() for fname in all_dump_files: with open(fname) as fobj: cursor.execute(fobj.read()) postgresql.commit() return scheduler_config @pytest.fixture def swh_scheduler(swh_scheduler_config): scheduler = get_scheduler('local', swh_scheduler_config) for taskname in TASK_NAMES: scheduler.create_task_type({ 'type': 'swh-test-{}'.format(taskname), 'description': 'The {} testing task'.format(taskname), 'backend_name': 'swh.scheduler.tests.tasks.{}'.format(taskname), 'default_interval': timedelta(days=1), 'min_interval': timedelta(hours=6), 'max_interval': timedelta(days=12), }) return scheduler # this alias is used to be able to easily instantiate a db-backed Scheduler # eg. for the RPC client/server test suite. 
swh_db_scheduler = swh_scheduler diff --git a/swh/scheduler/tests/tasks.py b/swh/scheduler/tests/tasks.py index be7628d..2426478 100644 --- a/swh/scheduler/tests/tasks.py +++ b/swh/scheduler/tests/tasks.py @@ -1,36 +1,42 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from celery import group, shared_task @shared_task(name='swh.scheduler.tests.tasks.ping', bind=True) def ping(self, **kw): # check this is a SWHTask assert hasattr(self, 'log') assert not hasattr(self, 'run_task') assert 'SWHTask' in [x.__name__ for x in self.__class__.__mro__] self.log.debug(self.name) if kw: return 'OK (kw=%s)' % kw return 'OK' @shared_task(name='swh.scheduler.tests.tasks.multiping', bind=True) def multiping(self, n=10): promise = group(ping.s(i=i) for i in range(n))() self.log.debug('%s OK (spawned %s subtasks)' % (self.name, n)) promise.save() return promise.id @shared_task(name='swh.scheduler.tests.tasks.error') def not_implemented(): raise NotImplementedError('Nope') @shared_task(name='swh.scheduler.tests.tasks.add') def add(x, y): return x + y + + +@shared_task(name='swh.scheduler.tests.tasks.echo') +def echo(**kw): + "Does nothing, just return the given kwargs dict" + return kw diff --git a/swh/scheduler/tests/test_celery_tasks.py b/swh/scheduler/tests/test_celery_tasks.py index 6f9e2ab..78bed28 100644 --- a/swh/scheduler/tests/test_celery_tasks.py +++ b/swh/scheduler/tests/test_celery_tasks.py @@ -1,93 +1,163 @@ from time import sleep +from itertools import count + from celery.result import GroupResult from celery.result import AsyncResult import pytest from swh.scheduler.utils import create_task_dict from swh.scheduler.celery_backend.runner import run_ready_tasks def test_ping(swh_app, celery_session_worker): res = swh_app.send_task( 'swh.scheduler.tests.tasks.ping') assert res res.wait() assert res.successful() assert res.result == 'OK' +def test_ping_with_kw(swh_app, celery_session_worker): + res = swh_app.send_task( + 'swh.scheduler.tests.tasks.ping', kwargs={'a': 1}) + assert res + res.wait() + assert res.successful() + assert res.result == "OK (kw={'a': 1})" + + def test_multiping(swh_app, celery_session_worker): "Test that a task that spawns subtasks (group) works" res = swh_app.send_task( - 'swh.scheduler.tests.tasks.multiping', n=5) + 'swh.scheduler.tests.tasks.multiping', kwargs={'n': 5}) assert res res.wait() assert res.successful() # retrieve the GroupResult for this task and wait for all the subtasks # to complete promise_id = res.result assert promise_id promise = GroupResult.restore(promise_id, app=swh_app) for i in range(5): if promise.ready(): break sleep(1) results = [x.get() for x in promise.results] + assert len(results) == 5 for i in range(5): assert ("OK (kw={'i': %s})" % i) in results @pytest.mark.db def test_scheduler_fixture(swh_app, celery_session_worker, swh_scheduler): "Test that the scheduler fixture works properly" task_type = swh_scheduler.get_task_type('swh-test-ping') assert task_type assert task_type['backend_name'] == 'swh.scheduler.tests.tasks.ping' swh_scheduler.create_tasks([create_task_dict( 'swh-test-ping', 'oneshot')]) backend_tasks = run_ready_tasks(swh_scheduler, swh_app) assert backend_tasks for task in backend_tasks: # Make sure the task completed AsyncResult(id=task['backend_id']).get() @pytest.mark.db def test_task_return_value(swh_app, 
celery_session_worker, swh_scheduler): task_type = swh_scheduler.get_task_type('swh-test-add') assert task_type assert task_type['backend_name'] == 'swh.scheduler.tests.tasks.add' swh_scheduler.create_tasks([create_task_dict( 'swh-test-add', 'oneshot', 12, 30)]) backend_tasks = run_ready_tasks(swh_scheduler, swh_app) assert len(backend_tasks) == 1 task = backend_tasks[0] value = AsyncResult(id=task['backend_id']).get() assert value == 42 @pytest.mark.db def test_task_exception(swh_app, celery_session_worker, swh_scheduler): task_type = swh_scheduler.get_task_type('swh-test-error') assert task_type assert task_type['backend_name'] == 'swh.scheduler.tests.tasks.error' swh_scheduler.create_tasks([create_task_dict( 'swh-test-error', 'oneshot')]) backend_tasks = run_ready_tasks(swh_scheduler, swh_app) assert len(backend_tasks) == 1 task = backend_tasks[0] result = AsyncResult(id=task['backend_id']) with pytest.raises(NotImplementedError): result.get() + + +def test_statsd(swh_app, celery_session_worker, mocker): + m = mocker.patch('swh.scheduler.task.Statsd._send_to_server') + mocker.patch('swh.scheduler.task.ts', side_effect=count()) + mocker.patch('swh.core.statsd.monotonic', side_effect=count()) + res = swh_app.send_task( + 'swh.scheduler.tests.tasks.echo') + assert res + res.wait() + assert res.successful() + assert res.result == {} + + m.assert_any_call( + 'swh_task_called_count:1|c|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + m.assert_any_call( + 'swh_task_start_ts:0|g|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + m.assert_any_call( + 'swh_task_end_ts:1|g|' + '#status:uneventful,task:swh.scheduler.tests.tasks.echo,' + 'worker:unknown worker') + m.assert_any_call( + 'swh_task_duration_seconds:1000|ms|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + m.assert_any_call( + 'swh_task_success_count:1|c|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + + +def test_statsd_with_status(swh_app, celery_session_worker, mocker): + m = mocker.patch('swh.scheduler.task.Statsd._send_to_server') + mocker.patch('swh.scheduler.task.ts', side_effect=count()) + mocker.patch('swh.core.statsd.monotonic', side_effect=count()) + res = swh_app.send_task( + 'swh.scheduler.tests.tasks.echo', kwargs={'status': 'eventful'}) + assert res + res.wait() + assert res.successful() + assert res.result == {'status': 'eventful'} + + m.assert_any_call( + 'swh_task_called_count:1|c|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + m.assert_any_call( + 'swh_task_start_ts:0|g|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + m.assert_any_call( + 'swh_task_end_ts:1|g|' + '#status:eventful,task:swh.scheduler.tests.tasks.echo,' + 'worker:unknown worker') + m.assert_any_call( + 'swh_task_duration_seconds:1000|ms|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') + m.assert_any_call( + 'swh_task_success_count:1|c|' + '#task:swh.scheduler.tests.tasks.echo,worker:unknown worker') diff --git a/swh/scheduler/tests/updater/__init__.py b/swh/scheduler/tests/updater/__init__.py deleted file mode 100644 index bd7747b..0000000 --- a/swh/scheduler/tests/updater/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -from arrow import utcnow - -try: - from hypothesis.strategies 
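
The exact values asserted in the two statsd tests above are deterministic because both `swh.scheduler.task.ts` and `swh.core.statsd.monotonic` are patched with `itertools.count()`: successive calls return 0, 1, 2, ..., so the start and end gauges read 0 and 1 and the timer reports one fake "second", i.e. 1000 ms. In miniature:

from itertools import count

fake_clock = count()                      # stands in for the patched ts() / monotonic()
start, end = next(fake_clock), next(fake_clock)
assert (start, end) == (0, 1)
assert (end - start) * 1000 == 1000       # the 1000|ms seen in the assertions
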
import from_regex -except ImportError: - from hypothesis.strategies import text - - # Revert to using basic text generation - def from_regex(*args, **kwargs): - return text() - - -class UpdaterTestUtil: - """Mixin intended for event generation purposes - - """ - def _make_event(self, event_type, name, origin_type): - return { - 'type': event_type, - 'repo': { - 'name': name, - }, - 'created_at': utcnow(), - 'origin_type': origin_type, - } - - def _make_events(self, events): - for event_type, repo_name, origin_type in events: - yield self._make_event(event_type, repo_name, origin_type) - - def _make_incomplete_event(self, event_type, name, origin_type, - missing_data_key): - event = self._make_event(event_type, name, origin_type) - del event[missing_data_key] - return event - - def _make_incomplete_events(self, events): - for event_type, repo_name, origin_type, missing_data_key in events: - yield self._make_incomplete_event(event_type, repo_name, - origin_type, missing_data_key) - - def _make_simple_event(self, event_type, name, origin_type): - return { - 'type': event_type, - 'url': 'https://fakeurl/%s' % name, - 'origin_type': origin_type, - 'created_at': utcnow(), - } diff --git a/swh/scheduler/tests/updater/conftest.py b/swh/scheduler/tests/updater/conftest.py deleted file mode 100644 index 87584c5..0000000 --- a/swh/scheduler/tests/updater/conftest.py +++ /dev/null @@ -1,68 +0,0 @@ -import pytest -import glob -import os -from arrow import utcnow # XXX - -from swh.core.utils import numfile_sortkey as sortkey -from swh.scheduler.updater.backend import SchedulerUpdaterBackend -from swh.scheduler.tests import SQL_DIR -import swh.scheduler.tests.conftest # noqa - - -DUMP_FILES = os.path.join(SQL_DIR, 'updater', '*.sql') - - -@pytest.fixture -def swh_scheduler_updater(postgresql): - config = { - 'db': postgresql.dsn, - } - - all_dump_files = sorted(glob.glob(DUMP_FILES), key=sortkey) - - cursor = postgresql.cursor() - for fname in all_dump_files: - with open(fname) as fobj: - cursor.execute(fobj.read()) - postgresql.commit() - - backend = SchedulerUpdaterBackend(**config) - return backend - - -def make_event(event_type, name, origin_type): - return { - 'type': event_type, - 'repo': { - 'name': name, - }, - 'created_at': utcnow(), - 'origin_type': origin_type, - } - - -def make_simple_event(event_type, name, origin_type): - return { - 'type': event_type, - 'url': 'https://fakeurl/%s' % name, - 'origin_type': origin_type, - 'created_at': utcnow(), - } - - -def make_events(events): - for event_type, repo_name, origin_type in events: - yield make_event(event_type, repo_name, origin_type) - - -def make_incomplete_event(event_type, name, origin_type, - missing_data_key): - event = make_event(event_type, name, origin_type) - del event[missing_data_key] - return event - - -def make_incomplete_events(events): - for event_type, repo_name, origin_type, missing_data_key in events: - yield make_incomplete_event(event_type, repo_name, - origin_type, missing_data_key) diff --git a/swh/scheduler/tests/updater/test_backend.py b/swh/scheduler/tests/updater/test_backend.py deleted file mode 100644 index 7caa85a..0000000 --- a/swh/scheduler/tests/updater/test_backend.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -from hypothesis import given -from hypothesis.strategies import 
sets - -from swh.scheduler.updater.events import SWHEvent - -from . import from_regex - - -@given(urls=sets( - from_regex( - r'^https://somewhere[.]org/[a-z0-9]{5,7}/[a-z0-9]{3,10}$'), - min_size=10, max_size=15)) -def test_cache_read(urls, swh_scheduler_updater): - # beware that the fixture is only called once for all the tests - # generated by hypothesis, so the db is not cleared between calls. - # see the end of - # https://hypothesis.works/articles/hypothesis-pytest-fixtures/ - def gen_events(urls): - for url in urls: - yield SWHEvent({ - 'url': url, - 'type': 'create', - 'origin_type': 'git', - }) - known_urls = set(e['url'] for e in - swh_scheduler_updater.cache_read(limit=1000000)) - swh_scheduler_updater.cache_put(gen_events(urls)) - new_urls = {u.strip() for u in urls} - known_urls - all_urls = set(e['url'] for e in - swh_scheduler_updater.cache_read(limit=1000000)) - assert (all_urls - known_urls) == new_urls diff --git a/swh/scheduler/tests/updater/test_consumer.py b/swh/scheduler/tests/updater/test_consumer.py deleted file mode 100644 index 93656a0..0000000 --- a/swh/scheduler/tests/updater/test_consumer.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import unittest -from itertools import chain - -from hypothesis import given, settings, HealthCheck -from hypothesis.strategies import lists, sampled_from, text, tuples - -from swh.scheduler.updater.consumer import UpdaterConsumer -from swh.scheduler.updater.events import LISTENED_EVENTS, SWHEvent - -from . import UpdaterTestUtil, from_regex - - -class FakeSchedulerUpdaterBackend: - def __init__(self): - self.events = [] - - def cache_put(self, events): - self.events.append(events) - - -class FakeUpdaterConsumerBase(UpdaterConsumer): - def __init__(self, backend): - super().__init__(backend) - self.connection_opened = False - self.connection_closed = False - self.consume_called = False - self.has_events_called = False - - def open_connection(self): - self.connection_opened = True - - def close_connection(self): - self.connection_closed = True - - def convert_event(self, event): - pass - - -class FakeUpdaterConsumerRaise(FakeUpdaterConsumerBase): - def has_events(self): - self.has_events_called = True - return True - - def consume_events(self): - self.consume_called = True - raise ValueError('Broken stuff') - - -class UpdaterConsumerRaisingTest(unittest.TestCase): - def setUp(self): - self.updater = FakeUpdaterConsumerRaise( - FakeSchedulerUpdaterBackend()) - - def test_running_raise(self): - """Raising during run should finish fine. 
- - """ - # given - self.assertEqual(self.updater.count, 0) - self.assertEqual(self.updater.seen_events, set()) - self.assertEqual(self.updater.events, []) - - # when - with self.assertRaisesRegex(ValueError, 'Broken stuff'): - self.updater.run() - - # then - self.assertEqual(self.updater.count, 0) - self.assertEqual(self.updater.seen_events, set()) - self.assertEqual(self.updater.events, []) - self.assertTrue(self.updater.connection_opened) - self.assertTrue(self.updater.has_events_called) - self.assertTrue(self.updater.connection_closed) - self.assertTrue(self.updater.consume_called) - - -class FakeUpdaterConsumerNoEvent(FakeUpdaterConsumerBase): - def has_events(self): - self.has_events_called = True - return False - - def consume_events(self): - self.consume_called = True - - -class UpdaterConsumerNoEventTest(unittest.TestCase): - def setUp(self): - self.updater = FakeUpdaterConsumerNoEvent( - FakeSchedulerUpdaterBackend()) - - def test_running_does_not_consume(self): - """Run with no events should do just fine""" - # given - self.assertEqual(self.updater.count, 0) - self.assertEqual(self.updater.seen_events, set()) - self.assertEqual(self.updater.events, []) - - # when - self.updater.run() - - # then - self.assertEqual(self.updater.count, 0) - self.assertEqual(self.updater.seen_events, set()) - self.assertEqual(self.updater.events, []) - self.assertTrue(self.updater.connection_opened) - self.assertTrue(self.updater.has_events_called) - self.assertTrue(self.updater.connection_closed) - self.assertFalse(self.updater.consume_called) - - -EVENT_KEYS = ['type', 'repo', 'created_at', 'origin_type'] - - -class FakeUpdaterConsumer(FakeUpdaterConsumerBase): - def __init__(self, backend, messages): - super().__init__(backend) - self.messages = messages - self.debug = False - - def has_events(self): - self.has_events_called = True - return len(self.messages) > 0 - - def consume_events(self): - self.consume_called = True - for msg in self.messages: - yield msg - self.messages.pop() - - def convert_event(self, event, keys=EVENT_KEYS): - for k in keys: - v = event.get(k) - if v is None: - return None - - e = { - 'type': event['type'], - 'url': 'https://fake.url/%s' % event['repo']['name'], - 'last_seen': event['created_at'], - 'origin_type': event['origin_type'], - } - return SWHEvent(e) - - -class UpdaterConsumerWithEventTest(UpdaterTestUtil, unittest.TestCase): - @settings(suppress_health_check=[HealthCheck.too_slow]) - @given(lists(tuples(sampled_from(LISTENED_EVENTS), # event type - from_regex(r'^[a-z0-9]{5,10}/[a-z0-9]{7,12}$'), # name - text()), # origin type - min_size=3, max_size=10), - lists(tuples(text(), # event type - from_regex(r'^[a-z0-9]{5,7}/[a-z0-9]{3,10}$'), # name - text()), # origin type - min_size=3, max_size=10), - lists(tuples(sampled_from(LISTENED_EVENTS), # event type - from_regex(r'^[a-z0-9]{5,10}/[a-z0-9]{7,12}$'), # name - text(), # origin type - sampled_from(EVENT_KEYS)), # keys to drop - min_size=3, max_size=10)) - def test_running(self, events, uninteresting_events, incomplete_events): - """Interesting events are written to cache, others are dropped - - """ - # given - ready_events = self._make_events(events) - ready_uninteresting_events = self._make_events(uninteresting_events) - ready_incomplete_events = self._make_incomplete_events( - incomplete_events) - - updater = FakeUpdaterConsumer( - FakeSchedulerUpdaterBackend(), - list(chain( - ready_events, ready_incomplete_events, - ready_uninteresting_events))) - - self.assertEqual(updater.count, 0) - 
self.assertEqual(updater.seen_events, set()) - self.assertEqual(updater.events, []) - - # when - updater.run() - - # then - self.assertEqual(updater.count, 0) - self.assertEqual(updater.seen_events, set()) - self.assertEqual(updater.events, []) - self.assertTrue(updater.connection_opened) - self.assertTrue(updater.has_events_called) - self.assertTrue(updater.connection_closed) - self.assertTrue(updater.consume_called) - - self.assertEqual(updater.messages, []) - # uninteresting or incomplete events are dropped - self.assertTrue(len(updater.backend.events), len(events)) diff --git a/swh/scheduler/tests/updater/test_events.py b/swh/scheduler/tests/updater/test_events.py deleted file mode 100644 index 2f00bd7..0000000 --- a/swh/scheduler/tests/updater/test_events.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import unittest - -from hypothesis import given -from hypothesis.strategies import sampled_from, text - -from swh.scheduler.updater.events import LISTENED_EVENTS, SWHEvent -from swh.scheduler.updater.ghtorrent import events - -from . import UpdaterTestUtil - - -def event_values_ko(): - return set(events['evt']).union( - set(events['ent'])).difference( - set(LISTENED_EVENTS)) - - -WRONG_EVENTS = sorted(list(event_values_ko())) - - -class EventTest(UpdaterTestUtil, unittest.TestCase): - @given(sampled_from(LISTENED_EVENTS), text(), text()) - def test_is_interesting_ok(self, event_type, name, origin_type): - evt = self._make_simple_event(event_type, name, origin_type) - self.assertTrue(SWHEvent(evt).is_interesting()) - - @given(text(), text(), text()) - def test_is_interested_with_noisy_event_should_be_ko( - self, event_type, name, origin_type): - if event_type in LISTENED_EVENTS: - # just in case something good is generated, skip it - return - evt = self._make_simple_event(event_type, name, origin_type) - self.assertFalse(SWHEvent(evt).is_interesting()) - - @given(sampled_from(WRONG_EVENTS), text(), text()) - def test_is_interesting_ko(self, event_type, name, origin_type): - evt = self._make_simple_event(event_type, name, origin_type) - self.assertFalse(SWHEvent(evt).is_interesting()) diff --git a/swh/scheduler/tests/updater/test_ghtorrent.py b/swh/scheduler/tests/updater/test_ghtorrent.py deleted file mode 100644 index 92cc89d..0000000 --- a/swh/scheduler/tests/updater/test_ghtorrent.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import unittest -from unittest.mock import patch - -from hypothesis import given -from hypothesis.strategies import sampled_from - -from swh.scheduler.updater.events import SWHEvent -from swh.scheduler.updater.ghtorrent import (INTERESTING_EVENT_KEYS, - GHTorrentConsumer, events) -from swh.scheduler.updater.backend import SchedulerUpdaterBackend - -from . 
import UpdaterTestUtil, from_regex - - -def event_values(): - return set(events['evt']).union(set(events['ent'])) - - -def ghtorrentize_event_name(event_name): - return '%sEvent' % event_name.capitalize() - - -EVENT_TYPES = sorted([ghtorrentize_event_name(e) for e in event_values()]) - - -class FakeChannel: - """Fake Channel (virtual connection inside a connection) - - """ - def close(self): - self.close = True - - -class FakeConnection: - """Fake Rabbitmq connection for test purposes - - """ - def __init__(self, conn_string): - self._conn_string = conn_string - self._connect = False - self._release = False - self._channel = False - - def connect(self): - self._connect = True - return True - - def release(self): - self._connect = False - self._release = True - - def channel(self): - self._channel = True - return FakeChannel() - - -class GHTorrentConsumerTest(UpdaterTestUtil, unittest.TestCase): - def setUp(self): - config = { - 'ghtorrent': { - 'rabbitmq': { - 'conn': { - 'url': 'amqp://u:p@https://somewhere:9807', - }, - 'prefetch_read': 17, - }, - 'batch_cache_write': 42, - }, - 'scheduler_updater': { - 'cls': 'local', - 'args': { - 'db': 'dbname=softwareheritage-scheduler-updater-dev', - }, - }, - } - - GHTorrentConsumer.connection_class = FakeConnection - with patch.object( - SchedulerUpdaterBackend, '__init__', return_value=None): - self.consumer = GHTorrentConsumer(**config) - - @patch('swh.scheduler.updater.backend.SchedulerUpdaterBackend') - def test_init(self, mock_backend): - # given - # check init is ok - self.assertEqual(self.consumer.batch, 42) - self.assertEqual(self.consumer.prefetch_read, 17) - - def test_has_events(self): - self.assertTrue(self.consumer.has_events()) - - def test_connection(self): - # when - self.consumer.open_connection() - - # then - self.assertEqual(self.consumer.conn._conn_string, - 'amqp://u:p@https://somewhere:9807') - self.assertTrue(self.consumer.conn._connect) - self.assertFalse(self.consumer.conn._release) - - # when - self.consumer.close_connection() - - # then - self.assertFalse(self.consumer.conn._connect) - self.assertTrue(self.consumer.conn._release) - self.assertIsInstance(self.consumer.channel, FakeChannel) - - @given(sampled_from(EVENT_TYPES), - from_regex(r'^[a-z0-9]{5,7}/[a-z0-9]{3,10}$')) - def test_convert_event_ok(self, event_type, name): - input_event = self._make_event(event_type, name, 'git') - actual_event = self.consumer.convert_event(input_event) - - self.assertTrue(isinstance(actual_event, SWHEvent)) - - event = actual_event.get() - - expected_event = { - 'type': event_type.lower().rstrip('Event'), - 'url': 'https://github.com/%s' % name, - 'last_seen': input_event['created_at'], - 'cnt': 1, - 'origin_type': 'git', - } - self.assertEqual(event, expected_event) - - @given(sampled_from(EVENT_TYPES), - from_regex(r'^[a-z0-9]{5,7}/[a-z0-9]{3,10}$'), - sampled_from(INTERESTING_EVENT_KEYS)) - def test_convert_event_ko(self, event_type, name, missing_data_key): - input_event = self._make_incomplete_event( - event_type, name, 'git', missing_data_key) - - logger = self.consumer.log - del self.consumer.log # prevent gazillions of warnings - actual_converted_event = self.consumer.convert_event(input_event) - self.consumer.log = logger - self.assertIsNone(actual_converted_event) - - @patch('swh.scheduler.updater.ghtorrent.collect_replies') - def test_consume_events(self, mock_collect_replies): - # given - self.consumer.queue = 'fake-queue' # hack - self.consumer.open_connection() - - fake_events = [ - self._make_event('PushEvent', 
'user/some-repo', 'git'), - self._make_event('PushEvent', 'user2/some-other-repo', 'git'), - ] - - mock_collect_replies.return_value = fake_events - - # when - actual_events = [] - for e in self.consumer.consume_events(): - actual_events.append(e) - - # then - self.assertEqual(fake_events, actual_events) - - mock_collect_replies.assert_called_once_with( - self.consumer.conn, - self.consumer.channel, - 'fake-queue', - no_ack=False, - limit=17 - ) diff --git a/swh/scheduler/tests/updater/test_writer.py b/swh/scheduler/tests/updater/test_writer.py deleted file mode 100644 index 825f4a2..0000000 --- a/swh/scheduler/tests/updater/test_writer.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import os -from glob import glob - -import pytest -from pytest_postgresql.factories import postgresql as pg_fixture_factory - -from os.path import join -from swh.core.utils import numfile_sortkey as sortkey -from swh.scheduler.tests import SQL_DIR -from swh.scheduler.updater.events import LISTENED_EVENTS, SWHEvent -from swh.scheduler.updater.writer import UpdaterWriter - -from .conftest import make_simple_event - - -pg_scheduler = pg_fixture_factory('postgresql_proc', 'scheduler') -pg_updater = pg_fixture_factory('postgresql_proc', 'updater') - - -def pg_sched_fact(dbname, sqldir): - @pytest.fixture - def pg_scheduler_db(request): - pg = request.getfixturevalue('pg_%s' % dbname) - dump_files = sorted(glob(os.path.join(sqldir, '*.sql')), - key=sortkey) - with pg.cursor() as cur: - for fname in dump_files: - with open(fname) as fobj: - sql = fobj.read().replace('concurrently', '') - cur.execute(sql) - pg.commit() - yield pg - - return pg_scheduler_db - - -scheduler_db = pg_sched_fact('scheduler', SQL_DIR) -updater_db = pg_sched_fact('updater', join(SQL_DIR, 'updater')) - - -@pytest.fixture -def swh_updater_writer(scheduler_db, updater_db): - config = { - 'scheduler': { - 'cls': 'local', - 'args': { - 'db': scheduler_db.dsn, - }, - }, - 'scheduler_updater': { - 'cls': 'local', - 'args': { - 'db': updater_db.dsn, - 'cache_read_limit': 5, - }, - }, - 'updater_writer': { - 'pause': 0.1, - 'verbose': False, - }, - } - return UpdaterWriter(**config) - - -def test_run_ko(swh_updater_writer): - """Only git tasks are supported for now, other types are dismissed. 
- - """ - scheduler = swh_updater_writer.scheduler_backend - updater = swh_updater_writer.scheduler_updater_backend - - ready_events = [ - SWHEvent( - make_simple_event(event_type, 'origin-%s' % i, - 'svn')) - for i, event_type in enumerate(LISTENED_EVENTS) - ] - - updater.cache_put(ready_events) - list(updater.cache_read()) - - r = scheduler.peek_ready_tasks('load-git') - - # first read on an empty scheduling db results with nothing in it - assert not r - - # Read from cache to scheduler db - swh_updater_writer.run() - - r = scheduler.peek_ready_tasks('load-git') - - # other reads after writes are still empty since it's not supported - assert not r - - -def test_run_ok(swh_updater_writer): - """Only git origin are supported for now - - """ - scheduler = swh_updater_writer.scheduler_backend - updater = swh_updater_writer.scheduler_updater_backend - - ready_events = [ - SWHEvent( - make_simple_event(event_type, 'origin-%s' % i, 'git')) - for i, event_type in enumerate(LISTENED_EVENTS) - ] - - expected_length = len(ready_events) - - updater.cache_put(ready_events) - - data = list(updater.cache_read()) - assert len(data) == expected_length - - r = scheduler.peek_ready_tasks('load-git') - - # first read on an empty scheduling db results with nothing in it - assert not r - - # Read from cache to scheduler db - swh_updater_writer.run() - - # now, we should have scheduling task ready - r = scheduler.peek_ready_tasks('load-git') - - assert len(r) == expected_length - - # Check the task has been scheduled - for t in r: - assert t['type'] == 'load-git' - assert t['priority'] == 'normal' - assert t['policy'] == 'oneshot' - assert t['status'] == 'next_run_not_scheduled' - - # writer has nothing to do now - swh_updater_writer.run() - - # so no more data in cache - data = list(updater.cache_read()) - - assert not data - - # provided, no runner is ran, still the same amount of scheduling tasks - r = scheduler.peek_ready_tasks('load-git') - - assert len(r) == expected_length diff --git a/swh/scheduler/updater/__init__.py b/swh/scheduler/updater/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/swh/scheduler/updater/backend.py b/swh/scheduler/updater/backend.py deleted file mode 100644 index 919cb0b..0000000 --- a/swh/scheduler/updater/backend.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - - -from arrow import utcnow -import psycopg2.pool -import psycopg2.extras - -from swh.core.db import BaseDb -from swh.core.db.common import db_transaction, db_transaction_generator -from swh.scheduler.backend import format_query - - -class SchedulerUpdaterBackend: - CONFIG_BASE_FILENAME = 'backend/scheduler-updater' -# 'cache_read_limit': ('int', 1000), - - def __init__(self, db, cache_read_limit=1000, - min_pool_conns=1, max_pool_conns=10): - """ - Args: - db_conn: either a libpq connection string, or a psycopg2 connection - - """ - if isinstance(db, psycopg2.extensions.connection): - self._pool = None - self._db = BaseDb(db) - else: - self._pool = psycopg2.pool.ThreadedConnectionPool( - min_pool_conns, max_pool_conns, db, - cursor_factory=psycopg2.extras.RealDictCursor, - ) - self._db = None - self.limit = cache_read_limit - - def get_db(self): - if self._db: - return self._db - return BaseDb.from_pool(self._pool) - - def put_db(self, db): - if db is not 
self._db: - db.put_conn() - - cache_put_keys = ['url', 'cnt', 'last_seen', 'origin_type'] - - @db_transaction() - def cache_put(self, events, timestamp=None, db=None, cur=None): - """Write new events in the backend. - - """ - cur.execute('select swh_mktemp_cache()') - db.copy_to(prepare_events(events, timestamp), - 'tmp_cache', self.cache_put_keys, cur=cur) - cur.execute('select swh_cache_put()') - - cache_read_keys = ['id', 'url', 'origin_type', 'cnt', 'first_seen', - 'last_seen'] - - @db_transaction_generator() - def cache_read(self, timestamp=None, limit=None, db=None, cur=None): - """Read events from the cache prior to timestamp. - - Note that limit=None does not mean 'no limit' but use the default - limit (see cache_read_limit constructor argument). - - """ - if not timestamp: - timestamp = utcnow() - - if not limit: - limit = self.limit - - q = format_query('select {keys} from swh_cache_read(%s, %s)', - self.cache_read_keys) - cur.execute(q, (timestamp, limit)) - yield from cur.fetchall() - - @db_transaction() - def cache_remove(self, entries, db=None, cur=None): - """Clean events from the cache - - """ - q = 'delete from cache where url in (%s)' % ( - ', '.join(("'%s'" % e for e in entries)), ) - cur.execute(q) - - -def prepare_events(events, timestamp=None): - if timestamp is None: - timestamp = utcnow() - outevents = [] - urls = [] - for e in events: - event = e.get() - url = event['url'].strip() - if event['last_seen'] is None: - event['last_seen'] = timestamp - event['url'] = url - - if url in urls: - idx = urls.index(url) - urls.append(urls.pop(idx)) - prev_event = outevents.pop(idx) - event['cnt'] += prev_event['cnt'] - event['last_seen'] = max( - event['last_seen'], prev_event['last_seen']) - else: - urls.append(url) - outevents.append(event) - return outevents diff --git a/swh/scheduler/updater/consumer.py b/swh/scheduler/updater/consumer.py deleted file mode 100644 index a41404b..0000000 --- a/swh/scheduler/updater/consumer.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import logging - -from abc import ABCMeta, abstractmethod - - -class UpdaterConsumer(metaclass=ABCMeta): - """Event consumer - - """ - def __init__(self, backend, batch_cache_write=1000): - super().__init__() - self._reset_cache() - self.backend = backend - self.batch = int(batch_cache_write) - logging.basicConfig(level=logging.DEBUG) - self.log = logging.getLogger('%s.%s' % ( - self.__class__.__module__, self.__class__.__name__)) - - def _reset_cache(self): - """Reset internal cache. - - """ - self.count = 0 - self.seen_events = set() - self.events = [] - - def is_interesting(self, event): - """Determine if an event is interesting or not. - - Args: - event (SWHEvent): SWH event - - """ - return event.is_interesting() - - @abstractmethod - def convert_event(self, event): - """Parse an event into an SWHEvent. - - """ - pass - - def process_event(self, event): - """Process converted and interesting event. 
- - Args: - event (SWHEvent): Event to process if deemed interesting - - """ - try: - if event.url in self.seen_events: - event.cnt += 1 - else: - self.events.append(event) - self.seen_events.add(event.url) - self.count += 1 - finally: - if self.count >= self.batch: - if self.events: - self.backend.cache_put(self.events) - self._reset_cache() - - def _flush(self): - """Flush remaining internal cache if any. - - """ - if self.events: - self.backend.cache_put(self.events) - self._reset_cache() - - @abstractmethod - def has_events(self): - """Determine if there remains events to consume. - - Returns - boolean value, true for remaining events, False otherwise - - """ - pass - - @abstractmethod - def consume_events(self): - """The main entry point to consume events. - - This should either yield or return message for consumption. - - """ - pass - - @abstractmethod - def open_connection(self): - """Open a connection to the remote system we are supposed to consume - from. - - """ - pass - - @abstractmethod - def close_connection(self): - """Close opened connection to the remote system. - - """ - pass - - def run(self): - """The main entry point to consume events. - - """ - try: - self.open_connection() - while self.has_events(): - for _event in self.consume_events(): - event = self.convert_event(_event) - if not event: - self.log.warning( - 'Incomplete event dropped %s' % _event) - continue - if not self.is_interesting(event): - continue - if self.debug: - self.log.debug('Event: %s' % event) - try: - self.process_event(event) - except Exception: - self.log.exception( - 'Problem when processing event %s' % _event) - continue - except Exception as e: - self.log.error('Error raised during consumption: %s' % e) - raise e - finally: - self.close_connection() - self._flush() diff --git a/swh/scheduler/updater/events.py b/swh/scheduler/updater/events.py deleted file mode 100644 index d70efbe..0000000 --- a/swh/scheduler/updater/events.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - - -LISTENED_EVENTS = [ - 'delete', - 'public', - 'push' -] - - -class SWHEvent: - """SWH's interesting event (resulting in an origin update) - - """ - def __init__(self, evt, cnt=1): - self.event = evt - self.type = evt['type'].lower() - self.url = evt['url'] - self.last_seen = evt.get('last_seen') - self.cnt = cnt - self.origin_type = evt.get('origin_type') - - def is_interesting(self): - return self.type in LISTENED_EVENTS - - def get(self): - return { - 'type': self.type, - 'url': self.url, - 'last_seen': self.last_seen, - 'cnt': self.cnt, - 'origin_type': self.origin_type - } - - def __str__(self): - return str(self.get()) diff --git a/swh/scheduler/updater/ghtorrent/__init__.py b/swh/scheduler/updater/ghtorrent/__init__.py deleted file mode 100644 index 8ece752..0000000 --- a/swh/scheduler/updater/ghtorrent/__init__.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import json - -from kombu import Connection, Exchange, Queue -from kombu.common import collect_replies - -from swh.core.config import merge_configs - -from swh.scheduler.updater.events import 
SWHEvent -from swh.scheduler.updater.consumer import UpdaterConsumer -from swh.scheduler.updater.backend import SchedulerUpdaterBackend - - -events = { - # ghtorrent events related to github events (interesting) - 'evt': [ - 'commitcomment', 'create', 'delete', 'deployment', - 'deploymentstatus', 'download', 'follow', 'fork', 'forkapply', - 'gist', 'gollum', 'issuecomment', 'issues', 'member', - 'membership', 'pagebuild', 'public', 'pullrequest', - 'pullrequestreviewcomment', 'push', 'release', 'repository', - 'status', 'teamadd', 'watch' - ], - # ghtorrent events related to mongodb insert (not interesting) - 'ent': [ - 'commit_comments', 'commits', 'followers', 'forks', - 'geo_cache', 'issue_comments', 'issue_events', 'issues', - 'org_members', 'pull_request_comments', 'pull_requests', - 'repo_collaborators', 'repo_labels', 'repos', 'users', 'watchers' - ] -} - -INTERESTING_EVENT_KEYS = ['type', 'repo', 'created_at'] - -DEFAULT_CONFIG = { - 'ghtorrent': { - 'batch_cache_write': 1000, - 'rabbitmq': { - 'prefetch_read': 100, - 'conn': { - 'url': 'amqp://guest:guest@localhost:5672', - 'exchange_name': 'ght-streams', - 'routing_key': 'something', - 'queue_name': 'fake-events', - }, - }, - }, - 'scheduler_updater': { - 'cls': 'local', - 'args': { - 'db': 'dbname=softwareheritage-scheduler-updater-dev', - 'cache_read_limit': 1000, - }, - }, -} - - -class GHTorrentConsumer(UpdaterConsumer): - """GHTorrent events consumer - - """ - connection_class = Connection - - def __init__(self, **config): - self.config = merge_configs(DEFAULT_CONFIG, config) - - ght_config = self.config['ghtorrent'] - rmq_config = ght_config['rabbitmq'] - self.prefetch_read = int(rmq_config.get('prefetch_read', 100)) - - exchange = Exchange( - rmq_config['conn']['exchange_name'], - 'topic', durable=True) - routing_key = rmq_config['conn']['routing_key'] - self.queue = Queue(rmq_config['conn']['queue_name'], - exchange=exchange, - routing_key=routing_key, - auto_delete=True) - - if self.config['scheduler_updater']['cls'] != 'local': - raise ValueError( - 'The scheduler_updater can only be a cls=local for now') - backend = SchedulerUpdaterBackend( - **self.config['scheduler_updater']['args']) - - super().__init__(backend, ght_config.get('batch_cache_write', 1000)) - - def has_events(self): - """Always has events - - """ - return True - - def convert_event(self, event): - """Given ghtorrent event, convert it to a SWHEvent instance. 
- - """ - if isinstance(event, str): - event = json.loads(event) - for k in INTERESTING_EVENT_KEYS: - if k not in event: - if hasattr(self, 'log'): - self.log.warning( - 'Event should have the \'%s\' entry defined' % k) - return None - - _type = event['type'].lower().rstrip('Event') - _repo_name = 'https://github.com/%s' % event['repo']['name'] - - return SWHEvent({ - 'type': _type, - 'url': _repo_name, - 'last_seen': event['created_at'], - 'origin_type': 'git', - }) - - def open_connection(self): - """Open rabbitmq connection - - """ - self.conn = self.connection_class( - self.config['ghtorrent']['rabbitmq']['conn']['url']) - self.conn.connect() - self.channel = self.conn.channel() - - def close_connection(self): - """Close rabbitmq connection - - """ - self.channel.close() - self.conn.release() - - def consume_events(self): - """Consume and yield queue messages - - """ - yield from collect_replies( - self.conn, self.channel, self.queue, - no_ack=False, limit=self.prefetch_read) diff --git a/swh/scheduler/updater/ghtorrent/cli.py b/swh/scheduler/updater/ghtorrent/cli.py deleted file mode 100644 index f94290b..0000000 --- a/swh/scheduler/updater/ghtorrent/cli.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import click - - -@click.command() -@click.option('--verbose/--no-verbose', '-v', default=False, - help='Verbose mode') -@click.pass_context -def main(ctx, verbose): - """Consume events from ghtorrent and write them to cache. - - """ - click.echo("Deprecated! Use 'swh-scheduler updater' instead.", - err=True) - ctx.exit(1) - - -if __name__ == '__main__': - main() diff --git a/swh/scheduler/updater/writer.py b/swh/scheduler/updater/writer.py deleted file mode 100644 index cec13b5..0000000 --- a/swh/scheduler/updater/writer.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (C) 2018 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import click -import logging -import time - -from arrow import utcnow - -from swh.core import utils -from swh.scheduler import get_scheduler -from swh.scheduler.utils import create_oneshot_task_dict -from swh.scheduler.updater.backend import SchedulerUpdaterBackend - - -class UpdaterWriter: - """Updater writer in charge of updating the scheduler db with latest - prioritized oneshot tasks - - In effect, this: - - reads the events from scheduler updater's db - - converts those events into priority oneshot tasks - - dumps them into the scheduler db - - """ - - def __init__(self, **config): - self.config = config - if self.config['scheduler_updater']['cls'] != 'local': - raise ValueError( - 'The scheduler_updater can only be a cls=local for now') - self.scheduler_updater_backend = SchedulerUpdaterBackend( - **self.config['scheduler_updater']['args']) - self.scheduler_backend = get_scheduler(**self.config['scheduler']) - self.pause = self.config.get('updater_writer', {}).get('pause', 10) - self.log = logging.getLogger( - 'swh.scheduler.updater.writer.UpdaterWriter') - - def convert_to_oneshot_task(self, event): - """Given an event, convert it into oneshot task with priority - - Args: - event (dict): The event to convert to task - - """ - if event['origin_type'] 
== 'git': - return create_oneshot_task_dict( - 'load-git', - event['url'], - priority='normal') - self.log.warning('Type %s is not supported for now, only git' % ( - event['origin_type'], )) - return None - - def write_event_to_scheduler(self, events): - """Write events to the scheduler and yield ids when done""" - # convert events to oneshot tasks - oneshot_tasks = filter(lambda e: e is not None, - map(self.convert_to_oneshot_task, events)) - # write event to scheduler - self.scheduler_backend.create_tasks(list(oneshot_tasks)) - for e in events: - yield e['url'] - - def run(self): - """First retrieve events from cache (including origin_type, cnt), - then convert them into oneshot tasks with priority, then - write them to the scheduler db, at last remove them from - cache. - - """ - while True: - timestamp = utcnow() - events = list(self.scheduler_updater_backend.cache_read(timestamp)) - if not events: - break - for urls in utils.grouper(self.write_event_to_scheduler(events), - n=100): - self.scheduler_updater_backend.cache_remove(urls) - time.sleep(self.pause) - - -@click.command() -@click.option('--verbose/--no-verbose', '-v', default=False, - help='Verbose mode') -@click.pass_context -def main(ctx, verbose): - click.echo("Deprecated! Use 'swh-scheduler updater' instead.", - err=True) - ctx.exit(1) - - -if __name__ == '__main__': - main() diff --git a/version.txt b/version.txt index 9dc4992..f4832a1 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.64-0-gc973ec0 \ No newline at end of file +v0.0.65-0-gee162fe \ No newline at end of file
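
Note on this removal: the diff above drops the whole swh.scheduler.updater subpackage (GHTorrent consumer, cache backend, writer and CLI shims) together with its tests and fixtures. The removed UpdaterWriter turned each cached git event into a prioritized oneshot load-git task and handed it to the scheduler backend. The sketch below shows, under stated assumptions, how an equivalent oneshot task can be created directly against the scheduler backend; get_scheduler, create_oneshot_task_dict and create_tasks all appear in the code removed above, while the database DSN and the origin URL are illustrative placeholders, not values taken from this diff.

    # Minimal sketch, assuming a local scheduler backend; not part of this diff.
    from swh.scheduler import get_scheduler
    from swh.scheduler.utils import create_oneshot_task_dict

    # Mirrors the removed writer's 'scheduler' configuration entry;
    # the DSN below is an illustrative assumption.
    scheduler = get_scheduler(
        cls='local',
        args={'db': 'dbname=softwareheritage-scheduler-dev'},
    )

    # The removed UpdaterWriter.convert_to_oneshot_task built this kind of
    # task dict for each cached git event; the origin URL is illustrative.
    task = create_oneshot_task_dict(
        'load-git',
        'https://github.com/user/some-repo',
        priority='normal',
    )

    scheduler.create_tasks([task])

As the removed test_run_ko test documents, the writer dismissed non-git origins, so the sketch only covers the load-git task type.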