diff --git a/PKG-INFO b/PKG-INFO index 5e45c0e..381adf0 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,37 +1,37 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.22.0 +Version: 0.23.0 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Project-URL: Documentation, https://docs.softwareheritage.org/devel/swh-scheduler/ Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.7 Description-Content-Type: text/markdown Provides-Extra: testing Provides-Extra: journal Provides-Extra: simulator License-File: LICENSE License-File: LICENSE.Celery License-File: AUTHORS swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package). diff --git a/requirements.txt b/requirements.txt index 1b8e3a4..64c6305 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,17 +1,16 @@ # Add here external Python modules dependencies, one per line. Module names # should match https://pypi.python.org/pypi names. For the full spec or # dependency lines, see https://pip.readthedocs.org/en/1.1/requirements.html attrs attrs-strict celery >= 4.3, != 5.0.3 click < 8.0 -elasticsearch > 5.4 flask humanize pika >= 1.1.0 psycopg2 pyyaml requests setuptools typing-extensions diff --git a/sql/updates/32.sql b/sql/updates/32.sql new file mode 100644 index 0000000..d04821a --- /dev/null +++ b/sql/updates/32.sql @@ -0,0 +1,57 @@ +-- SWH DB schema upgrade +-- from_version: 31 +-- to_version: 32 +-- description: Use a temporary table to update scheduler_metrics + +insert into dbversion (version, release, description) + values (32, now(), 'Work In Progress'); + +create or replace function update_metrics(lister_id uuid default NULL, ts timestamptz default now()) + returns setof scheduler_metrics + language plpgsql +as $$ + begin + -- If we do the following select as a subquery in the insert statement below, + -- PostgreSQL prevents the use of parallel queries. So we do the select into a + -- temporary table, which doesn't suffer this limitation. 
+ + create temporary table tmp_update_metrics + on commit drop + as select + lo.lister_id, lo.visit_type, coalesce(ts, now()) as last_update, + count(*) as origins_known, + count(*) filter (where enabled) as origins_enabled, + count(*) filter (where + enabled and last_snapshot is NULL + ) as origins_never_visited, + count(*) filter (where + enabled and lo.last_update > last_successful + ) as origins_with_pending_changes + from listed_origins lo + left join origin_visit_stats ovs using (url, visit_type) + where + -- update only for the requested lister + update_metrics.lister_id = lo.lister_id + -- or for all listers if the function argument is null + or update_metrics.lister_id is null + group by (lo.lister_id, lo.visit_type); + + return query + insert into scheduler_metrics ( + lister_id, visit_type, last_update, + origins_known, origins_enabled, + origins_never_visited, origins_with_pending_changes + ) + select * from tmp_update_metrics + on conflict on constraint scheduler_metrics_pkey do update + set + last_update = EXCLUDED.last_update, + origins_known = EXCLUDED.origins_known, + origins_enabled = EXCLUDED.origins_enabled, + origins_never_visited = EXCLUDED.origins_never_visited, + origins_with_pending_changes = EXCLUDED.origins_with_pending_changes + returning *; + end; +$$; + +comment on function update_metrics(uuid, timestamptz) is 'Update metrics for the given lister_id'; diff --git a/swh.scheduler.egg-info/PKG-INFO b/swh.scheduler.egg-info/PKG-INFO index 5e45c0e..381adf0 100644 --- a/swh.scheduler.egg-info/PKG-INFO +++ b/swh.scheduler.egg-info/PKG-INFO @@ -1,37 +1,37 @@ Metadata-Version: 2.1 Name: swh.scheduler -Version: 0.22.0 +Version: 0.23.0 Summary: Software Heritage Scheduler Home-page: https://forge.softwareheritage.org/diffusion/DSCH/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-scheduler Project-URL: Documentation, https://docs.softwareheritage.org/devel/swh-scheduler/ Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.7 Description-Content-Type: text/markdown Provides-Extra: testing Provides-Extra: journal Provides-Extra: simulator License-File: LICENSE License-File: LICENSE.Celery License-File: AUTHORS swh-scheduler ============= Job scheduler for the Software Heritage project. Task manager for asynchronous/delayed tasks, used for both recurrent (e.g., listing a forge, loading new stuff from a Git repository) and one-off activities (e.g., loading a specific version of a source package). 
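The new sql/updates/32.sql above reworks update_metrics() so the aggregation over listed_origins and origin_visit_stats is staged in a temporary table, which lets PostgreSQL parallelize the select before the upsert into scheduler_metrics. A minimal usage sketch of the migrated function, based only on the signature shown in this changeset (the lister UUID below is a placeholder, not a value from this patch):

-- refresh metrics for every lister (lister_id defaults to NULL)
select * from update_metrics();

-- refresh metrics for a single lister at an explicit timestamp;
-- '00000000-0000-0000-0000-000000000000' is a placeholder uuid
select lister_id, visit_type, origins_known, origins_never_visited
from update_metrics('00000000-0000-0000-0000-000000000000'::uuid, now());

Either call returns the refreshed scheduler_metrics rows, since the function performs the upsert and ends with "returning *".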
diff --git a/swh.scheduler.egg-info/SOURCES.txt b/swh.scheduler.egg-info/SOURCES.txt index 3e2ae69..959bfa7 100644 --- a/swh.scheduler.egg-info/SOURCES.txt +++ b/swh.scheduler.egg-info/SOURCES.txt @@ -1,138 +1,132 @@ .gitignore .pre-commit-config.yaml AUTHORS CODE_OF_CONDUCT.md CONTRIBUTORS LICENSE LICENSE.Celery MANIFEST.in Makefile README.md conftest.py mypy.ini pyproject.toml pytest.ini requirements-journal.txt requirements-simulator.txt requirements-swh.txt requirements-test.txt requirements.txt setup.cfg setup.py tox.ini data/README.md data/elastic-template.json data/update-index-settings.json docs/.gitignore docs/Makefile docs/cli.rst docs/conf.py docs/index.rst docs/simulator.rst docs/_static/.placeholder docs/_templates/.placeholder sql/.gitignore sql/Makefile sql/updates/02.sql sql/updates/03.sql sql/updates/04.sql sql/updates/05.sql sql/updates/06.sql sql/updates/07.sql sql/updates/08.sql sql/updates/09.sql sql/updates/10.sql sql/updates/11.sql sql/updates/12.sql sql/updates/13.sql sql/updates/14.sql sql/updates/15.sql sql/updates/16.sql sql/updates/17.sql sql/updates/18.sql sql/updates/19.sql sql/updates/20.sql sql/updates/23.sql sql/updates/24.sql sql/updates/25.sql sql/updates/26.sql sql/updates/27.sql sql/updates/28.sql sql/updates/29.sql sql/updates/30-bis.sql sql/updates/30.sql sql/updates/31.sql +sql/updates/32.sql swh/__init__.py swh.scheduler.egg-info/PKG-INFO swh.scheduler.egg-info/SOURCES.txt swh.scheduler.egg-info/dependency_links.txt swh.scheduler.egg-info/entry_points.txt swh.scheduler.egg-info/requires.txt swh.scheduler.egg-info/top_level.txt swh/scheduler/__init__.py swh/scheduler/backend.py -swh/scheduler/backend_es.py swh/scheduler/cli_utils.py -swh/scheduler/elasticsearch_memory.py swh/scheduler/exc.py swh/scheduler/interface.py swh/scheduler/journal_client.py swh/scheduler/model.py swh/scheduler/py.typed swh/scheduler/pytest_plugin.py swh/scheduler/task.py swh/scheduler/utils.py swh/scheduler/api/__init__.py swh/scheduler/api/client.py swh/scheduler/api/serializers.py swh/scheduler/api/server.py swh/scheduler/celery_backend/__init__.py swh/scheduler/celery_backend/config.py swh/scheduler/celery_backend/pika_listener.py swh/scheduler/celery_backend/recurrent_visits.py swh/scheduler/celery_backend/runner.py swh/scheduler/cli/__init__.py swh/scheduler/cli/admin.py swh/scheduler/cli/celery_monitor.py swh/scheduler/cli/journal.py swh/scheduler/cli/origin.py swh/scheduler/cli/simulator.py swh/scheduler/cli/task.py swh/scheduler/cli/task_type.py swh/scheduler/cli/utils.py swh/scheduler/simulator/__init__.py swh/scheduler/simulator/common.py swh/scheduler/simulator/origin_scheduler.py swh/scheduler/simulator/origins.py swh/scheduler/simulator/task_scheduler.py swh/scheduler/sql/10-superuser-init.sql swh/scheduler/sql/30-schema.sql swh/scheduler/sql/40-func.sql swh/scheduler/sql/50-data.sql swh/scheduler/sql/60-indexes.sql swh/scheduler/tests/__init__.py swh/scheduler/tests/common.py swh/scheduler/tests/conftest.py swh/scheduler/tests/tasks.py swh/scheduler/tests/test_api_client.py swh/scheduler/tests/test_celery_tasks.py swh/scheduler/tests/test_cli.py swh/scheduler/tests/test_cli_celery_monitor.py swh/scheduler/tests/test_cli_journal.py swh/scheduler/tests/test_cli_origin.py swh/scheduler/tests/test_cli_task_type.py swh/scheduler/tests/test_common.py swh/scheduler/tests/test_config.py swh/scheduler/tests/test_init.py swh/scheduler/tests/test_journal_client.py swh/scheduler/tests/test_model.py swh/scheduler/tests/test_recurrent_visits.py 
swh/scheduler/tests/test_scheduler.py swh/scheduler/tests/test_server.py swh/scheduler/tests/test_simulator.py -swh/scheduler/tests/test_utils.py -swh/scheduler/tests/es/__init__.py -swh/scheduler/tests/es/conftest.py -swh/scheduler/tests/es/test_backend_es.py -swh/scheduler/tests/es/test_cli_task.py -swh/scheduler/tests/es/test_elasticsearch_memory.py \ No newline at end of file +swh/scheduler/tests/test_utils.py \ No newline at end of file diff --git a/swh.scheduler.egg-info/requires.txt b/swh.scheduler.egg-info/requires.txt index 0fb232c..03cdbc7 100644 --- a/swh.scheduler.egg-info/requires.txt +++ b/swh.scheduler.egg-info/requires.txt @@ -1,38 +1,37 @@ attrs attrs-strict celery!=5.0.3,>=4.3 click<8.0 -elasticsearch>5.4 flask humanize pika>=1.1.0 psycopg2 pyyaml requests setuptools typing-extensions swh.core[db,http]>=0.14.0 swh.storage>=0.11.1 [journal] swh.journal [simulator] plotille simpy<4,>=3 [testing] pytest pytest-mock celery>=4.3 hypothesis>=3.11.0 swh.lister swh.storage[testing] types-click types-flask types-pyyaml types-requests types-Deprecated swh.journal plotille simpy<4,>=3 diff --git a/swh/__init__.py b/swh/__init__.py index 8d9f151..b36383a 100644 --- a/swh/__init__.py +++ b/swh/__init__.py @@ -1,4 +1,3 @@ from pkgutil import extend_path -from typing import List -__path__: List[str] = extend_path(__path__, __name__) +__path__ = extend_path(__path__, __name__) diff --git a/swh/scheduler/backend_es.py b/swh/scheduler/backend_es.py deleted file mode 100644 index e1b949c..0000000 --- a/swh/scheduler/backend_es.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (C) 2018-2020 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -"""Elastic Search backend - -""" - -from copy import deepcopy -import datetime # noqa -import logging -from typing import Any, Dict - -from elasticsearch import helpers - -from swh.core import utils - -logger = logging.getLogger(__name__) - - -DEFAULT_CONFIG = { - "elasticsearch": { - "cls": "local", - "args": { - "index_name_prefix": "swh-tasks", - "storage_nodes": ["localhost:9200"], - "client_options": { - "sniff_on_start": False, - "sniff_on_connection_fail": True, - "http_compress": False, - "sniffer_timeout": 60, - }, - }, - } -} - - -def get_elasticsearch(cls: str, args: Dict[str, Any] = {}): - """Instantiate an elastic search instance - - """ - if cls == "local": - from elasticsearch import Elasticsearch - elif cls == "memory": - from .elasticsearch_memory import ( # type: ignore # noqa - MemoryElasticsearch as Elasticsearch, - ) - else: - raise ValueError("Unknown elasticsearch class `%s`" % cls) - - return Elasticsearch(**args) - - -class ElasticSearchBackend: - """ElasticSearch backend to index tasks - - This uses an elasticsearch client to actually discuss with the - elasticsearch instance. - - """ - - def __init__(self, **config): - self.config = deepcopy(DEFAULT_CONFIG) - self.config.update(config) - es_conf = self.config["elasticsearch"] - args = deepcopy(es_conf["args"]) - self.index_name_prefix = args.pop("index_name_prefix") - self.storage = get_elasticsearch( - cls=es_conf["cls"], - args={ - "hosts": args.get("storage_nodes", []), - **args.get("client_options", {}), - }, - ) - # document's index type (cf. 
/data/elastic-template.json) - self.doc_type = "task" - - def initialize(self): - self.storage.indices.put_mapping( - index=f"{self.index_name_prefix}-*", - doc_type=self.doc_type, - # to allow type definition below - include_type_name=True, - # to allow install mapping even if no index yet - allow_no_indices=True, - body={ - "properties": { - "task_id": {"type": "double"}, - "task_policy": {"type": "text"}, - "task_status": {"type": "text"}, - "task_run_id": {"type": "double"}, - "arguments": { - "type": "object", - "properties": { - "args": {"type": "nested", "dynamic": False}, - "kwargs": {"type": "text"}, - }, - }, - "type": {"type": "text"}, - "backend_id": {"type": "text"}, - "metadata": {"type": "object", "enabled": False}, - "scheduled": { - "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa - }, - "started": { - "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa - }, - "ended": { - "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa - }, - } - }, - ) - - self.storage.indices.put_settings( - index=f"{self.index_name_prefix}-*", - allow_no_indices=True, - body={ - "index": { - "codec": "best_compression", - "refresh_interval": "1s", - "number_of_shards": 1, - } - }, - ) - - def create(self, index_name) -> None: - """Create and initialize index_name with mapping for all indices - matching `swh-tasks-` pattern - - """ - assert index_name.startswith(self.index_name_prefix) - self.storage.indices.create(index_name) - - def compute_index_name(self, year, month): - """Given a year, month, compute the index's name. - - """ - return "%s-%s-%s" % (self.index_name_prefix, year, "%02d" % month) - - def mget(self, index_name, doc_ids, chunk_size=500, source=True): - """Retrieve document's full content according to their ids as per - source's setup. - - The `source` allows to retrieve only what's interesting, e.g: - - source=True ; gives back the original indexed data - - source=False ; returns without the original _source field - - source=['task_id'] ; returns only task_id in the _source field - - Args: - index_name (str): Name of the concerned index. - doc_ids (generator): Generator of ids to retrieve - chunk_size (int): Number of documents chunk to send for retrieval - source (bool/[str]): Source of information to return - - Yields: - document indexed as per source's setup - - """ - if isinstance(source, list): - source = {"_source": ",".join(source)} - else: - source = {"_source": str(source).lower()} - - for ids in utils.grouper(doc_ids, n=1000): - res = self.storage.mget( - body={"ids": list(ids)}, - index=index_name, - doc_type=self.doc_type, - params=source, - ) - if not res: - logger.error("Error during retrieval of data, skipping!") - continue - - for doc in res["docs"]: - found = doc.get("found") - if not found: - msg = "Doc id %s not found, not indexed yet" % doc["_id"] - logger.warning(msg) - continue - yield doc["_source"] - - def _streaming_bulk(self, index_name, doc_stream, chunk_size=500): - """Bulk index data and returns the successful indexed data's - identifier. - - Args: - index_name (str): Name of the concerned index. 
- doc_stream (generator): Generator of documents to index - chunk_size (int): Number of documents chunk to send for indexation - - Yields: - document id indexed - - """ - actions = ( - { - "_index": index_name, - "_op_type": "index", - "_type": self.doc_type, - "_source": data, - } - for data in doc_stream - ) - for ok, result in helpers.streaming_bulk( - client=self.storage, - actions=actions, - chunk_size=chunk_size, - raise_on_error=False, - raise_on_exception=False, - ): - if not ok: - logger.error("Error during %s indexation. Skipping.", result) - continue - yield result["index"]["_id"] - - def is_index_opened(self, index_name: str) -> bool: - """Determine if an index is opened or not - - """ - try: - self.storage.indices.stats(index_name) - return True - except Exception: - # fails when indice is closed (no other api call found) - return False - - def streaming_bulk(self, index_name, doc_stream, chunk_size=500, source=True): - """Bulk index data and returns the successful indexed data as per - source's setup. - - the `source` permits to retrieve only what's of interest to - us, e.g: - - - source=True ; gives back the original indexed data - - source=False ; returns without the original _source field - - source=['task_id'] ; returns only task_id in the _source field - - Note that: - - if the index is closed, it will be opened - - if the index does not exist, it will be created and opened - - This keeps the index opened for performance reasons. - - Args: - index_name (str): Name of the concerned index. - doc_stream (generator): Document generator to index - chunk_size (int): Number of documents chunk to send - source (bool, [str]): the information to return - - """ - # index must exist - if not self.storage.indices.exists(index_name): - self.create(index_name) - # index must be opened - if not self.is_index_opened(index_name): - self.storage.indices.open(index_name) - - indexed_ids = self._streaming_bulk( - index_name, doc_stream, chunk_size=chunk_size - ) - yield from self.mget( - index_name, indexed_ids, chunk_size=chunk_size, source=source - ) diff --git a/swh/scheduler/celery_backend/recurrent_visits.py b/swh/scheduler/celery_backend/recurrent_visits.py index 8f43d8e..2a9ebe6 100644 --- a/swh/scheduler/celery_backend/recurrent_visits.py +++ b/swh/scheduler/celery_backend/recurrent_visits.py @@ -1,311 +1,323 @@ # Copyright (C) 2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """This schedules the recurrent visits, for listed origins, in Celery. For "oneshot" (save code now, lister) tasks, check the :mod:`swh.scheduler.celery_backend.runner` and :mod:`swh.scheduler.celery_backend.pika_listener` modules. 
""" from __future__ import annotations from itertools import chain import logging from queue import Empty, Queue import random from threading import Thread import time from typing import TYPE_CHECKING, Any, Dict, List, Tuple from kombu.utils.uuid import uuid from swh.scheduler.celery_backend.config import get_available_slots if TYPE_CHECKING: from ..interface import SchedulerInterface from ..model import ListedOrigin logger = logging.getLogger(__name__) _VCS_POLICY_WEIGHTS: Dict[str, float] = { "already_visited_order_by_lag": 49, "never_visited_oldest_update_first": 49, "origins_without_last_update": 2, } POLICY_WEIGHTS: Dict[str, Dict[str, float]] = { "default": { "already_visited_order_by_lag": 50, "never_visited_oldest_update_first": 50, }, "git": _VCS_POLICY_WEIGHTS, "hg": _VCS_POLICY_WEIGHTS, "svn": _VCS_POLICY_WEIGHTS, "cvs": _VCS_POLICY_WEIGHTS, "bzr": _VCS_POLICY_WEIGHTS, } + +POLICY_ADDITIONAL_PARAMETERS: Dict[str, Dict[str, Any]] = { + "git": { + "already_visited_order_by_lag": {"tablesample": 0.1}, + "never_visited_oldest_update_first": {"tablesample": 0.1}, + "origins_without_last_update": {"tablesample": 0.1}, + } +} + """Scheduling policies to use to retrieve visits for the given visit types, with their relative weights""" MIN_SLOTS_RATIO = 0.05 """Quantity of slots that need to be available (with respect to max_queue_length) for :py:func:`~swh.scheduler.interface.SchedulerInterface.grab_next_visits` to trigger""" QUEUE_FULL_BACKOFF = 60 """Backoff time (in seconds) if there's fewer than :py:data:`MIN_SLOTS_RATIO` slots available in the queue.""" NO_ORIGINS_SCHEDULED_BACKOFF = 20 * 60 """Backoff time (in seconds) if no origins have been scheduled in the current iteration""" BACKOFF_SPLAY = 5.0 """Amplitude of the fuzziness between backoffs""" TERMINATE = object() """Termination request received from command queue (singleton used for identity comparison)""" def grab_next_visits_policy_weights( scheduler: SchedulerInterface, visit_type: str, num_visits: int ) -> List[ListedOrigin]: """Get the next ``num_visits`` for the given ``visit_type`` using the corresponding set of scheduling policies. The :py:data:`POLICY_WEIGHTS` dict sets, for each visit type, the scheduling policies used to pull the next tasks, and what proportion of the available num_visits they take. This function emits a warning if the ratio of retrieved origins is off of the requested ratio by more than 5%. 
Returns: at most ``num_visits`` :py:class:`~swh.scheduler.model.ListedOrigin` objects """ policy_weights = POLICY_WEIGHTS.get(visit_type, POLICY_WEIGHTS["default"]) total_weight = sum(policy_weights.values()) if not total_weight: raise ValueError(f"No policy weights set for visit type {visit_type}") policy_ratio = { policy: weight / total_weight for policy, weight in policy_weights.items() } fetched_origins: Dict[str, List[ListedOrigin]] = {} for policy, ratio in policy_ratio.items(): num_tasks_to_send = int(num_visits * ratio) fetched_origins[policy] = scheduler.grab_next_visits( - visit_type, num_tasks_to_send, policy=policy + visit_type, + num_tasks_to_send, + policy=policy, + **POLICY_ADDITIONAL_PARAMETERS.get(visit_type, {}).get(policy, {}), ) all_origins: List[ListedOrigin] = list( chain.from_iterable(fetched_origins.values()) ) if not all_origins: return [] # Check whether the ratios of origins fetched are skewed with respect to the # ones we requested fetched_origin_ratios = { policy: len(origins) / len(all_origins) for policy, origins in fetched_origins.items() } for policy, expected_ratio in policy_ratio.items(): # 5% of skew with respect to request if abs(fetched_origin_ratios[policy] - expected_ratio) / expected_ratio > 0.05: logger.info( "Skewed fetch for visit type %s with policy %s: fetched %s, " "requested %s", visit_type, policy, fetched_origin_ratios[policy], expected_ratio, ) return all_origins def splay(): """Return a random short interval by which to vary the backoffs for the visit scheduling threads""" return random.uniform(0, BACKOFF_SPLAY) def send_visits_for_visit_type( scheduler: SchedulerInterface, app, visit_type: str, task_type: Dict, ) -> float: """Schedule the next batch of visits for the given ``visit_type``. First, we determine the number of available slots by introspecting the RabbitMQ queue. If there's fewer than :py:data:`MIN_SLOTS_RATIO` slots available in the queue, we wait for :py:data:`QUEUE_FULL_BACKOFF` seconds. This avoids running the expensive :py:func:`~swh.scheduler.interface.SchedulerInterface.grab_next_visits` queries when there's not many jobs to queue. Once there's more than :py:data:`MIN_SLOTS_RATIO` slots available, we run :py:func:`grab_next_visits_policy_weights` to retrieve the next set of origin visits to schedule, and we send them to celery. If the last scheduling attempt didn't return any origins, we sleep for :py:data:`NO_ORIGINS_SCHEDULED_BACKOFF` seconds. This avoids running the expensive :py:func:`~swh.scheduler.interface.SchedulerInterface.grab_next_visits` queries too often if there's nothing left to schedule. Returns: the earliest :py:func:`time.monotonic` value at which to run the next iteration of the loop. 
""" queue_name = task_type["backend_name"] max_queue_length = task_type.get("max_queue_length") or 0 min_available_slots = max_queue_length * MIN_SLOTS_RATIO current_iteration_start = time.monotonic() # Check queue level available_slots = get_available_slots(app, queue_name, max_queue_length) logger.debug( "%s available slots for visit type %s in queue %s", available_slots, visit_type, queue_name, ) if available_slots < min_available_slots: return current_iteration_start + QUEUE_FULL_BACKOFF origins = grab_next_visits_policy_weights(scheduler, visit_type, available_slots) if not origins: logger.debug("No origins to visit for type %s", visit_type) return current_iteration_start + NO_ORIGINS_SCHEDULED_BACKOFF # Try to smooth the ingestion load, origins pulled by different # scheduling policies have different resource usage patterns random.shuffle(origins) for origin in origins: task_dict = origin.as_task_dict() app.send_task( queue_name, task_id=uuid(), args=task_dict["arguments"]["args"], kwargs=task_dict["arguments"]["kwargs"], queue=queue_name, ) logger.info( "%s: %s visits scheduled in queue %s", visit_type, len(origins), queue_name, ) # When everything worked, we can try to schedule origins again ASAP. return time.monotonic() def visit_scheduler_thread( config: Dict, visit_type: str, command_queue: Queue[object], exc_queue: Queue[Tuple[str, BaseException]], ): """Target function for the visit sending thread, which initializes local connections and handles exceptions by sending them back to the main thread.""" from swh.scheduler import get_scheduler from swh.scheduler.celery_backend.config import build_app try: # We need to reinitialize these connections because they're not generally # thread-safe app = build_app(config.get("celery")) scheduler = get_scheduler(**config["scheduler"]) task_type = scheduler.get_task_type(f"load-{visit_type}") if task_type is None: raise ValueError(f"Unknown task type: load-{visit_type}") next_iteration = time.monotonic() while True: # vary the next iteration time a little bit next_iteration = next_iteration + splay() while time.monotonic() < next_iteration: # Wait for next iteration to start. Listen for termination message. 
try: msg = command_queue.get(block=True, timeout=1) except Empty: continue if msg is TERMINATE: return else: logger.warn("Received unexpected message %s in command queue", msg) next_iteration = send_visits_for_visit_type( scheduler, app, visit_type, task_type ) except BaseException as e: exc_queue.put((visit_type, e)) VisitSchedulerThreads = Dict[str, Tuple[Thread, Queue]] """Dict storing the visit scheduler threads and their command queues""" def spawn_visit_scheduler_thread( threads: VisitSchedulerThreads, exc_queue: Queue[Tuple[str, BaseException]], config: Dict[str, Any], visit_type: str, ): """Spawn a new thread to schedule the visits of type ``visit_type``.""" command_queue: Queue[object] = Queue() thread = Thread( target=visit_scheduler_thread, kwargs={ "config": config, "visit_type": visit_type, "command_queue": command_queue, "exc_queue": exc_queue, }, ) threads[visit_type] = (thread, command_queue) thread.start() def terminate_visit_scheduler_threads(threads: VisitSchedulerThreads) -> List[str]: """Terminate all visit scheduler threads""" logger.info("Termination requested...") for _, command_queue in threads.values(): command_queue.put(TERMINATE) loops = 0 while threads and loops < 10: logger.info( "Terminating visit scheduling threads: %s", ", ".join(sorted(threads)) ) loops += 1 for visit_type, (thread, _) in list(threads.items()): thread.join(timeout=1) if not thread.is_alive(): logger.debug("Thread %s terminated", visit_type) del threads[visit_type] if threads: logger.warn( "Could not reap the following threads after 10 attempts: %s", ", ".join(sorted(threads)), ) return list(sorted(threads)) diff --git a/swh/scheduler/cli/task.py b/swh/scheduler/cli/task.py index c79d0fc..e003985 100644 --- a/swh/scheduler/cli/task.py +++ b/swh/scheduler/cli/task.py @@ -1,754 +1,590 @@ # Copyright (C) 2016-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from __future__ import annotations # WARNING: do not import unnecessary things here to keep cli startup time under # control import locale -import logging -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional +from typing import TYPE_CHECKING, Iterator, List, Optional import click from . import cli if TYPE_CHECKING: import datetime # importing swh.storage.interface triggers the load of 300+ modules, so... 
from swh.model.model import Origin from swh.storage.interface import StorageInterface locale.setlocale(locale.LC_ALL, "") CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) DATETIME = click.DateTime() def format_dict(d): """Recursively format date objects in the dict passed as argument""" import datetime ret = {} for k, v in d.items(): if isinstance(v, (datetime.date, datetime.datetime)): v = v.isoformat() elif isinstance(v, dict): v = format_dict(v) ret[k] = v return ret def pretty_print_list(list, indent=0): """Pretty-print a list""" return "".join("%s%r\n" % (" " * indent, item) for item in list) def pretty_print_dict(dict, indent=0): """Pretty-print a list""" return "".join( "%s%s: %r\n" % (" " * indent, click.style(key, bold=True), value) for key, value in sorted(dict.items()) ) def pretty_print_run(run, indent=4): fmt = ( "{indent}{backend_id} [{status}]\n" "{indent} scheduled: {scheduled} [{started}:{ended}]" ) return fmt.format(indent=" " * indent, **format_dict(run)) def pretty_print_task(task, full=False): """Pretty-print a task If 'full' is True, also print the status and priority fields. >>> import datetime >>> task = { ... 'id': 1234, ... 'arguments': { ... 'args': ['foo', 'bar', True], ... 'kwargs': {'key': 'value', 'key2': 42}, ... }, ... 'current_interval': datetime.timedelta(hours=1), ... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818), ... 'policy': 'oneshot', ... 'priority': None, ... 'status': 'next_run_not_scheduled', ... 'type': 'test_task', ... } >>> print(click.unstyle(pretty_print_task(task))) Task 1234 Next run: ... (2019-02-21T13:52:35.407818) Interval: 1:00:00 Type: test_task Policy: oneshot Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 >>> print(click.unstyle(pretty_print_task(task, full=True))) Task 1234 Next run: ... (2019-02-21T13:52:35.407818) Interval: 1:00:00 Type: test_task Policy: oneshot Status: next_run_not_scheduled Priority:\x20 Args: 'foo' 'bar' True Keyword args: key: 'value' key2: 42 """ import humanize next_run = task["next_run"] lines = [ "%s %s\n" % (click.style("Task", bold=True), task["id"]), click.style(" Next run: ", bold=True), "%s (%s)" % (humanize.naturaldate(next_run), next_run.isoformat()), "\n", click.style(" Interval: ", bold=True), str(task["current_interval"]), "\n", click.style(" Type: ", bold=True), task["type"] or "", "\n", click.style(" Policy: ", bold=True), task["policy"] or "", "\n", ] if full: lines += [ click.style(" Status: ", bold=True), task["status"] or "", "\n", click.style(" Priority: ", bold=True), task["priority"] or "", "\n", ] lines += [ click.style(" Args:\n", bold=True), pretty_print_list(task["arguments"]["args"], indent=4), click.style(" Keyword args:\n", bold=True), pretty_print_dict(task["arguments"]["kwargs"], indent=4), ] return "".join(lines) @cli.group("task") @click.pass_context def task(ctx): """Manipulate tasks.""" pass @task.command("schedule") @click.option( "--columns", "-c", multiple=True, default=["type", "args", "kwargs", "next_run"], type=click.Choice(["type", "args", "kwargs", "policy", "next_run"]), help="columns present in the CSV file", ) @click.option("--delimiter", "-d", default=",") @click.argument("file", type=click.File(encoding="utf-8")) @click.pass_context def schedule_tasks(ctx, columns, delimiter, file): """Schedule tasks from a CSV input file. 
The following columns are expected, and can be set through the -c option: - type: the type of the task to be scheduled (mandatory) - args: the arguments passed to the task (JSON list, defaults to an empty list) - kwargs: the keyword arguments passed to the task (JSON object, defaults to an empty dict) - next_run: the date at which the task should run (datetime, defaults to now) The CSV can be read either from a named file, or from stdin (use - as filename). Use sample: cat scheduling-task.txt | \ python3 -m swh.scheduler.cli \ --database 'service=swh-scheduler-dev' \ task schedule \ --columns type --columns kwargs --columns policy \ --delimiter ';' - """ import csv import json from swh.scheduler.utils import utcnow tasks = [] now = utcnow() scheduler = ctx.obj["scheduler"] if not scheduler: raise ValueError("Scheduler class (local/remote) must be instantiated") reader = csv.reader(file, delimiter=delimiter) for line in reader: task = dict(zip(columns, line)) args = json.loads(task.pop("args", "[]")) kwargs = json.loads(task.pop("kwargs", "{}")) task["arguments"] = { "args": args, "kwargs": kwargs, } task["next_run"] = task.get("next_run", now) tasks.append(task) created = scheduler.create_tasks(tasks) output = [ "Created %d tasks\n" % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo_via_pager("\n".join(output)) @task.command("add") @click.argument("type", nargs=1, required=True) @click.argument("options", nargs=-1) @click.option( "--policy", "-p", default="recurring", type=click.Choice(["recurring", "oneshot"]) ) @click.option( "--priority", "-P", default=None, type=click.Choice(["low", "normal", "high"]) ) @click.option("--next-run", "-n", default=None) @click.pass_context def schedule_task(ctx, type, options, policy, priority, next_run): """Schedule one task from arguments. The first argument is the name of the task type, further ones are positional and keyword argument(s) of the task, in YAML format. Keyword args are of the form key=value. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task add list-pypi swh-scheduler --database 'service=swh-scheduler' \ task add list-debian-distribution --policy=oneshot distribution=stretch Note: if the priority is not given, the task won't have the priority set, which is considered as the lowest priority level. """ from swh.scheduler.utils import utcnow from .utils import parse_options scheduler = ctx.obj["scheduler"] if not scheduler: raise ValueError("Scheduler class (local/remote) must be instantiated") now = utcnow() (args, kw) = parse_options(options) task = { "type": type, "policy": policy, "priority": priority, "arguments": {"args": args, "kwargs": kw,}, "next_run": next_run or now, } created = scheduler.create_tasks([task]) output = [ "Created %d tasks\n" % len(created), ] for task in created: output.append(pretty_print_task(task)) click.echo("\n".join(output)) def iter_origins( # use string annotations to prevent some pkg loading storage: "StorageInterface", page_token: "Optional[str]" = None, ) -> "Iterator[Origin]": """Iterate over origins in the storage. Optionally starting from page_token. This logs regularly an info message during pagination with the page_token. This, in order to feed it back to the cli if the process interrupted. 
Yields origin model objects from the storage """ while True: page_result = storage.origin_list(page_token=page_token) page_token = page_result.next_page_token yield from page_result.results if not page_token: break click.echo(f"page_token: {page_token}\n") @task.command("schedule_origins") @click.argument("type", nargs=1, required=True) @click.argument("options", nargs=-1) @click.option( "--batch-size", "-b", "origin_batch_size", default=10, show_default=True, type=int, help="Number of origins per task", ) @click.option( "--page-token", default=0, show_default=True, type=str, help="Only schedule tasks for origins whose ID is greater", ) @click.option( "--limit", default=None, type=int, help="Limit the tasks scheduling up to this number of tasks", ) @click.option("--storage-url", "-g", help="URL of the (graph) storage API") @click.option( "--dry-run/--no-dry-run", is_flag=True, default=False, help="List only what would be scheduled.", ) @click.pass_context def schedule_origin_metadata_index( ctx, type, options, storage_url, origin_batch_size, page_token, limit, dry_run ): """Schedules tasks for origins that are already known. The first argument is the name of the task type, further ones are keyword argument(s) of the task in the form key=value, where value is in YAML format. Usage sample: swh-scheduler --database 'service=swh-scheduler' \ task schedule_origins index-origin-metadata """ from itertools import islice from swh.storage import get_storage from .utils import parse_options, schedule_origin_batches scheduler = ctx.obj["scheduler"] storage = get_storage("remote", url=storage_url) if dry_run: scheduler = None (args, kw) = parse_options(options) if args: raise click.ClickException("Only keywords arguments are allowed.") origins = iter_origins(storage, page_token=page_token) if limit: origins = islice(origins, limit) origin_urls = (origin.url for origin in origins) schedule_origin_batches(scheduler, type, origin_urls, origin_batch_size, kw) @task.command("list-pending") @click.argument("task-types", required=True, nargs=-1) @click.option( "--limit", "-l", "num_tasks", required=False, type=click.INT, help="The maximum number of tasks to fetch", ) @click.option( "--before", "-b", required=False, type=DATETIME, help="List all jobs supposed to run before the given date", ) @click.pass_context def list_pending_tasks(ctx, task_types, num_tasks, before): """List tasks with no priority that are going to be run. You can override the number of tasks to fetch with the --limit flag. 
""" scheduler = ctx.obj["scheduler"] if not scheduler: raise ValueError("Scheduler class (local/remote) must be instantiated") output = [] for task_type in task_types: pending = scheduler.peek_ready_tasks( task_type, timestamp=before, num_tasks=num_tasks, ) output.append("Found %d %s tasks\n" % (len(pending), task_type)) for task in pending: output.append(pretty_print_task(task)) click.echo("\n".join(output)) @task.command("list") @click.option( "--task-id", "-i", default=None, multiple=True, metavar="ID", help="List only tasks whose id is ID.", ) @click.option( "--task-type", "-t", default=None, multiple=True, metavar="TYPE", help="List only tasks of type TYPE", ) @click.option( "--limit", "-l", required=False, type=click.INT, help="The maximum number of tasks to fetch.", ) @click.option( "--status", "-s", multiple=True, metavar="STATUS", type=click.Choice( ("next_run_not_scheduled", "next_run_scheduled", "completed", "disabled") ), default=None, help="List tasks whose status is STATUS.", ) @click.option( "--policy", "-p", default=None, type=click.Choice(["recurring", "oneshot"]), help="List tasks whose policy is POLICY.", ) @click.option( "--priority", "-P", default=None, multiple=True, type=click.Choice(["all", "low", "normal", "high"]), help="List tasks whose priority is PRIORITY.", ) @click.option( "--before", "-b", required=False, type=DATETIME, metavar="DATETIME", help="Limit to tasks supposed to run before the given date.", ) @click.option( "--after", "-a", required=False, type=DATETIME, metavar="DATETIME", help="Limit to tasks supposed to run after the given date.", ) @click.option( "--list-runs", "-r", is_flag=True, default=False, help="Also list past executions of each task.", ) @click.pass_context def list_tasks( ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs ): """List tasks. """ from operator import itemgetter scheduler = ctx.obj["scheduler"] if not scheduler: raise ValueError("Scheduler class (local/remote) must be instantiated") if not task_type: task_type = [x["type"] for x in scheduler.get_task_types()] # if task_id is not given, default value for status is # 'next_run_not_scheduled' # if task_id is given, default status is 'all' if task_id is None and status is None: status = ["next_run_not_scheduled"] if status and "all" in status: status = None if priority and "all" in priority: priority = None output = [] tasks = scheduler.search_tasks( task_id=task_id, task_type=task_type, status=status, priority=priority, policy=policy, before=before, after=after, limit=limit, ) if list_runs: runs = {t["id"]: [] for t in tasks} for r in scheduler.get_task_runs([task["id"] for task in tasks]): runs[r["task"]].append(r) else: runs = {} output.append("Found %d tasks\n" % (len(tasks))) for task in sorted(tasks, key=itemgetter("id")): output.append(pretty_print_task(task, full=True)) if runs.get(task["id"]): output.append(click.style(" Executions:", bold=True)) for run in sorted(runs[task["id"]], key=itemgetter("id")): output.append(pretty_print_run(run, indent=4)) click.echo("\n".join(output)) @task.command("respawn") @click.argument("task-ids", required=True, nargs=-1) @click.option( "--next-run", "-n", required=False, type=DATETIME, metavar="DATETIME", default=None, help="Re spawn the selected tasks at this date", ) @click.pass_context def respawn_tasks(ctx, task_ids: List[str], next_run: datetime.datetime): """Respawn tasks. Respawn tasks given by their ids (see the 'task list' command to find task ids) at the given date (immediately by default). 
Eg. swh-scheduler task respawn 1 3 12 """ from swh.scheduler.utils import utcnow scheduler = ctx.obj["scheduler"] if not scheduler: raise ValueError("Scheduler class (local/remote) must be instantiated") if next_run is None: next_run = utcnow() output = [] task_ids_int = [int(id_) for id_ in task_ids] scheduler.set_status_tasks( task_ids_int, status="next_run_not_scheduled", next_run=next_run ) output.append("Respawn tasks %s\n" % (task_ids_int,)) click.echo("\n".join(output)) - - -@task.command("archive") -@click.option( - "--before", - "-b", - default=None, - help="""Task whose ended date is anterior will be archived. - Default to current month's first day.""", -) -@click.option( - "--after", - "-a", - default=None, - help="""Task whose ended date is after the specified date will - be archived. Default to prior month's first day.""", -) -@click.option( - "--batch-index", - default=1000, - type=click.INT, - help="Batch size of tasks to read from db to archive", -) -@click.option( - "--bulk-index", - default=200, - type=click.INT, - help="Batch size of tasks to bulk index", -) -@click.option( - "--batch-clean", - default=1000, - type=click.INT, - help="Batch size of task to clean after archival", -) -@click.option( - "--dry-run/--no-dry-run", - is_flag=True, - default=False, - help="Default to list only what would be archived.", -) -@click.option("--verbose", is_flag=True, default=False, help="Verbose mode") -@click.option( - "--cleanup/--no-cleanup", - is_flag=True, - default=True, - help="Clean up archived tasks (default)", -) -@click.option( - "--start-from", - type=click.STRING, - default=None, - help="(Optional) default page to start from.", -) -@click.pass_context -def archive_tasks( - ctx, - before, - after, - batch_index, - bulk_index, - batch_clean, - dry_run, - verbose, - cleanup, - start_from, -): - """Archive task/task_run whose (task_type is 'oneshot' and task_status - is 'completed') or (task_type is 'recurring' and task_status is - 'disabled'). - - With --dry-run flag set (default), only list those. - - """ - from itertools import groupby - - from swh.core.utils import grouper - from swh.scheduler.backend_es import ElasticSearchBackend - from swh.scheduler.utils import utcnow - - config = ctx.obj["config"] - scheduler = ctx.obj["scheduler"] - - if not scheduler: - raise ValueError("Scheduler class (local/remote) must be instantiated") - - logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) - logger = logging.getLogger(__name__) - logging.getLogger("urllib3").setLevel(logging.WARN) - logging.getLogger("elasticsearch").setLevel(logging.ERROR) - if dry_run: - logger.info("**DRY-RUN** (only reading db)") - if not cleanup: - logger.info("**NO CLEANUP**") - - es_storage = ElasticSearchBackend(**config) - now = utcnow() - - # Default to archive tasks from a rolling month starting the week - # prior to the current one - if not before: - before = now.shift(weeks=-1).format("YYYY-MM-DD") - - if not after: - after = now.shift(weeks=-1).shift(months=-1).format("YYYY-MM-DD") - - logger.debug( - "index: %s; cleanup: %s; period: [%s ; %s]" - % (not dry_run, not dry_run and cleanup, after, before) - ) - - def get_index_name( - data: Dict[str, Any], es_storage: ElasticSearchBackend = es_storage - ) -> str: - """Given a data record, determine the index's name through its ending - date. This varies greatly depending on the task_run's - status. 
- - """ - date = data.get("started") - if not date: - date = data["scheduled"] - return es_storage.compute_index_name(date.year, date.month) - - def index_data(before, page_token, batch_index): - while True: - result = scheduler.filter_task_to_archive( - after, before, page_token=page_token, limit=batch_index - ) - tasks_sorted = sorted(result["tasks"], key=get_index_name) - groups = groupby(tasks_sorted, key=get_index_name) - for index_name, tasks_group in groups: - logger.debug("Index tasks to %s" % index_name) - if dry_run: - for task in tasks_group: - yield task - continue - - yield from es_storage.streaming_bulk( - index_name, - tasks_group, - source=["task_id", "task_run_id"], - chunk_size=bulk_index, - ) - - page_token = result.get("next_page_token") - if page_token is None: - break - - gen = index_data(before, page_token=start_from, batch_index=batch_index) - if cleanup: - for task_ids in grouper(gen, n=batch_clean): - task_ids = list(task_ids) - logger.info("Clean up %s tasks: [%s, ...]" % (len(task_ids), task_ids[0])) - if dry_run: # no clean up - continue - ctx.obj["scheduler"].delete_archived_tasks(task_ids) - else: - for task_ids in grouper(gen, n=batch_index): - task_ids = list(task_ids) - logger.info("Indexed %s tasks: [%s, ...]" % (len(task_ids), task_ids[0])) - - logger.debug("Done!") diff --git a/swh/scheduler/elasticsearch_memory.py b/swh/scheduler/elasticsearch_memory.py deleted file mode 100644 index 2dd958c..0000000 --- a/swh/scheduler/elasticsearch_memory.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) 2018-2019 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -"""Memory Elastic Search backend - -""" - -from ast import literal_eval -import datetime # noqa serialization purposes -import hashlib -import logging -from typing import Optional - -import psycopg2 # noqa serialization purposes - -logger = logging.getLogger(__name__) - - -class BasicSerializer: - """For memory elastic search implementation (not for production) - - """ - - def __init__(self, *args, **kwargs): - pass - - def dumps(self, *args, **kwargs): - return str(*args) - - -class BasicTransport: - """For memory elastic search implementation, (not for production) - - """ - - def __init__(self, *args, **kwargs): - self.serializer = BasicSerializer() - - -class MemoryElasticsearch: - """Memory Elasticsearch instance (for test purposes) - - Partial implementation oriented towards index storage (and not search) - - For now, its sole client is the scheduler for task archival purposes. 
- - """ - - def __init__(self, *args, **kwargs): - self.index = {} - self.mapping = {} - self.settings = {} - self.indices = self # HACK - self.main_mapping_key: Optional[str] = None - self.main_settings_key: Optional[str] = None - self.transport = BasicTransport() - - def create(self, index, **kwargs): - logger.debug(f"create index {index}") - logger.debug(f"indices: {self.index}") - logger.debug(f"mapping: {self.mapping}") - logger.debug(f"settings: {self.settings}") - self.index[index] = { - "status": "opened", - "data": {}, - "mapping": self.get_mapping(self.main_mapping_key), - "settings": self.get_settings(self.main_settings_key), - } - logger.debug(f"index {index} created") - - def close(self, index, **kwargs): - """Close index""" - idx = self.index.get(index) - if idx: - idx["status"] = "closed" - - def open(self, index, **kwargs): - """Open index""" - idx = self.index.get(index) - if idx: - idx["status"] = "opened" - - def bulk(self, body, **kwargs): - """Bulk insert document in index""" - assert isinstance(body, str) - all_data = body.split("\n") - if all_data[-1] == "": - all_data = all_data[:-1] # drop the empty line if any - ids = [] - # data is sent as tuple (index, data-to-index) - for i in range(0, len(all_data), 2): - # The first entry is about the index to use - # not about a data to index - # find the index - index_data = literal_eval(all_data[i]) - idx_name = index_data["index"]["_index"] - # associated data to index - data = all_data[i + 1] - _id = hashlib.sha1(data.encode("utf-8")).hexdigest() - parsed_data = eval(data) # for datetime - self.index[idx_name]["data"][_id] = parsed_data - ids.append(_id) - - # everything is indexed fine - return {"items": [{"index": {"status": 200, "_id": _id,}} for _id in ids]} - - def mget(self, *args, body, index, **kwargs): - """Bulk indexed documents retrieval""" - idx = self.index[index] - docs = [] - idx_docs = idx["data"] - for _id in body["ids"]: - doc = idx_docs.get(_id) - if doc: - d = { - "found": True, - "_source": doc, - } - docs.append(d) - return {"docs": docs} - - def stats(self, index, **kwargs): - idx = self.index[index] # will raise if it does not exist - if not idx or idx["status"] == "closed": - raise ValueError("Closed index") # simulate issue if index closed - - def exists(self, index, **kwargs): - return self.index.get(index) is not None - - def put_mapping(self, index, body, **kwargs): - self.mapping[index] = body - self.main_mapping_key = index - - def get_mapping(self, index, **kwargs): - return self.mapping.get(index) or self.index.get(index, {}).get("mapping", {}) - - def put_settings(self, index, body, **kwargs): - self.settings[index] = body - self.main_settings_key = index - - def get_settings(self, index, **kwargs): - return self.settings.get(index) or self.index.get(index, {}).get("settings", {}) diff --git a/swh/scheduler/sql/30-schema.sql b/swh/scheduler/sql/30-schema.sql index 4fce1ae..7de6d48 100644 --- a/swh/scheduler/sql/30-schema.sql +++ b/swh/scheduler/sql/30-schema.sql @@ -1,229 +1,229 @@ create table dbversion ( version int primary key, release timestamptz not null, description text not null ); comment on table dbversion is 'Schema update tracking'; comment on column dbversion.version is 'SQL schema version'; comment on column dbversion.release is 'Version deployment timestamp'; comment on column dbversion.description is 'Version description'; insert into dbversion (version, release, description) - values (31, now(), 'Work In Progress'); + values (32, now(), 'Work In Progress'); create table 
task_type ( type text primary key, description text not null, backend_name text not null, default_interval interval, min_interval interval, max_interval interval, backoff_factor float, max_queue_length bigint, num_retries bigint, retry_delay interval ); comment on table task_type is 'Types of schedulable tasks'; comment on column task_type.type is 'Short identifier for the task type'; comment on column task_type.description is 'Human-readable task description'; comment on column task_type.backend_name is 'Name of the task in the job-running backend'; comment on column task_type.default_interval is 'Default interval for newly scheduled tasks'; comment on column task_type.min_interval is 'Minimum interval between two runs of a task'; comment on column task_type.max_interval is 'Maximum interval between two runs of a task'; comment on column task_type.backoff_factor is 'Adjustment factor for the backoff between two task runs'; comment on column task_type.max_queue_length is 'Maximum length of the queue for this type of tasks'; comment on column task_type.num_retries is 'Default number of retries on transient failures'; comment on column task_type.retry_delay is 'Retry delay for the task'; create type task_status as enum ('next_run_not_scheduled', 'next_run_scheduled', 'completed', 'disabled'); comment on type task_status is 'Status of a given task'; create type task_policy as enum ('recurring', 'oneshot'); comment on type task_policy is 'Recurrence policy of the given task'; create type task_priority as enum('high', 'normal', 'low'); comment on type task_priority is 'Priority of the given task'; create table priority_ratio( id task_priority primary key, ratio float not null ); comment on table priority_ratio is 'Oneshot task''s reading ratio per priority'; comment on column priority_ratio.id is 'Task priority id'; comment on column priority_ratio.ratio is 'Percentage of tasks to read per priority'; insert into priority_ratio (id, ratio) values ('high', 0.5); insert into priority_ratio (id, ratio) values ('normal', 0.3); insert into priority_ratio (id, ratio) values ('low', 0.2); create table task ( id bigserial primary key, type text not null references task_type(type), arguments jsonb not null, next_run timestamptz not null, current_interval interval, status task_status not null, policy task_policy not null default 'recurring', retries_left bigint not null default 0, priority task_priority references priority_ratio(id), check (policy <> 'recurring' or current_interval is not null) ); comment on table task is 'Schedule of recurring tasks'; comment on column task.arguments is 'Arguments passed to the underlying job scheduler. 
' 'Contains two keys, ''args'' (list) and ''kwargs'' (object).'; comment on column task.next_run is 'The next run of this task should be run on or after that time'; comment on column task.current_interval is 'The interval between two runs of this task, ' 'taking into account the backoff factor'; comment on column task.policy is 'Whether the task is one-shot or recurring'; comment on column task.retries_left is 'The number of "short delay" retries of the task in case of ' 'transient failure'; comment on column task.priority is 'Policy of the given task'; comment on column task.id is 'Task Identifier'; comment on column task.type is 'References task_type table'; comment on column task.status is 'Task status (''next_run_not_scheduled'', ''next_run_scheduled'', ''completed'', ''disabled'')'; create type task_run_status as enum ('scheduled', 'started', 'eventful', 'uneventful', 'failed', 'permfailed', 'lost'); comment on type task_run_status is 'Status of a given task run'; create table task_run ( id bigserial primary key, task bigint not null references task(id), backend_id text, scheduled timestamptz, started timestamptz, ended timestamptz, metadata jsonb, status task_run_status not null default 'scheduled' ); comment on table task_run is 'History of task runs sent to the job-running backend'; comment on column task_run.backend_id is 'id of the task run in the job-running backend'; comment on column task_run.metadata is 'Useful metadata for the given task run. ' 'For instance, the worker that took on the job, ' 'or the logs for the run.'; comment on column task_run.id is 'Task run identifier'; comment on column task_run.task is 'References task table'; comment on column task_run.scheduled is 'Scheduled run time for task'; comment on column task_run.started is 'Task starting time'; comment on column task_run.ended is 'Task ending time'; create table if not exists listers ( id uuid primary key default uuid_generate_v4(), name text not null, instance_name text not null, created timestamptz not null default now(), -- auto_now_add in the model current_state jsonb not null, updated timestamptz not null ); comment on table listers is 'Lister instances known to the origin visit scheduler'; comment on column listers.name is 'Name of the lister (e.g. github, gitlab, debian, ...)'; comment on column listers.instance_name is 'Name of the current instance of this lister (e.g. 
framagit, bitbucket, ...)'; comment on column listers.created is 'Timestamp at which the lister was originally created'; comment on column listers.current_state is 'Known current state of this lister'; comment on column listers.updated is 'Timestamp at which the lister state was last updated'; create table if not exists listed_origins ( -- Basic information lister_id uuid not null references listers(id), url text not null, visit_type text not null, extra_loader_arguments jsonb not null, -- Whether this origin still exists or not enabled boolean not null, -- time-based information first_seen timestamptz not null default now(), last_seen timestamptz not null, -- potentially provided by the lister last_update timestamptz, primary key (lister_id, url, visit_type) ); comment on table listed_origins is 'Origins known to the origin visit scheduler'; comment on column listed_origins.lister_id is 'Lister instance which owns this origin'; comment on column listed_origins.url is 'URL of the origin listed'; comment on column listed_origins.visit_type is 'Type of the visit which should be scheduled for the given url'; comment on column listed_origins.extra_loader_arguments is 'Extra arguments that should be passed to the loader for this origin'; comment on column listed_origins.enabled is 'Whether this origin has been seen during the last listing, and visits should be scheduled.'; comment on column listed_origins.first_seen is 'Time at which the origin was first seen by a lister'; comment on column listed_origins.last_seen is 'Time at which the origin was last seen by the lister'; comment on column listed_origins.last_update is 'Time of the last update to the origin recorded by the remote'; create type last_visit_status as enum ('successful', 'failed', 'not_found'); comment on type last_visit_status is 'Record of the status of the last visit of an origin'; create table origin_visit_stats ( url text not null, visit_type text not null, last_successful timestamptz, last_visit timestamptz, last_visit_status last_visit_status, -- visit scheduling information last_scheduled timestamptz, -- last snapshot resulting from an eventful visit last_snapshot bytea, -- position in the global queue, at which time we expect the origin to have new -- objects next_visit_queue_position bigint, -- duration that we expect to wait between visits of this origin next_position_offset int not null default 4, successive_visits int not null default 1, primary key (url, visit_type) ); comment on table origin_visit_stats is 'Aggregated information on visits for each origin in the archive'; comment on column origin_visit_stats.url is 'Origin URL'; comment on column origin_visit_stats.visit_type is 'Type of the visit for the given url'; comment on column origin_visit_stats.last_successful is 'Date of the last successful visit, at which we recorded the `last_snapshot`'; comment on column origin_visit_stats.last_visit is 'Date of the last visit overall'; comment on column origin_visit_stats.last_visit_status is 'Status of the last visit'; comment on column origin_visit_stats.last_scheduled is 'Time when this origin was scheduled to be visited last'; comment on column origin_visit_stats.last_snapshot is 'sha1_git of the last visit snapshot'; comment on column origin_visit_stats.next_visit_queue_position is 'Position in the global per origin-type queue at which some new objects are expected to be found'; comment on column origin_visit_stats.next_position_offset is 'Duration that we expect to wait between visits of this origin'; comment on 
column origin_visit_stats.successive_visits is 'number of successive visits with the same status'; create table visit_scheduler_queue_position ( visit_type text not null, position bigint not null, primary key (visit_type) ); comment on table visit_scheduler_queue_position is 'Current queue position for the recurrent visit scheduler'; comment on column visit_scheduler_queue_position.visit_type is 'Visit type'; comment on column visit_scheduler_queue_position.position is 'Current position for the runner of this visit type'; create table scheduler_metrics ( lister_id uuid not null references listers(id), visit_type text not null, last_update timestamptz not null, origins_known int not null default 0, origins_enabled int not null default 0, origins_never_visited int not null default 0, origins_with_pending_changes int not null default 0, primary key (lister_id, visit_type) ); comment on table scheduler_metrics is 'Cache of per-lister metrics for the scheduler, collated between the listed_origins and origin_visit_stats tables.'; comment on column scheduler_metrics.lister_id is 'Lister instance on which metrics have been aggregated'; comment on column scheduler_metrics.visit_type is 'Visit type on which metrics have been aggregated'; comment on column scheduler_metrics.last_update is 'Last update of these metrics'; comment on column scheduler_metrics.origins_known is 'Number of known (enabled or disabled) origins'; comment on column scheduler_metrics.origins_enabled is 'Number of origins that were present in the latest listing'; comment on column scheduler_metrics.origins_never_visited is 'Number of origins that have never been successfully visited'; comment on column scheduler_metrics.origins_with_pending_changes is 'Number of enabled origins with known activity since our last visit'; diff --git a/swh/scheduler/sql/40-func.sql b/swh/scheduler/sql/40-func.sql index 3090819..57ff3da 100644 --- a/swh/scheduler/sql/40-func.sql +++ b/swh/scheduler/sql/40-func.sql @@ -1,401 +1,412 @@ create or replace function swh_scheduler_mktemp_task () returns void language sql as $$ create temporary table tmp_task ( like task excluding indexes ) on commit drop; alter table tmp_task alter column retries_left drop not null, drop column id; $$; comment on function swh_scheduler_mktemp_task () is 'Create a temporary table for bulk task creation'; create or replace function swh_scheduler_create_tasks_from_temp () returns setof task language plpgsql as $$ begin -- update the default values in one go -- this is separated from the insert/select to avoid too much -- juggling update tmp_task t set current_interval = tt.default_interval, retries_left = coalesce(retries_left, tt.num_retries, 0) from task_type tt where tt.type=t.type; insert into task (type, arguments, next_run, status, current_interval, policy, retries_left, priority) select type, arguments, next_run, status, current_interval, policy, retries_left, priority from tmp_task t where not exists(select 1 from task where type = t.type and md5(arguments::text) = md5(t.arguments::text) and arguments = t.arguments and policy = t.policy and priority is not distinct from t.priority and status = t.status); return query select distinct t.* from tmp_task tt inner join task t on ( tt.type = t.type and md5(tt.arguments::text) = md5(t.arguments::text) and tt.arguments = t.arguments and tt.policy = t.policy and tt.priority is not distinct from t.priority and tt.status = t.status ); end; $$; comment on function swh_scheduler_create_tasks_from_temp () is 'Create tasks in bulk 
from the temporary table'; create or replace function swh_scheduler_peek_no_priority_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql stable as $$ select * from task where next_run <= ts and type = task_type and status = 'next_run_not_scheduled' and priority is null order by next_run limit num_tasks; $$; comment on function swh_scheduler_peek_no_priority_tasks (text, timestamptz, bigint) is 'Retrieve tasks without priority'; create or replace function swh_scheduler_nb_priority_tasks(num_tasks_priority bigint, task_priority task_priority) returns numeric language sql stable as $$ select ceil(num_tasks_priority * (select ratio from priority_ratio where id = task_priority)) :: numeric $$; comment on function swh_scheduler_nb_priority_tasks (bigint, task_priority) is 'Given a priority task and a total number, compute the number of tasks to read'; create or replace function swh_scheduler_peek_tasks_with_priority (task_type text, ts timestamptz default now(), num_tasks_priority bigint default NULL, task_priority task_priority default 'normal') returns setof task language sql stable as $$ select * from task t where t.next_run <= ts and t.type = task_type and t.status = 'next_run_not_scheduled' and t.priority = task_priority order by t.next_run limit num_tasks_priority; $$; comment on function swh_scheduler_peek_tasks_with_priority(text, timestamptz, bigint, task_priority) is 'Retrieve tasks with a given priority'; create or replace function swh_scheduler_grab_ready_tasks (task_type text, ts timestamptz default now(), num_tasks bigint default NULL) returns setof task language sql as $$ update task set status='next_run_scheduled' from ( select id from swh_scheduler_peek_no_priority_tasks(task_type, ts, num_tasks) ) next_tasks where task.id = next_tasks.id returning task.*; $$; comment on function swh_scheduler_grab_ready_tasks (text, timestamptz, bigint) is 'Grab (no priority) tasks ready for scheduling and change their status'; create or replace function swh_scheduler_peek_any_ready_priority_tasks ( task_type text, ts timestamptz default now(), num_tasks bigint default NULL ) returns setof task language sql stable as $$ select * from task t where t.next_run <= ts and t.type = task_type and t.status = 'next_run_not_scheduled' and t.priority is not null order by t.next_run limit num_tasks; $$; comment on function swh_scheduler_peek_any_ready_priority_tasks(text, timestamptz, bigint) is 'List tasks with any priority ready for scheduling'; create or replace function swh_scheduler_grab_any_ready_priority_tasks ( task_type text, ts timestamptz default now(), num_tasks bigint default NULL ) returns setof task language sql as $$ update task set status='next_run_scheduled' from ( select id from swh_scheduler_peek_any_ready_priority_tasks( task_type, ts, num_tasks ) ) next_tasks where task.id = next_tasks.id returning task.*; $$; comment on function swh_scheduler_grab_any_ready_priority_tasks (text, timestamptz, bigint) is 'Grab any priority tasks ready for scheduling and change their status'; create or replace function swh_scheduler_schedule_task_run (task_id bigint, backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ insert into task_run (task, backend_id, metadata, scheduled, status) values (task_id, backend_id, metadata, ts, 'scheduled') returning *; $$; create or replace function swh_scheduler_mktemp_task_run () returns void language sql as $$ create temporary table 
tmp_task_run ( like task_run excluding indexes ) on commit drop; alter table tmp_task_run drop column id, drop column status; $$; comment on function swh_scheduler_mktemp_task_run () is 'Create a temporary table for bulk task run scheduling'; create or replace function swh_scheduler_schedule_task_run_from_temp () returns void language plpgsql as $$ begin insert into task_run (task, backend_id, metadata, scheduled, status) select task, backend_id, metadata, scheduled, 'scheduled' from tmp_task_run; return; end; $$; create or replace function swh_scheduler_start_task_run (backend_id text, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set started = ts, status = 'started', metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_start_task_run.metadata where task_run.backend_id = swh_scheduler_start_task_run.backend_id returning *; $$; create or replace function swh_scheduler_end_task_run (backend_id text, status task_run_status, metadata jsonb default '{}'::jsonb, ts timestamptz default now()) returns task_run language sql as $$ update task_run set ended = ts, status = swh_scheduler_end_task_run.status, metadata = coalesce(task_run.metadata, '{}'::jsonb) || swh_scheduler_end_task_run.metadata where task_run.backend_id = swh_scheduler_end_task_run.backend_id returning *; $$; create type task_record as ( task_id bigint, task_policy task_policy, task_status task_status, task_run_id bigint, arguments jsonb, type text, backend_id text, metadata jsonb, scheduled timestamptz, started timestamptz, ended timestamptz, task_run_status task_run_status ); create or replace function swh_scheduler_task_to_archive( ts_after timestamptz, ts_before timestamptz, last_id bigint default -1, lim bigint default 10) returns setof task_record language sql stable as $$ select t.id as task_id, t.policy as task_policy, t.status as task_status, tr.id as task_run_id, t.arguments, t.type, tr.backend_id, tr.metadata, tr.scheduled, tr.started, tr.ended, tr.status as task_run_status from task_run tr inner join task t on tr.task=t.id where ((t.policy = 'oneshot' and t.status in ('completed', 'disabled')) or (t.policy = 'recurring' and t.status = 'disabled')) and ((ts_after <= tr.started and tr.started < ts_before) or (tr.started is null and (ts_after <= tr.scheduled and tr.scheduled < ts_before))) and t.id >= last_id order by tr.task, tr.started limit lim; $$; comment on function swh_scheduler_task_to_archive (timestamptz, timestamptz, bigint, bigint) is 'Read archivable tasks function'; create or replace function swh_scheduler_delete_archived_tasks( task_ids bigint[], task_run_ids bigint[]) returns void language sql as $$ -- clean up task_run_ids delete from task_run where id in (select * from unnest(task_run_ids)); -- clean up only tasks whose associated task_run are all cleaned up. 
-- Remaining tasks will stay there and will be cleaned up when -- remaining data have been indexed delete from task where id in (select t.id from task t left outer join task_run tr on t.id=tr.task where t.id in (select * from unnest(task_ids)) and tr.task is null); $$; comment on function swh_scheduler_delete_archived_tasks(bigint[], bigint[]) is 'Clean up archived tasks function'; create or replace function swh_scheduler_update_task_on_task_end () returns trigger language plpgsql as $$ declare cur_task task%rowtype; cur_task_type task_type%rowtype; adjustment_factor float; new_interval interval; begin select * from task where id = new.task into cur_task; select * from task_type where type = cur_task.type into cur_task_type; case when new.status = 'permfailed' then update task set status = 'disabled' where id = cur_task.id; when new.status in ('eventful', 'uneventful') then case when cur_task.policy = 'oneshot' then update task set status = 'completed' where id = cur_task.id; when cur_task.policy = 'recurring' then if new.status = 'uneventful' then adjustment_factor := 1/cur_task_type.backoff_factor; else adjustment_factor := 1/cur_task_type.backoff_factor; end if; new_interval := greatest( cur_task_type.min_interval, least( cur_task_type.max_interval, adjustment_factor * cur_task.current_interval)); update task set status = 'next_run_not_scheduled', next_run = new.ended + new_interval, current_interval = new_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; else -- new.status in 'failed', 'lost' if cur_task.retries_left > 0 then update task set status = 'next_run_not_scheduled', next_run = new.ended + coalesce(cur_task_type.retry_delay, interval '1 hour'), retries_left = cur_task.retries_left - 1 where id = cur_task.id; else -- no retries left case when cur_task.policy = 'oneshot' then update task set status = 'disabled' where id = cur_task.id; when cur_task.policy = 'recurring' then update task set status = 'next_run_not_scheduled', next_run = new.ended + cur_task.current_interval, retries_left = coalesce(cur_task_type.num_retries, 0) where id = cur_task.id; end case; end if; -- retries end case; return null; end; $$; create trigger update_task_on_task_end after update of status on task_run for each row when (new.status NOT IN ('scheduled', 'started')) execute procedure swh_scheduler_update_task_on_task_end (); create or replace function update_metrics(lister_id uuid default NULL, ts timestamptz default now()) returns setof scheduler_metrics - language sql + language plpgsql as $$ - insert into scheduler_metrics ( - lister_id, visit_type, last_update, - origins_known, origins_enabled, - origins_never_visited, origins_with_pending_changes - ) - select + begin + -- If we do the following select as a subquery in the insert statement below, + -- PostgreSQL prevents the use of parallel queries. So we do the select into a + -- temporary table, which doesn't suffer this limitation. 
+ + create temporary table tmp_update_metrics + on commit drop + as select lo.lister_id, lo.visit_type, coalesce(ts, now()) as last_update, count(*) as origins_known, count(*) filter (where enabled) as origins_enabled, count(*) filter (where enabled and last_snapshot is NULL ) as origins_never_visited, count(*) filter (where enabled and lo.last_update > last_successful ) as origins_with_pending_changes from listed_origins lo left join origin_visit_stats ovs using (url, visit_type) where -- update only for the requested lister update_metrics.lister_id = lo.lister_id -- or for all listers if the function argument is null or update_metrics.lister_id is null - group by (lister_id, visit_type) - on conflict (lister_id, visit_type) do update - set - last_update = EXCLUDED.last_update, - origins_known = EXCLUDED.origins_known, - origins_enabled = EXCLUDED.origins_enabled, - origins_never_visited = EXCLUDED.origins_never_visited, - origins_with_pending_changes = EXCLUDED.origins_with_pending_changes - returning * + group by (lo.lister_id, lo.visit_type); + + return query + insert into scheduler_metrics ( + lister_id, visit_type, last_update, + origins_known, origins_enabled, + origins_never_visited, origins_with_pending_changes + ) + select * from tmp_update_metrics + on conflict on constraint scheduler_metrics_pkey do update + set + last_update = EXCLUDED.last_update, + origins_known = EXCLUDED.origins_known, + origins_enabled = EXCLUDED.origins_enabled, + origins_never_visited = EXCLUDED.origins_never_visited, + origins_with_pending_changes = EXCLUDED.origins_with_pending_changes + returning *; + end; $$; comment on function update_metrics(uuid, timestamptz) is 'Update metrics for the given lister_id'; diff --git a/swh/scheduler/tests/es/__init__.py b/swh/scheduler/tests/es/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/swh/scheduler/tests/es/conftest.py b/swh/scheduler/tests/es/conftest.py deleted file mode 100644 index 389dfe8..0000000 --- a/swh/scheduler/tests/es/conftest.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2019 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import pytest -import yaml - -from swh.scheduler import get_scheduler - - -@pytest.fixture -def swh_sched_config(swh_scheduler_config): - return { - "scheduler": {"cls": "local", **swh_scheduler_config,}, - "elasticsearch": { - "cls": "memory", - "args": {"index_name_prefix": "swh-tasks",}, - }, - } - - -@pytest.fixture -def swh_sched_config_file(swh_sched_config, monkeypatch, tmp_path): - conffile = str(tmp_path / "elastic.yml") - with open(conffile, "w") as f: - f.write(yaml.dump(swh_sched_config)) - monkeypatch.setenv("SWH_CONFIG_FILENAME", conffile) - return conffile - - -@pytest.fixture -def swh_sched(swh_sched_config): - return get_scheduler(**swh_sched_config["scheduler"]) - - -@pytest.fixture -def swh_elasticsearch_backend(swh_sched_config): - from swh.scheduler.backend_es import ElasticSearchBackend - - backend = ElasticSearchBackend(**swh_sched_config) - backend.initialize() - return backend - - -@pytest.fixture -def swh_elasticsearch_memory(swh_elasticsearch_backend): - return swh_elasticsearch_backend.storage diff --git a/swh/scheduler/tests/es/test_backend_es.py b/swh/scheduler/tests/es/test_backend_es.py deleted file mode 100644 index 1c7e2c0..0000000 --- a/swh/scheduler/tests/es/test_backend_es.py +++ 
/dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2019-2020 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import datetime - -import elasticsearch -import pytest - -from swh.scheduler.backend_es import get_elasticsearch - -from ..common import TEMPLATES, tasks_from_template - - -def test_get_elasticsearch(): - with pytest.raises(ValueError, match="Unknown elasticsearch class"): - get_elasticsearch("unknown") - - es = get_elasticsearch("memory") - assert es - from swh.scheduler.elasticsearch_memory import MemoryElasticsearch - - assert isinstance(es, MemoryElasticsearch) - - es = get_elasticsearch("local") - assert es - assert isinstance(es, elasticsearch.Elasticsearch) - - -def test_backend_setup_basic(swh_elasticsearch_backend): - """Elastic search instance should allow to create/close/check index - - """ - index_name = "swh-tasks-2010-01" - try: - swh_elasticsearch_backend.storage.indices.get_mapping(index_name) - except (elasticsearch.exceptions.NotFoundError, KeyError): - pass - - assert not swh_elasticsearch_backend.storage.indices.exists(index_name) - swh_elasticsearch_backend.create(index_name) - assert swh_elasticsearch_backend.storage.indices.exists(index_name) - assert swh_elasticsearch_backend.is_index_opened(index_name) - - # index exists with a mapping - mapping = swh_elasticsearch_backend.storage.indices.get_mapping(index_name) - assert mapping != {} - - -def test_backend_setup_index(swh_elasticsearch_backend): - """Elastic search instance should allow to bulk index - - """ - template_git = TEMPLATES["git"] - next_run_date = datetime.datetime.utcnow() - datetime.timedelta(days=1) - tasks = tasks_from_template(template_git, next_run_date, 1) - index_name = swh_elasticsearch_backend.compute_index_name( - next_run_date.year, next_run_date.month - ) - assert not swh_elasticsearch_backend.storage.indices.exists(index_name) - - tasks = list(swh_elasticsearch_backend.streaming_bulk(index_name, tasks)) - assert len(tasks) > 0 - - for output_task in tasks: - assert output_task is not None - assert output_task["type"] == template_git["type"] - assert output_task["arguments"] is not None - next_run = output_task["next_run"] - if isinstance(next_run, str): # real elasticsearch - assert next_run == next_run_date.isoformat() - else: # memory implem. 
does not really index - assert next_run == next_run_date - - assert swh_elasticsearch_backend.storage.indices.exists(index_name) - assert swh_elasticsearch_backend.is_index_opened(index_name) - mapping = swh_elasticsearch_backend.storage.indices.get_mapping(index_name) - assert mapping != {} diff --git a/swh/scheduler/tests/es/test_cli_task.py b/swh/scheduler/tests/es/test_cli_task.py deleted file mode 100644 index f426198..0000000 --- a/swh/scheduler/tests/es/test_cli_task.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2019 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import datetime -import logging -import random -import uuid - -from click.testing import CliRunner -import pytest - -from swh.scheduler.cli import cli -from swh.scheduler.utils import utcnow - -from ..common import TASK_TYPES, TEMPLATES, tasks_from_template - -logger = logging.getLogger(__name__) - - -@pytest.mark.usefixtures("swh_elasticsearch_backend") -def test_cli_archive_tasks(swh_sched, swh_sched_config_file): - scheduler = swh_sched - template_git = TEMPLATES["git"] - template_hg = TEMPLATES["hg"] - # first initialize scheduler's db (is this still needed?) - for tt in TASK_TYPES.values(): - scheduler.create_task_type(tt) - - next_run_start = utcnow() - datetime.timedelta(days=1) - - recurring = tasks_from_template(template_git, next_run_start, 100) - oneshots = tasks_from_template( - template_hg, next_run_start - datetime.timedelta(days=1), 50 - ) - - past_time = next_run_start - datetime.timedelta(days=7) - - all_tasks = recurring + oneshots - result = scheduler.create_tasks(all_tasks) - assert len(result) == len(all_tasks) - - # simulate task run - backend_tasks = [ - { - "task": task["id"], - "backend_id": str(uuid.uuid4()), - "scheduled": next_run_start - datetime.timedelta(minutes=i % 60), - } - for i, task in enumerate(result) - ] - scheduler.mass_schedule_task_runs(backend_tasks) - - # Disable some tasks - tasks_to_disable = set() - for task in result: - status = random.choice(["disabled", "completed"]) - if status == "disabled": - tasks_to_disable.add(task["id"]) - - scheduler.disable_tasks(tasks_to_disable) - - git_tasks = scheduler.search_tasks(task_type=template_git["type"]) - hg_tasks = scheduler.search_tasks(task_type=template_hg["type"]) - assert len(git_tasks) + len(hg_tasks) == len(all_tasks) - - # Ensure the task_run are in expected state - task_runs = scheduler.get_task_runs([t["id"] for t in git_tasks + hg_tasks]) - - # Same for the tasks - for t in git_tasks + hg_tasks: - if t["id"] in tasks_to_disable: - assert t["status"] == "disabled" - - future_time = next_run_start + datetime.timedelta(days=1) - for tr in task_runs: - assert past_time <= tr["scheduled"] - assert tr["scheduled"] < future_time - - runner = CliRunner() - result = runner.invoke( - cli, - [ - "--config-file", - swh_sched_config_file, - "task", - "archive", - "--after", - past_time.isoformat(), - "--before", - future_time.isoformat(), - "--cleanup", - ], - obj={"log_level": logging.DEBUG,}, - ) - - assert result.exit_code == 0, result.output - - # disabled tasks should no longer be in the scheduler - git_tasks = scheduler.search_tasks(task_type=template_git["type"]) - hg_tasks = scheduler.search_tasks(task_type=template_hg["type"]) - remaining_tasks = git_tasks + hg_tasks - count_disabled = 0 - for task in remaining_tasks: - logger.debug(f"task 
status: {task['status']}") - if task["status"] == "disabled": - count_disabled += 1 - - assert count_disabled == 0 - assert len(remaining_tasks) == len(all_tasks) - len(tasks_to_disable) diff --git a/swh/scheduler/tests/es/test_elasticsearch_memory.py b/swh/scheduler/tests/es/test_elasticsearch_memory.py deleted file mode 100644 index 5c79622..0000000 --- a/swh/scheduler/tests/es/test_elasticsearch_memory.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) 2019 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import datetime -import hashlib -import logging -import random -from typing import Any, Dict - -import pytest - -from swh.scheduler.elasticsearch_memory import BasicSerializer, BasicTransport - -from ..common import TEMPLATES, tasks_from_template - -logger = logging.getLogger(__name__) - - -def test_serializer(): - s = BasicSerializer() - assert s - - data = {"something": [1, 2, 3], "cool": {"1": "2"}} - actual_data = s.dumps(data) - - assert isinstance(actual_data, str) - assert actual_data == str(data) - - -def test_basic_transport(): - b = BasicTransport() - assert b - - assert isinstance(b.serializer, BasicSerializer) - - -def test_index_manipulation(swh_elasticsearch_memory): - index_name = "swh-tasks-xxxx" - indices = swh_elasticsearch_memory.index - - assert not swh_elasticsearch_memory.exists(index_name) - assert index_name not in indices - - # so stat raises - with pytest.raises(Exception): - swh_elasticsearch_memory.stats(index_name) - - # we create the index - swh_elasticsearch_memory.create(index_name) - - # now the index exists - assert swh_elasticsearch_memory.exists(index_name) - assert index_name in indices - # it's opened - assert indices[index_name]["status"] == "opened" - - # so stats is happy - swh_elasticsearch_memory.stats(index_name) - - # open the index, nothing changes - swh_elasticsearch_memory.open(index_name) - assert indices[index_name]["status"] == "opened" - - # close the index - swh_elasticsearch_memory.close(index_name) - - assert indices[index_name]["status"] == "closed" - - # reopen the index (fun times) - swh_elasticsearch_memory.open(index_name) - assert indices[index_name]["status"] == "opened" - - -def test_bulk_and_mget(swh_elasticsearch_memory): - # initialize tasks - template_git = TEMPLATES["git"] - next_run_start = datetime.datetime.utcnow() - datetime.timedelta(days=1) - - tasks = tasks_from_template(template_git, next_run_start, 100) - - def compute_id(stask): - return hashlib.sha1(stask.encode("utf-8")).hexdigest() - - body = [] - ids_to_task = {} - for task in tasks: - date = task["next_run"] - index_name = f"swh-tasks-{date.year}-{date.month}" - idx = {"index": {"_index": index_name}} - sidx = swh_elasticsearch_memory.transport.serializer.dumps(idx) - body.append(sidx) - - stask = swh_elasticsearch_memory.transport.serializer.dumps(task) - body.append(stask) - - _id = compute_id(stask) - ids_to_task[_id] = task - logger.debug(f"_id: {_id}, task: {task}") - - # store - - # create the index first - swh_elasticsearch_memory.create(index_name) - - # then bulk insert new data - result = swh_elasticsearch_memory.bulk("\n".join(body)) - - # no guarantee in the order - assert result - actual_items = result["items"] - assert len(actual_items) == len(ids_to_task) - - def get_id(data: Dict[str, Any]) -> str: - return data["index"]["_id"] - - actual_items = 
sorted(actual_items, key=get_id) - - expected_items = { - "items": [{"index": {"status": 200, "_id": _id}} for _id in list(ids_to_task)] - } - - expected_items = sorted(expected_items["items"], key=get_id) - assert actual_items == expected_items - - # retrieve - - nb_docs = 10 - ids = list(ids_to_task) - random_ids = [] - # add some inexistent ids - for i in range(16): - noisy_id = f"{i}" * 40 - random_ids.append(noisy_id) - random_ids.extend(random.sample(ids, nb_docs)) # add relevant ids - for i in range(16, 32): - noisy_id = f"{i}" * 40 - random_ids.append(noisy_id) - - result = swh_elasticsearch_memory.mget(index=index_name, body={"ids": random_ids}) - assert result["docs"] - assert len(result["docs"]) == nb_docs, "no random and inexistent id found" - for doc in result["docs"]: - assert doc["found"] - - actual_task = doc["_source"] - _id = compute_id(str(actual_task)) - expected_task = ids_to_task[_id] - assert actual_task == expected_task diff --git a/swh/scheduler/tests/test_recurrent_visits.py b/swh/scheduler/tests/test_recurrent_visits.py index d118fbf..efa8d67 100644 --- a/swh/scheduler/tests/test_recurrent_visits.py +++ b/swh/scheduler/tests/test_recurrent_visits.py @@ -1,180 +1,202 @@ # Copyright (C) 2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import timedelta import logging from queue import Queue from unittest.mock import MagicMock import pytest from swh.scheduler.celery_backend.recurrent_visits import ( + POLICY_ADDITIONAL_PARAMETERS, VisitSchedulerThreads, + grab_next_visits_policy_weights, send_visits_for_visit_type, spawn_visit_scheduler_thread, terminate_visit_scheduler_threads, visit_scheduler_thread, ) from .test_cli import invoke TEST_MAX_QUEUE = 10000 MODULE_NAME = "swh.scheduler.celery_backend.recurrent_visits" def _compute_backend_name(visit_type: str) -> str: "Build a dummy reproducible backend name" return f"swh.loader.{visit_type}.tasks" @pytest.fixture def swh_scheduler(swh_scheduler): """Override default fixture of the scheduler to install some more task types.""" for visit_type in ["git", "hg", "svn"]: task_type = f"load-{visit_type}" swh_scheduler.create_task_type( { "type": task_type, "max_queue_length": TEST_MAX_QUEUE, "description": "The {} testing task".format(task_type), "backend_name": _compute_backend_name(visit_type), "default_interval": timedelta(days=1), "min_interval": timedelta(hours=6), "max_interval": timedelta(days=12), } ) return swh_scheduler def test_cli_schedule_recurrent_unknown_visit_type(swh_scheduler): """When passed an unknown visit type, the recurrent visit scheduler should refuse to start.""" with pytest.raises(ValueError, match="Unknown"): invoke( swh_scheduler, False, ["schedule-recurrent", "--visit-type", "unknown", "--visit-type", "git"], ) def test_cli_schedule_recurrent_noop(swh_scheduler, mocker): """When passing no visit types, the recurrent visit scheduler should start.""" spawn_visit_scheduler_thread = mocker.patch( f"{MODULE_NAME}.spawn_visit_scheduler_thread" ) spawn_visit_scheduler_thread.side_effect = SystemExit # The actual scheduling threads won't spawn, they'll immediately terminate. 
This # only exercises the logic to pull task types out of the database result = invoke(swh_scheduler, False, ["schedule-recurrent"]) assert result.exit_code == 0, result.output def test_recurrent_visit_scheduling( swh_scheduler, caplog, listed_origins_by_type, mocker, ): """Scheduling known tasks is ok.""" caplog.set_level(logging.DEBUG, MODULE_NAME) nb_origins = 1000 mock_celery_app = MagicMock() mock_available_slots = mocker.patch(f"{MODULE_NAME}.get_available_slots") mock_available_slots.return_value = nb_origins # Slots available in queue # Make sure the scheduler is properly configured in terms of visit/task types all_task_types = { task_type_d["type"]: task_type_d for task_type_d in swh_scheduler.get_task_types() } visit_types = list(listed_origins_by_type.keys()) assert len(visit_types) > 0 task_types = [] origins = [] for visit_type, _origins in listed_origins_by_type.items(): origins.extend(swh_scheduler.record_listed_origins(_origins)) task_type_name = f"load-{visit_type}" assert task_type_name in all_task_types.keys() task_type = all_task_types[task_type_name] task_type["visit_type"] = visit_type # we'll limit the orchestrator to the origins' type we know task_types.append(task_type) for visit_type in ["git", "svn"]: task_type = f"load-{visit_type}" send_visits_for_visit_type( swh_scheduler, mock_celery_app, visit_type, all_task_types[task_type] ) assert mock_available_slots.called, "The available slots functions should be called" records = [record.message for record in caplog.records] # Mapping over the dict ratio/policies entries can change overall order so let's # check the set of records expected_records = set() for task_type in task_types: visit_type = task_type["visit_type"] queue_name = task_type["backend_name"] msg = ( f"{nb_origins} available slots for visit type {visit_type} " f"in queue {queue_name}" ) expected_records.add(msg) for expected_record in expected_records: assert expected_record in set(records) +@pytest.mark.parametrize( + "visit_type, tablesamples", + [("hg", {}), ("git", POLICY_ADDITIONAL_PARAMETERS["git"])], +) +def test_recurrent_visit_additional_parameters( + swh_scheduler, mocker, visit_type, tablesamples +): + """Testing additional policy parameters""" + + mock_grab_next_visits = mocker.patch.object(swh_scheduler, "grab_next_visits") + mock_grab_next_visits.return_value = [] + + grab_next_visits_policy_weights(swh_scheduler, visit_type, 10) + + for call in mock_grab_next_visits.call_args_list: + assert call[1].get("tablesample") == tablesamples.get( + call[1]["policy"], {} + ).get("tablesample") + + @pytest.fixture def scheduler_config(swh_scheduler_config): return {"scheduler": {"cls": "local", **swh_scheduler_config}, "celery": {}} def test_visit_scheduler_thread_unknown_task( swh_scheduler, scheduler_config, ): """Starting a thread with unknown task type reports the error""" unknown_visit_type = "unknown" command_queue = Queue() exc_queue = Queue() visit_scheduler_thread( scheduler_config, unknown_visit_type, command_queue, exc_queue ) assert command_queue.empty() is True assert exc_queue.empty() is False assert len(exc_queue.queue) == 1 result = exc_queue.queue.pop() assert result[0] == unknown_visit_type assert isinstance(result[1], ValueError) def test_spawn_visit_scheduler_thread_noop(scheduler_config, visit_types, mocker): """Spawning and terminating threads runs smoothly""" threads: VisitSchedulerThreads = {} exc_queue = Queue() mock_build_app = mocker.patch("swh.scheduler.celery_backend.config.build_app") mock_build_app.return_value = 
MagicMock() assert len(threads) == 0 for visit_type in visit_types: spawn_visit_scheduler_thread(threads, exc_queue, scheduler_config, visit_type) # This actually only checks the spawning and terminating logic is sound assert len(threads) == len(visit_types) actual_threads = terminate_visit_scheduler_threads(threads) assert not len(actual_threads) assert mock_build_app.called diff --git a/tox.ini b/tox.ini index c68556a..da9a4bc 100644 --- a/tox.ini +++ b/tox.ini @@ -1,80 +1,80 @@ [tox] envlist=black,flake8,mypy,py3 [testenv] extras = testing deps = pytest-cov dev: ipdb setenv = LC_ALL=C.UTF-8 LC_CTYPE=C.UTF-8 LANG=C.UTF-8 commands = pytest --doctest-modules \ !slow: --hypothesis-profile=fast \ slow: --hypothesis-profile=slow \ --cov={envsitepackagesdir}/swh/scheduler \ {envsitepackagesdir}/swh/scheduler \ --cov-branch {posargs} [testenv:black] skip_install = true deps = black==19.10b0 commands = {envpython} -m black --check swh [testenv:flake8] skip_install = true deps = flake8 commands = {envpython} -m flake8 [testenv:mypy] extras = testing deps = - mypy + mypy==0.920 commands = mypy swh # build documentation outside swh-environment using the current # git HEAD of swh-docs, is executed on CI for each diff to prevent # breaking doc build [testenv:sphinx] whitelist_externals = make usedevelop = true extras = testing deps = # fetch and install swh-docs in develop mode -e git+https://forge.softwareheritage.org/source/swh-docs#egg=swh.docs setenv = SWH_PACKAGE_DOC_TOX_BUILD = 1 # turn warnings into errors SPHINXOPTS = -W commands = make -I ../.tox/sphinx/src/swh-docs/swh/ -C docs # build documentation only inside swh-environment using local state # of swh-docs package [testenv:sphinx-dev] whitelist_externals = make usedevelop = true extras = testing deps = # install swh-docs in develop mode -e ../swh-docs setenv = SWH_PACKAGE_DOC_TOX_BUILD = 1 # turn warnings into errors SPHINXOPTS = -W commands = make -I ../.tox/sphinx-dev/src/swh-docs/swh/ -C docs
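
Note on the update_metrics() rework (sql/updates/32.sql and swh/scheduler/sql/40-func.sql above): the function keeps its signature, update_metrics(lister_id uuid default NULL, ts timestamptz default now()), and still returns the refreshed scheduler_metrics rows; only the body changes, materialising the aggregation in a temporary table so PostgreSQL can parallelise the select before the upsert. Below is a minimal sketch of calling it directly with psycopg2 (already a dependency of this package); the DSN is a placeholder, and in practice the scheduler backend is expected to wrap this call:

    import psycopg2

    # Placeholder DSN: point this at the scheduler database.
    with psycopg2.connect("dbname=softwareheritage-scheduler") as db:
        with db.cursor() as cur:
            # A NULL lister_id refreshes metrics for every lister, per the
            # function's WHERE clause; pass a lister uuid to restrict the update.
            cur.execute("select * from update_metrics(null, now())")
            for metrics_row in cur.fetchall():
                print(metrics_row)

Because the insert ends with "on conflict on constraint scheduler_metrics_pkey do update", each (lister_id, visit_type) row is overwritten in place, so repeating the call for the same timestamp is idempotent.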
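
Note on the (unchanged) swh_scheduler_update_task_on_task_end() trigger shown in the 40-func.sql context above: for recurring tasks it computes the next interval as the current interval scaled by 1/backoff_factor (both the eventful and uneventful branches apply the same factor as written), clamped between the task type's min_interval and max_interval. A small sketch of that formula in Python, with illustrative values rather than anything taken from this patch:

    from datetime import timedelta

    def next_interval(current: timedelta, backoff_factor: float,
                      min_interval: timedelta, max_interval: timedelta) -> timedelta:
        # greatest(min_interval, least(max_interval, current / backoff_factor))
        adjusted = current / backoff_factor
        return max(min_interval, min(max_interval, adjusted))

    # e.g. next_interval(timedelta(days=1), 2.0,
    #                    timedelta(hours=6), timedelta(days=12)) == timedelta(hours=12)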
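
Note on the new test_recurrent_visit_additional_parameters test in swh/scheduler/tests/test_recurrent_visits.py: it asserts that grab_next_visits_policy_weights forwards per-policy keyword arguments from POLICY_ADDITIONAL_PARAMETERS (currently a tablesample setting for the git visit type, and nothing for hg) into each scheduler.grab_next_visits call. The sketch below shows the mapping shape this implies and the forwarding it checks; the policy names and the 0.1 sample size are illustrative guesses, not values taken from this patch:

    # Illustrative shape only; the real mapping lives in
    # swh.scheduler.celery_backend.recurrent_visits.POLICY_ADDITIONAL_PARAMETERS.
    POLICY_ADDITIONAL_PARAMETERS = {
        "git": {
            "already_visited_order_by_lag": {"tablesample": 0.1},
            "never_visited_oldest_update_first": {"tablesample": 0.1},
        },
    }

    def extra_policy_kwargs(visit_type: str, policy: str) -> dict:
        # Per the test, visit types or policies without an entry fall back to
        # no extra keyword arguments (no tablesample passed).
        return POLICY_ADDITIONAL_PARAMETERS.get(visit_type, {}).get(policy, {})

    # e.g., inside grab_next_visits_policy_weights:
    #   scheduler.grab_next_visits(visit_type=visit_type, count=count,
    #                              policy=policy,
    #                              **extra_policy_kwargs(visit_type, policy))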