diff --git a/setup.py b/setup.py
index fe21851..ec39d15 100755
--- a/setup.py
+++ b/setup.py
@@ -1,72 +1,72 @@
#!/usr/bin/env python3
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-from setuptools import setup, find_packages
-
-from os import path
from io import open
+from os import path
+
+from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
def parse_requirements(name=None):
if name:
reqf = "requirements-%s.txt" % name
else:
reqf = "requirements.txt"
requirements = []
if not path.exists(reqf):
return requirements
with open(reqf) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith("#"):
continue
requirements.append(line)
return requirements
setup(
name="swh.scheduler",
description="Software Heritage Scheduler",
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.7",
author="Software Heritage developers",
author_email="swh-devel@inria.fr",
url="https://forge.softwareheritage.org/diffusion/DSCH/",
packages=find_packages(),
setup_requires=["setuptools-scm"],
use_scm_version=True,
install_requires=parse_requirements() + parse_requirements("swh"),
extras_require={"testing": parse_requirements("test")},
include_package_data=True,
entry_points="""
[console_scripts]
swh-scheduler=swh.scheduler.cli:main
[swh.cli.subcommands]
scheduler=swh.scheduler.cli:cli
""",
classifiers=[
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
],
project_urls={
"Bug Reports": "https://forge.softwareheritage.org/maniphest",
"Funding": "https://www.softwareheritage.org/donate",
"Source": "https://forge.softwareheritage.org/source/swh-scheduler",
"Documentation": "https://docs.softwareheritage.org/devel/swh-scheduler/",
},
)
diff --git a/swh/scheduler/__init__.py b/swh/scheduler/__init__.py
index a0b72f2..2d3892f 100644
--- a/swh/scheduler/__init__.py
+++ b/swh/scheduler/__init__.py
@@ -1,68 +1,67 @@
# Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Any, Dict
-
# Percentage of tasks with priority to schedule
PRIORITY_SLOT = 0.6
DEFAULT_CONFIG = {
"scheduler": (
"dict",
{"cls": "local", "args": {"db": "dbname=softwareheritage-scheduler-dev",},},
)
}
# current configuration. To be set by the config loading mechanism
CONFIG = {} # type: Dict[str, Any]
def compute_nb_tasks_from(num_tasks):
"""Compute and returns the tuple, number of tasks without priority,
number of tasks with priority.
Args:
num_tasks (int):
Returns:
tuple number of tasks without priority (int), number of tasks with
priority (int)
"""
if not num_tasks:
return None, None
return (int((1 - PRIORITY_SLOT) * num_tasks), int(PRIORITY_SLOT * num_tasks))
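# Editor's illustration (not part of this patch): a minimal sketch of the split
# above, assuming the default PRIORITY_SLOT of 0.6; truncation may make the two
# numbers sum to less than num_tasks.
#
#   >>> compute_nb_tasks_from(10)
#   (4, 6)
#   >>> compute_nb_tasks_from(0)
#   (None, None)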
def get_scheduler(cls, args={}):
"""
Get a scheduler object of class `cls` instantiated with arguments `args`.
Args:
cls (str): scheduler's class, either 'local' or 'remote'
args (dict): keyword arguments for the scheduler class, defaults to empty
Returns:
an instance of swh.scheduler, either local or remote:
local: swh.scheduler.backend.SchedulerBackend
remote: swh.scheduler.api.client.RemoteScheduler
Raises:
ValueError if passed an unknown scheduler class.
"""
if cls == "remote":
from .api.client import RemoteScheduler as SchedulerBackend
elif cls == "local":
from .backend import SchedulerBackend
else:
raise ValueError("Unknown swh.scheduler class `%s`" % cls)
return SchedulerBackend(**args)
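# Editor's illustration (not part of this patch): a sketch of get_scheduler
# usage; the DSN and URL below are placeholders.
#
#   local_scheduler = get_scheduler(
#       "local", args={"db": "dbname=softwareheritage-scheduler-dev"}
#   )
#   remote_scheduler = get_scheduler("remote", args={"url": "http://localhost:5008/"})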
diff --git a/swh/scheduler/api/client.py b/swh/scheduler/api/client.py
index 15d8e54..8a36d05 100644
--- a/swh/scheduler/api/client.py
+++ b/swh/scheduler/api/client.py
@@ -1,24 +1,24 @@
# Copyright (C) 2018-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from swh.core.api import RPCClient
-from .serializers import ENCODERS, DECODERS
from .. import exc
from ..interface import SchedulerInterface
+from .serializers import DECODERS, ENCODERS
class RemoteScheduler(RPCClient):
"""Proxy to a remote scheduler API
"""
backend_class = SchedulerInterface
reraise_exceptions = [getattr(exc, a) for a in exc.__all__]
extra_type_decoders = DECODERS
extra_type_encoders = ENCODERS
diff --git a/swh/scheduler/api/server.py b/swh/scheduler/api/server.py
index 7a9d8f8..3a6c173 100644
--- a/swh/scheduler/api/server.py
+++ b/swh/scheduler/api/server.py
@@ -1,155 +1,154 @@
# Copyright (C) 2018-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import logging
import os
from swh.core import config
from swh.core.api import JSONFormatter, MsgpackFormatter, RPCServerApp
from swh.core.api import encode_data_server as encode_data
from swh.core.api import error_handler, negotiate
-
from swh.scheduler import get_scheduler
from swh.scheduler.exc import SchedulerException
from swh.scheduler.interface import SchedulerInterface
-from .serializers import ENCODERS, DECODERS
+from .serializers import DECODERS, ENCODERS
scheduler = None
def get_global_scheduler():
global scheduler
if not scheduler:
scheduler = get_scheduler(**app.config["scheduler"])
return scheduler
class SchedulerServerApp(RPCServerApp):
extra_type_decoders = DECODERS
extra_type_encoders = ENCODERS
app = SchedulerServerApp(
__name__, backend_class=SchedulerInterface, backend_factory=get_global_scheduler
)
@app.errorhandler(SchedulerException)
def argument_error_handler(exception):
return error_handler(exception, encode_data, status_code=400)
@app.errorhandler(Exception)
def my_error_handler(exception):
return error_handler(exception, encode_data)
def has_no_empty_params(rule):
return len(rule.defaults or ()) >= len(rule.arguments or ())
@app.route("/")
def index():
return """
Software Heritage scheduler RPC server
You have reached the
Software Heritage
scheduler RPC server.
See its
documentation
and API for more information
"""
@app.route("/site-map")
@negotiate(MsgpackFormatter)
@negotiate(JSONFormatter)
def site_map():
links = []
for rule in app.url_map.iter_rules():
if has_no_empty_params(rule) and hasattr(SchedulerInterface, rule.endpoint):
links.append(
dict(
rule=rule.rule,
description=getattr(SchedulerInterface, rule.endpoint).__doc__,
)
)
# links is now a list of url, endpoint tuples
return links
def load_and_check_config(config_file, type="local"):
"""Check the minimal configuration is set to run the api or raise an
error explanation.
Args:
config_file (str): Path to the configuration file to load
type (str): configuration type. For 'local' type, more
checks are done.
Raises:
Error if the setup is not as expected
Returns:
configuration as a dict
"""
if not config_file:
raise EnvironmentError("Configuration file must be defined")
if not os.path.exists(config_file):
raise FileNotFoundError("Configuration file %s does not exist" % (config_file,))
cfg = config.read(config_file)
vcfg = cfg.get("scheduler")
if not vcfg:
raise KeyError("Missing '%scheduler' configuration")
if type == "local":
cls = vcfg.get("cls")
if cls != "local":
raise ValueError(
"The scheduler backend can only be started with a 'local' "
"configuration"
)
args = vcfg.get("args")
if not args:
raise KeyError("Invalid configuration; missing 'args' config entry")
db = args.get("db")
if not db:
raise KeyError("Invalid configuration; missing 'db' config entry")
return cfg
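# Editor's illustration (not part of this patch): a minimal configuration file
# that passes the checks above for the 'local' type (the DSN is a placeholder):
#
#   scheduler:
#     cls: local
#     args:
#       db: dbname=softwareheritage-scheduler-dev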
api_cfg = None
def make_app_from_configfile():
"""Run the WSGI app from the webserver, loading the configuration from
a configuration file.
SWH_CONFIG_FILENAME environment variable defines the
configuration path to load.
"""
global api_cfg
if not api_cfg:
config_file = os.environ.get("SWH_CONFIG_FILENAME")
api_cfg = load_and_check_config(config_file)
app.config.update(api_cfg)
handler = logging.StreamHandler()
app.logger.addHandler(handler)
return app
if __name__ == "__main__":
print('Please use the "swh-scheduler api-server" command')
diff --git a/swh/scheduler/backend.py b/swh/scheduler/backend.py
index 84cda7c..5160269 100644
--- a/swh/scheduler/backend.py
+++ b/swh/scheduler/backend.py
@@ -1,734 +1,733 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import json
import logging
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from uuid import UUID
from arrow import Arrow, utcnow
import attr
-import psycopg2.pool
-import psycopg2.extras
-
-from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from psycopg2.extensions import AsIs
+import psycopg2.extras
+import psycopg2.pool
from swh.core.db import BaseDb
from swh.core.db.common import db_transaction
from .exc import StaleData
from .model import (
- Lister,
ListedOrigin,
ListedOriginPageToken,
+ Lister,
PaginatedListedOriginList,
)
logger = logging.getLogger(__name__)
def adapt_arrow(arrow):
return AsIs("'%s'::timestamptz" % arrow.isoformat())
psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json)
psycopg2.extensions.register_adapter(Arrow, adapt_arrow)
psycopg2.extras.register_uuid()
def format_query(query, keys):
"""Format a query with the given keys"""
query_keys = ", ".join(keys)
placeholders = ", ".join(["%s"] * len(keys))
return query.format(keys=query_keys, placeholders=placeholders)
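# Editor's illustration (not part of this patch): format_query only fills the
# {keys} and {placeholders} slots; the values themselves are passed separately
# to cur.execute.
#
#   >>> format_query("select {keys} from task_type where type=%s",
#   ...              ["type", "description"])
#   'select type, description from task_type where type=%s'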
class SchedulerBackend:
"""Backend for the Software Heritage scheduling database.
"""
def __init__(self, db, min_pool_conns=1, max_pool_conns=10):
"""
Args:
db: either a libpq connection string, or a psycopg2 connection
"""
if isinstance(db, psycopg2.extensions.connection):
self._pool = None
self._db = BaseDb(db)
else:
self._pool = psycopg2.pool.ThreadedConnectionPool(
min_pool_conns,
max_pool_conns,
db,
cursor_factory=psycopg2.extras.RealDictCursor,
)
self._db = None
def get_db(self):
if self._db:
return self._db
return BaseDb.from_pool(self._pool)
def put_db(self, db):
if db is not self._db:
db.put_conn()
task_type_keys = [
"type",
"description",
"backend_name",
"default_interval",
"min_interval",
"max_interval",
"backoff_factor",
"max_queue_length",
"num_retries",
"retry_delay",
]
@db_transaction()
def create_task_type(self, task_type, db=None, cur=None):
"""Create a new task type ready for scheduling.
Args:
task_type (dict): a dictionary with the following keys:
- type (str): an identifier for the task type
- description (str): a human-readable description of what the
task does
- backend_name (str): the name of the task in the
job-scheduling backend
- default_interval (datetime.timedelta): the default interval
between two task runs
- min_interval (datetime.timedelta): the minimum interval
between two task runs
- max_interval (datetime.timedelta): the maximum interval
between two task runs
- backoff_factor (float): the factor by which the interval
changes at each run
- max_queue_length (int): the maximum length of the task queue
for this task type
"""
keys = [key for key in self.task_type_keys if key in task_type]
query = format_query(
"""insert into task_type ({keys}) values ({placeholders})
on conflict do nothing""",
keys,
)
cur.execute(query, [task_type[key] for key in keys])
@db_transaction()
def get_task_type(self, task_type_name, db=None, cur=None):
"""Retrieve the task type with id task_type_name"""
query = format_query(
"select {keys} from task_type where type=%s", self.task_type_keys,
)
cur.execute(query, (task_type_name,))
return cur.fetchone()
@db_transaction()
def get_task_types(self, db=None, cur=None):
"""Retrieve all registered task types"""
query = format_query("select {keys} from task_type", self.task_type_keys,)
cur.execute(query)
return cur.fetchall()
@db_transaction()
def get_or_create_lister(
self, name: str, instance_name: Optional[str] = None, db=None, cur=None
) -> Lister:
"""Retrieve information about the given instance of the lister from the
database, or create the entry if it did not exist.
"""
if instance_name is None:
instance_name = ""
select_cols = ", ".join(Lister.select_columns())
insert_cols, insert_meta = (
", ".join(tup) for tup in Lister.insert_columns_and_metavars()
)
query = f"""
with added as (
insert into listers ({insert_cols}) values ({insert_meta})
on conflict do nothing
returning {select_cols}
)
select {select_cols} from added
union all
select {select_cols} from listers
where (name, instance_name) = (%(name)s, %(instance_name)s);
"""
cur.execute(query, attr.asdict(Lister(name=name, instance_name=instance_name)))
return Lister(**cur.fetchone())
@db_transaction()
def update_lister(self, lister: Lister, db=None, cur=None) -> Lister:
"""Update the state for the given lister instance in the database.
Returns:
a new Lister object, with all fields updated from the database
Raises:
StaleData if the `updated` timestamp for the lister instance in
database doesn't match the one passed by the user.
"""
select_cols = ", ".join(Lister.select_columns())
set_vars = ", ".join(
f"{col} = {meta}"
for col, meta in zip(*Lister.insert_columns_and_metavars())
)
query = f"""update listers
set {set_vars}
where id=%(id)s and updated=%(updated)s
returning {select_cols}"""
cur.execute(query, attr.asdict(lister))
updated = cur.fetchone()
if not updated:
raise StaleData("Stale data; Lister state not updated")
return Lister(**updated)
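# Editor's illustration (not part of this patch): the `updated` timestamp acts
# as an optimistic-concurrency token, so a typical lister round trip looks like
# the sketch below (state changes elided, names are placeholders).
#
#   lister = backend.get_or_create_lister(name="gitlab", instance_name="gitlab.com")
#   # ... update the lister's fields with attr.evolve(lister, ...) ...
#   lister = backend.update_lister(lister)  # raises StaleData if the row changed meanwhile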
@db_transaction()
def record_listed_origins(
self, listed_origins: Iterable[ListedOrigin], db=None, cur=None
) -> List[ListedOrigin]:
"""Record a set of origins that a lister has listed.
This performs an "upsert": origins with the same (lister_id, url,
visit_type) values are updated with new values for
extra_loader_arguments, last_update and last_seen.
"""
pk_cols = ListedOrigin.primary_key_columns()
select_cols = ListedOrigin.select_columns()
insert_cols, insert_meta = ListedOrigin.insert_columns_and_metavars()
upsert_cols = [col for col in insert_cols if col not in pk_cols]
upsert_set = ", ".join(f"{col} = EXCLUDED.{col}" for col in upsert_cols)
query = f"""INSERT into listed_origins ({", ".join(insert_cols)})
VALUES %s
ON CONFLICT ({", ".join(pk_cols)}) DO UPDATE
SET {upsert_set}
RETURNING {", ".join(select_cols)}
"""
ret = psycopg2.extras.execute_values(
cur=cur,
sql=query,
argslist=(attr.asdict(origin) for origin in listed_origins),
template=f"({', '.join(insert_meta)})",
page_size=1000,
fetch=True,
)
return [ListedOrigin(**d) for d in ret]
@db_transaction()
def get_listed_origins(
self,
lister_id: Optional[UUID] = None,
url: Optional[str] = None,
limit: int = 1000,
page_token: Optional[ListedOriginPageToken] = None,
db=None,
cur=None,
) -> PaginatedListedOriginList:
"""Get information on the listed origins matching either the `url` or
`lister_id`, or both arguments.
"""
query_filters: List[str] = []
query_params: List[Union[int, str, UUID, Tuple[UUID, str]]] = []
if lister_id:
query_filters.append("lister_id = %s")
query_params.append(lister_id)
if url is not None:
query_filters.append("url = %s")
query_params.append(url)
if page_token is not None:
query_filters.append("(lister_id, url) > %s")
# the typeshed annotation for tuple() is too strict.
query_params.append(tuple(page_token)) # type: ignore
query_params.append(limit)
select_cols = ", ".join(ListedOrigin.select_columns())
if query_filters:
where_clause = "where %s" % (" and ".join(query_filters))
else:
where_clause = ""
query = f"""SELECT {select_cols}
from listed_origins
{where_clause}
ORDER BY lister_id, url
LIMIT %s"""
cur.execute(query, tuple(query_params))
origins = [ListedOrigin(**d) for d in cur]
if len(origins) == limit:
page_token = (origins[-1].lister_id, origins[-1].url)
else:
page_token = None
return PaginatedListedOriginList(origins, page_token)
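# Editor's illustration (not part of this patch): draining all pages for one
# lister by feeding each returned page token back into the next call (attribute
# names assumed from swh.scheduler.model).
#
#   page = backend.get_listed_origins(lister_id=lister.id, limit=1000)
#   origins = list(page.origins)
#   while page.next_page_token is not None:
#       page = backend.get_listed_origins(
#           lister_id=lister.id, limit=1000, page_token=page.next_page_token
#       )
#       origins.extend(page.origins)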
task_create_keys = [
"type",
"arguments",
"next_run",
"policy",
"status",
"retries_left",
"priority",
]
task_keys = task_create_keys + ["id", "current_interval"]
@db_transaction()
def create_tasks(self, tasks, policy="recurring", db=None, cur=None):
"""Create new tasks.
Args:
tasks (list): each task is a dictionary with the following keys:
- type (str): the task type
- arguments (dict): the arguments for the task runner, keys:
- args (list of str): arguments
- kwargs (dict str -> str): keyword arguments
- next_run (datetime.datetime): the next scheduled run for the
task
Returns:
a list of created tasks.
"""
cur.execute("select swh_scheduler_mktemp_task()")
db.copy_to(
tasks,
"tmp_task",
self.task_create_keys,
default_values={"policy": policy, "status": "next_run_not_scheduled"},
cur=cur,
)
query = format_query(
"select {keys} from swh_scheduler_create_tasks_from_temp()", self.task_keys,
)
cur.execute(query)
return cur.fetchall()
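# Editor's illustration (not part of this patch): a sketch of a task dictionary
# accepted by create_tasks; the task type and URL are placeholders and must
# match an existing task_type row.
#
#   backend.create_tasks([{
#       "type": "load-git",
#       "arguments": {"args": [], "kwargs": {"url": "https://example.org/repo.git"}},
#       "next_run": utcnow(),
#   }])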
@db_transaction()
def set_status_tasks(
self, task_ids, status="disabled", next_run=None, db=None, cur=None
):
"""Set the tasks' status whose ids are listed.
If given, also set the next_run date.
"""
if not task_ids:
return
query = ["UPDATE task SET status = %s"]
args = [status]
if next_run:
query.append(", next_run = %s")
args.append(next_run)
query.append(" WHERE id IN %s")
args.append(tuple(task_ids))
cur.execute("".join(query), args)
@db_transaction()
def disable_tasks(self, task_ids, db=None, cur=None):
"""Disable the tasks whose ids are listed."""
return self.set_status_tasks(task_ids, db=db, cur=cur)
@db_transaction()
def search_tasks(
self,
task_id=None,
task_type=None,
status=None,
priority=None,
policy=None,
before=None,
after=None,
limit=None,
db=None,
cur=None,
):
"""Search tasks from selected criterions"""
where = []
args = []
if task_id:
if isinstance(task_id, (str, int)):
where.append("id = %s")
else:
where.append("id in %s")
task_id = tuple(task_id)
args.append(task_id)
if task_type:
if isinstance(task_type, str):
where.append("type = %s")
else:
where.append("type in %s")
task_type = tuple(task_type)
args.append(task_type)
if status:
if isinstance(status, str):
where.append("status = %s")
else:
where.append("status in %s")
status = tuple(status)
args.append(status)
if priority:
if isinstance(priority, str):
where.append("priority = %s")
else:
priority = tuple(priority)
where.append("priority in %s")
args.append(priority)
if policy:
where.append("policy = %s")
args.append(policy)
if before:
where.append("next_run <= %s")
args.append(before)
if after:
where.append("next_run >= %s")
args.append(after)
query = "select * from task"
if where:
query += " where " + " and ".join(where)
if limit:
query += " limit %s :: bigint"
args.append(limit)
cur.execute(query, args)
return cur.fetchall()
@db_transaction()
def get_tasks(self, task_ids, db=None, cur=None):
"""Retrieve the info of tasks whose ids are listed."""
query = format_query("select {keys} from task where id in %s", self.task_keys)
cur.execute(query, (tuple(task_ids),))
return cur.fetchall()
@db_transaction()
def peek_ready_tasks(
self,
task_type,
timestamp=None,
num_tasks=None,
num_tasks_priority=None,
db=None,
cur=None,
):
"""Fetch the list of ready tasks
Args:
task_type (str): filter tasks by their type
timestamp (datetime.datetime): peek tasks that need to be executed
before that timestamp
num_tasks (int): only peek at num_tasks tasks (with no priority)
num_tasks_priority (int): only peek at num_tasks_priority
tasks (with priority)
Returns:
a list of tasks
"""
if timestamp is None:
timestamp = utcnow()
cur.execute(
"""select * from swh_scheduler_peek_ready_tasks(
%s, %s, %s :: bigint, %s :: bigint)""",
(task_type, timestamp, num_tasks, num_tasks_priority),
)
logger.debug("PEEK %s => %s" % (task_type, cur.rowcount))
return cur.fetchall()
@db_transaction()
def grab_ready_tasks(
self,
task_type,
timestamp=None,
num_tasks=None,
num_tasks_priority=None,
db=None,
cur=None,
):
"""Fetch the list of ready tasks, and mark them as scheduled
Args:
task_type (str): filter tasks by their type
timestamp (datetime.datetime): grab tasks that need to be executed
before that timestamp
num_tasks (int): only grab num_tasks tasks (with no priority)
num_tasks_priority (int): only grab num_tasks_priority tasks (with
priority)
Returns:
a list of tasks
"""
if timestamp is None:
timestamp = utcnow()
cur.execute(
"""select * from swh_scheduler_grab_ready_tasks(
%s, %s, %s :: bigint, %s :: bigint)""",
(task_type, timestamp, num_tasks, num_tasks_priority),
)
logger.debug("GRAB %s => %s" % (task_type, cur.rowcount))
return cur.fetchall()
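# Editor's illustration (not part of this patch): grabbing a batch of ready
# tasks, splitting the budget between non-priority and priority slots with
# compute_nb_tasks_from (the task type is a placeholder).
#
#   num_tasks, num_tasks_priority = compute_nb_tasks_from(100)
#   tasks = backend.grab_ready_tasks(
#       "load-git", num_tasks=num_tasks, num_tasks_priority=num_tasks_priority
#   )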
task_run_create_keys = ["task", "backend_id", "scheduled", "metadata"]
@db_transaction()
def schedule_task_run(
self, task_id, backend_id, metadata=None, timestamp=None, db=None, cur=None
):
"""Mark a given task as scheduled, adding a task_run entry in the database.
Args:
task_id (int): the identifier for the task being scheduled
backend_id (str): the identifier of the job in the backend
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
a fresh task_run entry
"""
if metadata is None:
metadata = {}
if timestamp is None:
timestamp = utcnow()
cur.execute(
"select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)",
(task_id, backend_id, metadata, timestamp),
)
return cur.fetchone()
@db_transaction()
def mass_schedule_task_runs(self, task_runs, db=None, cur=None):
"""Schedule a bunch of task runs.
Args:
task_runs (list): a list of dicts with keys:
- task (int): the identifier for the task being scheduled
- backend_id (str): the identifier of the job in the backend
- metadata (dict): metadata to add to the task_run entry
- scheduled (datetime.datetime): the instant the event occurred
Returns:
None
"""
cur.execute("select swh_scheduler_mktemp_task_run()")
db.copy_to(task_runs, "tmp_task_run", self.task_run_create_keys, cur=cur)
cur.execute("select swh_scheduler_schedule_task_run_from_temp()")
@db_transaction()
def start_task_run(
self, backend_id, metadata=None, timestamp=None, db=None, cur=None
):
"""Mark a given task as started, updating the corresponding task_run
entry in the database.
Args:
backend_id (str): the identifier of the job in the backend
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
the updated task_run entry
"""
if metadata is None:
metadata = {}
if timestamp is None:
timestamp = utcnow()
cur.execute(
"select * from swh_scheduler_start_task_run(%s, %s, %s)",
(backend_id, metadata, timestamp),
)
return cur.fetchone()
@db_transaction()
def end_task_run(
self,
backend_id,
status,
metadata=None,
timestamp=None,
result=None,
db=None,
cur=None,
):
"""Mark a given task as ended, updating the corresponding task_run entry in the
database.
Args:
backend_id (str): the identifier of the job in the backend
status (str): how the task ended; one of: 'eventful', 'uneventful',
'failed'
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
the updated task_run entry
"""
if metadata is None:
metadata = {}
if timestamp is None:
timestamp = utcnow()
cur.execute(
"select * from swh_scheduler_end_task_run(%s, %s, %s, %s)",
(backend_id, status, metadata, timestamp),
)
return cur.fetchone()
@db_transaction()
def filter_task_to_archive(
self,
after_ts: str,
before_ts: str,
limit: int = 10,
page_token: Optional[str] = None,
db=None,
cur=None,
) -> Dict[str, Any]:
"""Compute the tasks to archive within the datetime interval
[after_ts, before_ts[. The method returns a paginated result.
Returns:
dict with the following keys:
- **next_page_token**: opaque token to be used as
`page_token` to retrieve the next page of results. If absent,
there are no more pages to gather.
- **tasks**: list of task dictionaries with the following keys:
**id** (str): origin task id
**started** (Optional[datetime]): started date
**scheduled** (datetime): scheduled date
**arguments** (json dict): task's arguments
...
"""
assert not page_token or isinstance(page_token, str)
last_id = -1 if page_token is None else int(page_token)
tasks = []
cur.execute(
"select * from swh_scheduler_task_to_archive(%s, %s, %s, %s)",
(after_ts, before_ts, last_id, limit + 1),
)
for row in cur:
task = dict(row)
# nested type index does not accept bare values
# transform it as a dict to comply with this
task["arguments"]["args"] = {
i: v for i, v in enumerate(task["arguments"]["args"])
}
kwargs = task["arguments"]["kwargs"]
task["arguments"]["kwargs"] = json.dumps(kwargs)
tasks.append(task)
if len(tasks) >= limit + 1: # remains data, add pagination information
result = {
"tasks": tasks[:limit],
"next_page_token": str(tasks[-1]["task_id"]),
}
else:
result = {"tasks": tasks}
return result
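# Editor's illustration (not part of this patch): paging through the tasks to
# archive by chaining the opaque next_page_token values.
#
#   result = backend.filter_task_to_archive(after_ts, before_ts, limit=1000)
#   tasks = result["tasks"]
#   while "next_page_token" in result:
#       result = backend.filter_task_to_archive(
#           after_ts, before_ts, limit=1000, page_token=result["next_page_token"]
#       )
#       tasks.extend(result["tasks"])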
@db_transaction()
def delete_archived_tasks(self, task_ids, db=None, cur=None):
"""Delete archived tasks as much as possible. Only the task_ids whose
complete associated task_run have been cleaned up will be.
"""
_task_ids, _task_run_ids = [], []
for task_id in task_ids:
_task_ids.append(task_id["task_id"])
_task_run_ids.append(task_id["task_run_id"])
cur.execute(
"select * from swh_scheduler_delete_archived_tasks(%s, %s)",
(_task_ids, _task_run_ids),
)
task_run_keys = [
"id",
"task",
"backend_id",
"scheduled",
"started",
"ended",
"metadata",
"status",
]
@db_transaction()
def get_task_runs(self, task_ids, limit=None, db=None, cur=None):
"""Search task run for a task id"""
where = []
args = []
if task_ids:
if isinstance(task_ids, (str, int)):
where.append("task = %s")
else:
where.append("task in %s")
task_ids = tuple(task_ids)
args.append(task_ids)
else:
return ()
query = "select * from task_run where " + " and ".join(where)
if limit:
query += " limit %s :: bigint"
args.append(limit)
cur.execute(query, args)
return cur.fetchall()
@db_transaction()
def get_priority_ratios(self, db=None, cur=None):
cur.execute("select id, ratio from priority_ratio")
return {row["id"]: row["ratio"] for row in cur.fetchall()}
diff --git a/swh/scheduler/backend_es.py b/swh/scheduler/backend_es.py
index ad7b6a3..e1b949c 100644
--- a/swh/scheduler/backend_es.py
+++ b/swh/scheduler/backend_es.py
@@ -1,268 +1,269 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
"""Elastic Search backend
"""
+from copy import deepcopy
import datetime # noqa
import logging
-
-from copy import deepcopy
from typing import Any, Dict
from elasticsearch import helpers
from swh.core import utils
logger = logging.getLogger(__name__)
DEFAULT_CONFIG = {
"elasticsearch": {
"cls": "local",
"args": {
"index_name_prefix": "swh-tasks",
"storage_nodes": ["localhost:9200"],
"client_options": {
"sniff_on_start": False,
"sniff_on_connection_fail": True,
"http_compress": False,
"sniffer_timeout": 60,
},
},
}
}
def get_elasticsearch(cls: str, args: Dict[str, Any] = {}):
"""Instantiate an elastic search instance
"""
if cls == "local":
from elasticsearch import Elasticsearch
elif cls == "memory":
- from .elasticsearch_memory import MemoryElasticsearch as Elasticsearch # type: ignore # noqa
+ from .elasticsearch_memory import ( # type: ignore # noqa
+ MemoryElasticsearch as Elasticsearch,
+ )
else:
raise ValueError("Unknown elasticsearch class `%s`" % cls)
return Elasticsearch(**args)
class ElasticSearchBackend:
"""ElasticSearch backend to index tasks
This uses an elasticsearch client to actually discuss with the
elasticsearch instance.
"""
def __init__(self, **config):
self.config = deepcopy(DEFAULT_CONFIG)
self.config.update(config)
es_conf = self.config["elasticsearch"]
args = deepcopy(es_conf["args"])
self.index_name_prefix = args.pop("index_name_prefix")
self.storage = get_elasticsearch(
cls=es_conf["cls"],
args={
"hosts": args.get("storage_nodes", []),
**args.get("client_options", {}),
},
)
# document's index type (cf. /data/elastic-template.json)
self.doc_type = "task"
def initialize(self):
self.storage.indices.put_mapping(
index=f"{self.index_name_prefix}-*",
doc_type=self.doc_type,
# to allow type definition below
include_type_name=True,
# to allow install mapping even if no index yet
allow_no_indices=True,
body={
"properties": {
"task_id": {"type": "double"},
"task_policy": {"type": "text"},
"task_status": {"type": "text"},
"task_run_id": {"type": "double"},
"arguments": {
"type": "object",
"properties": {
"args": {"type": "nested", "dynamic": False},
"kwargs": {"type": "text"},
},
},
"type": {"type": "text"},
"backend_id": {"type": "text"},
"metadata": {"type": "object", "enabled": False},
"scheduled": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa
},
"started": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa
},
"ended": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||strict_date_optional_time||epoch_millis", # noqa
},
}
},
)
self.storage.indices.put_settings(
index=f"{self.index_name_prefix}-*",
allow_no_indices=True,
body={
"index": {
"codec": "best_compression",
"refresh_interval": "1s",
"number_of_shards": 1,
}
},
)
def create(self, index_name) -> None:
"""Create and initialize index_name with mapping for all indices
matching `swh-tasks-` pattern
"""
assert index_name.startswith(self.index_name_prefix)
self.storage.indices.create(index_name)
def compute_index_name(self, year, month):
"""Given a year, month, compute the index's name.
"""
return "%s-%s-%s" % (self.index_name_prefix, year, "%02d" % month)
def mget(self, index_name, doc_ids, chunk_size=500, source=True):
"""Retrieve document's full content according to their ids as per
source's setup.
The `source` allows to retrieve only what's interesting, e.g:
- source=True ; gives back the original indexed data
- source=False ; returns without the original _source field
- source=['task_id'] ; returns only task_id in the _source field
Args:
index_name (str): Name of the concerned index.
doc_ids (generator): Generator of ids to retrieve
chunk_size (int): Number of documents per chunk to send for retrieval
source (bool/[str]): Source of information to return
Yields:
document indexed as per source's setup
"""
if isinstance(source, list):
source = {"_source": ",".join(source)}
else:
source = {"_source": str(source).lower()}
for ids in utils.grouper(doc_ids, n=1000):
res = self.storage.mget(
body={"ids": list(ids)},
index=index_name,
doc_type=self.doc_type,
params=source,
)
if not res:
logger.error("Error during retrieval of data, skipping!")
continue
for doc in res["docs"]:
found = doc.get("found")
if not found:
msg = "Doc id %s not found, not indexed yet" % doc["_id"]
logger.warning(msg)
continue
yield doc["_source"]
def _streaming_bulk(self, index_name, doc_stream, chunk_size=500):
"""Bulk index data and returns the successful indexed data's
identifier.
Args:
index_name (str): Name of the concerned index.
doc_stream (generator): Generator of documents to index
chunk_size (int): Number of documents chunk to send for indexation
Yields:
document id indexed
"""
actions = (
{
"_index": index_name,
"_op_type": "index",
"_type": self.doc_type,
"_source": data,
}
for data in doc_stream
)
for ok, result in helpers.streaming_bulk(
client=self.storage,
actions=actions,
chunk_size=chunk_size,
raise_on_error=False,
raise_on_exception=False,
):
if not ok:
logger.error("Error during %s indexation. Skipping.", result)
continue
yield result["index"]["_id"]
def is_index_opened(self, index_name: str) -> bool:
"""Determine if an index is opened or not
"""
try:
self.storage.indices.stats(index_name)
return True
except Exception:
# fails when indice is closed (no other api call found)
return False
def streaming_bulk(self, index_name, doc_stream, chunk_size=500, source=True):
"""Bulk index data and returns the successful indexed data as per
source's setup.
the `source` permits to retrieve only what's of interest to
us, e.g:
- source=True ; gives back the original indexed data
- source=False ; returns without the original _source field
- source=['task_id'] ; returns only task_id in the _source field
Note that:
- if the index is closed, it will be opened
- if the index does not exist, it will be created and opened
This keeps the index open for performance reasons.
Args:
index_name (str): Name of the concerned index.
doc_stream (generator): Document generator to index
chunk_size (int): Number of documents chunk to send
source (bool, [str]): the information to return
"""
# index must exist
if not self.storage.indices.exists(index_name):
self.create(index_name)
# index must be opened
if not self.is_index_opened(index_name):
self.storage.indices.open(index_name)
indexed_ids = self._streaming_bulk(
index_name, doc_stream, chunk_size=chunk_size
)
yield from self.mget(
index_name, indexed_ids, chunk_size=chunk_size, source=source
)
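# Editor's illustration (not part of this patch): a sketch of indexing a stream
# of task documents and reading back only their task_id (the documents and
# index date are placeholders).
#
#   backend = ElasticSearchBackend()
#   index_name = backend.compute_index_name(2020, 6)
#   docs = ({"task_id": i, "type": "load-git"} for i in range(10))
#   for doc in backend.streaming_bulk(index_name, docs, source=["task_id"]):
#       print(doc["task_id"])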
diff --git a/swh/scheduler/celery_backend/config.py b/swh/scheduler/celery_backend/config.py
index 58a57d8..de3c2ee 100644
--- a/swh/scheduler/celery_backend/config.py
+++ b/swh/scheduler/celery_backend/config.py
@@ -1,345 +1,342 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import functools
import logging
import os
-import pkg_resources
import traceback
from typing import Any, Dict
import urllib.parse
from celery import Celery
-from celery.signals import setup_logging, celeryd_after_setup, worker_init
+from celery.signals import celeryd_after_setup, setup_logging, worker_init
from celery.utils.log import ColorFormatter
from celery.worker.control import Panel
-
from kombu import Exchange, Queue
from kombu.five import monotonic as _monotonic
-
+import pkg_resources
import requests
-from swh.scheduler import CONFIG as SWH_CONFIG
-
from swh.core.config import load_named_config, merge_configs
from swh.core.sentry import init_sentry
+from swh.scheduler import CONFIG as SWH_CONFIG
try:
from swh.core.logger import JournalHandler
except ImportError:
JournalHandler = None # type: ignore
DEFAULT_CONFIG_NAME = "worker"
CONFIG_NAME_ENVVAR = "SWH_WORKER_INSTANCE"
CONFIG_NAME_TEMPLATE = "worker/%s"
DEFAULT_CONFIG = {
"task_broker": ("str", "amqp://guest@localhost//"),
"task_modules": ("list[str]", []),
"task_queues": ("list[str]", []),
"task_soft_time_limit": ("int", 0),
}
logger = logging.getLogger(__name__)
# Celery eats tracebacks in signal callbacks, this decorator catches
# and prints them.
# Also tries to notify Sentry if possible.
def _print_errors(f):
@functools.wraps(f)
def newf(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as exc:
traceback.print_exc()
try:
import sentry_sdk
sentry_sdk.capture_exception(exc)
except Exception:
traceback.print_exc()
return newf
@setup_logging.connect
@_print_errors
def setup_log_handler(
loglevel=None,
logfile=None,
format=None,
colorize=None,
log_console=None,
log_journal=None,
**kwargs,
):
"""Setup logging according to Software Heritage preferences.
We use the command-line loglevel for tasks only, as we never
really care about the debug messages from celery.
"""
if loglevel is None:
loglevel = logging.DEBUG
if isinstance(loglevel, str):
loglevel = logging._nameToLevel[loglevel]
formatter = logging.Formatter(format)
root_logger = logging.getLogger("")
root_logger.setLevel(logging.INFO)
log_target = os.environ.get("SWH_LOG_TARGET", "console")
if log_target == "console":
log_console = True
elif log_target == "journal":
log_journal = True
# this looks for log levels *higher* than DEBUG
if loglevel <= logging.DEBUG and log_console is None:
log_console = True
if log_console:
color_formatter = ColorFormatter(format) if colorize else formatter
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(color_formatter)
root_logger.addHandler(console)
if log_journal:
if not JournalHandler:
root_logger.warning(
"JournalHandler is not available, skipping. "
"Please install swh-core[logging]."
)
else:
systemd_journal = JournalHandler()
systemd_journal.setLevel(logging.DEBUG)
systemd_journal.setFormatter(formatter)
root_logger.addHandler(systemd_journal)
logging.getLogger("celery").setLevel(logging.INFO)
# Silence amqp heartbeat_tick messages
logger = logging.getLogger("amqp")
logger.addFilter(lambda record: not record.msg.startswith("heartbeat_tick"))
logger.setLevel(loglevel)
# Silence useless "Starting new HTTP connection" messages
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Completely disable azure logspam
azure_logger = logging.getLogger("azure.core.pipeline.policies.http_logging_policy")
azure_logger.setLevel(logging.WARNING)
logging.getLogger("swh").setLevel(loglevel)
# get_task_logger makes the swh tasks loggers children of celery.task
logging.getLogger("celery.task").setLevel(loglevel)
return loglevel
@celeryd_after_setup.connect
@_print_errors
def setup_queues_and_tasks(sender, instance, **kwargs):
"""Signal called on worker start.
This automatically registers swh.scheduler.task.Task subclasses as
available celery tasks.
This also subscribes the worker to the "implicit" per-task queues defined
for these task classes.
"""
logger.info("Setup Queues & Tasks for %s", sender)
instance.app.conf["worker_name"] = sender
@worker_init.connect
@_print_errors
def on_worker_init(*args, **kwargs):
try:
from sentry_sdk.integrations.celery import CeleryIntegration
except ImportError:
integrations = []
else:
integrations = [CeleryIntegration()]
sentry_dsn = None # will be set in `init_sentry` function
init_sentry(sentry_dsn, integrations=integrations)
@Panel.register
def monotonic(state):
"""Get the current value for the monotonic clock"""
return {"monotonic": _monotonic()}
def route_for_task(name, args, kwargs, options, task=None, **kw):
"""Route tasks according to the task_queue attribute in the task class"""
if name is not None and name.startswith("swh."):
return {"queue": name}
def get_queue_stats(app, queue_name):
"""Get the statistics regarding a queue on the broker.
Arguments:
queue_name: name of the queue to check
Returns a raw dictionary from the RabbitMQ management API;
or `None` if the current configuration does not use RabbitMQ.
Interesting keys:
- Consumers (number of consumers for the queue)
- messages (number of messages in queue)
- messages_unacknowledged (number of messages currently being
processed)
Documentation: https://www.rabbitmq.com/management.html#http-api
"""
conn_info = app.connection().info()
if conn_info["transport"] == "memory":
# We're running in a test environment, without RabbitMQ.
return None
url = "http://{hostname}:{port}/api/queues/{vhost}/{queue}".format(
hostname=conn_info["hostname"],
port=conn_info["port"] + 10000,
vhost=urllib.parse.quote(conn_info["virtual_host"], safe=""),
queue=urllib.parse.quote(queue_name, safe=""),
)
credentials = (conn_info["userid"], conn_info["password"])
r = requests.get(url, auth=credentials)
if r.status_code == 404:
return {}
if r.status_code != 200:
raise ValueError(
"Got error %s when reading queue stats: %s" % (r.status_code, r.json())
)
return r.json()
def get_queue_length(app, queue_name):
"""Shortcut to get a queue's length"""
stats = get_queue_stats(app, queue_name)
if stats:
return stats.get("messages")
def register_task_class(app, name, cls):
"""Register a class-based task under the given name"""
if name in app.tasks:
return
task_instance = cls()
task_instance.name = name
app.register_task(task_instance)
INSTANCE_NAME = os.environ.get(CONFIG_NAME_ENVVAR)
CONFIG_NAME = os.environ.get("SWH_CONFIG_FILENAME")
CONFIG = {} # type: Dict[str, Any]
if CONFIG_NAME:
# load the celery config from the main config file given as
# SWH_CONFIG_FILENAME environment variable.
# This is expected to have a [celery] section in which we have the
# celery specific configuration.
SWH_CONFIG.clear()
SWH_CONFIG.update(load_named_config(CONFIG_NAME))
CONFIG = SWH_CONFIG.get("celery", {})
if not CONFIG:
# otherwise, back to compat config loading mechanism
if INSTANCE_NAME:
CONFIG_NAME = CONFIG_NAME_TEMPLATE % INSTANCE_NAME
else:
CONFIG_NAME = DEFAULT_CONFIG_NAME
# Load the Celery config
CONFIG = load_named_config(CONFIG_NAME, DEFAULT_CONFIG)
CONFIG.setdefault("task_modules", [])
# load tasks modules declared as plugin entry points
for entrypoint in pkg_resources.iter_entry_points("swh.workers"):
worker_registrer_fn = entrypoint.load()
# The registry function is expected to return a dict whose 'task_modules' key
# is a string (or a list of strings) with the name of the python module in
# which celery tasks are defined.
task_modules = worker_registrer_fn().get("task_modules", [])
CONFIG["task_modules"].extend(task_modules)
# Celery Queues
CELERY_QUEUES = [Queue("celery", Exchange("celery"), routing_key="celery")]
CELERY_DEFAULT_CONFIG = dict(
# Timezone configuration: all in UTC
enable_utc=True,
timezone="UTC",
# Imported modules
imports=CONFIG.get("task_modules", []),
# Time (in seconds, or a timedelta object) for when after stored task
# tombstones will be deleted. None means to never expire results.
result_expires=None,
# A string identifying the default serialization method to use. Can
# be json (default), pickle, yaml, msgpack, or any custom
# serialization methods that have been registered with
task_serializer="msgpack",
# Result serialization format
result_serializer="msgpack",
# Late ack means the task messages will be acknowledged after the task has
# been executed, not just before, which is the default behavior.
task_acks_late=True,
# A string identifying the default serialization method to use.
# Can be pickle (default), json, yaml, msgpack or any custom serialization
# methods that have been registered with kombu.serialization.registry
accept_content=["msgpack", "json"],
# If True the task will report its status as “started”
# when the task is executed by a worker.
task_track_started=True,
# Default compression used for task messages. Can be gzip, bzip2
# (if available), or any custom compression schemes registered
# in the Kombu compression registry.
# result_compression='bzip2',
# task_compression='bzip2',
# Disable all rate limits, even if tasks has explicit rate limits set.
# (Disabling rate limits altogether is recommended if you don’t have any
# tasks using them.)
worker_disable_rate_limits=True,
# Task routing
task_routes=route_for_task,
# Allow pool restarts from remote
worker_pool_restarts=True,
# Do not prefetch tasks
worker_prefetch_multiplier=1,
# Send events
worker_send_task_events=True,
# Do not send useless task_sent events
task_send_sent_event=False,
)
def build_app(config=None):
config = merge_configs(
{k: v for (k, (_, v)) in DEFAULT_CONFIG.items()}, config or {}
)
config["task_queues"] = CELERY_QUEUES + [
Queue(queue, Exchange(queue), routing_key=queue)
for queue in config.get("task_queues", ())
]
logger.debug("Creating a Celery app with %s", config)
# Instantiate the Celery app
app = Celery(broker=config["task_broker"], task_cls="swh.scheduler.task:SWHTask")
app.add_defaults(CELERY_DEFAULT_CONFIG)
app.add_defaults(config)
return app
app = build_app(CONFIG)
# XXX for BW compat
Celery.get_queue_length = get_queue_length
diff --git a/swh/scheduler/celery_backend/listener.py b/swh/scheduler/celery_backend/listener.py
index a9e3fc7..b608269 100644
--- a/swh/scheduler/celery_backend/listener.py
+++ b/swh/scheduler/celery_backend/listener.py
@@ -1,222 +1,220 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import logging
-import time
import sys
-
-import click
+import time
from arrow import utcnow
-from kombu import Queue
-
import celery
from celery.events import EventReceiver
+import click
+from kombu import Queue
from swh.core.statsd import statsd
class ReliableEventReceiver(EventReceiver):
def __init__(
self,
channel,
handlers=None,
routing_key="#",
node_id=None,
app=None,
queue_prefix="celeryev",
accept=None,
):
super(ReliableEventReceiver, self).__init__(
channel, handlers, routing_key, node_id, app, queue_prefix, accept
)
self.queue = Queue(
".".join([self.queue_prefix, self.node_id]),
exchange=self.exchange,
routing_key=self.routing_key,
auto_delete=False,
durable=True,
)
def get_consumers(self, consumer, channel):
return [
consumer(
queues=[self.queue],
callbacks=[self._receive],
no_ack=False,
accept=self.accept,
)
]
def _receive(self, bodies, message):
if not isinstance(bodies, list): # celery<4 returned body as element
bodies = [bodies]
for body in bodies:
type, body = self.event_from_message(body)
self.process(type, body, message)
def process(self, type, event, message):
"""Process the received event by dispatching it to the appropriate
handler."""
handler = self.handlers.get(type) or self.handlers.get("*")
if handler:
handler(event, message)
statsd.increment(
"swh_scheduler_listener_handled_event_total", tags={"event_type": type}
)
ACTION_SEND_DELAY = datetime.timedelta(seconds=1.0)
ACTION_QUEUE_MAX_LENGTH = 1000
def event_monitor(app, backend):
logger = logging.getLogger("swh.scheduler.listener")
actions = {
"last_send": utcnow() - 2 * ACTION_SEND_DELAY,
"queue": [],
}
def try_perform_actions(actions=actions):
logger.debug("Try perform pending actions")
if actions["queue"] and (
len(actions["queue"]) > ACTION_QUEUE_MAX_LENGTH
or utcnow() - actions["last_send"] > ACTION_SEND_DELAY
):
perform_actions(actions)
def perform_actions(actions, backend=backend):
logger.info("Perform %s pending actions" % len(actions["queue"]))
action_map = {
"start_task_run": backend.start_task_run,
"end_task_run": backend.end_task_run,
}
messages = []
db = backend.get_db()
try:
cursor = db.cursor(None)
for action in actions["queue"]:
messages.append(action["message"])
function = action_map[action["action"]]
args = action.get("args", ())
kwargs = action.get("kwargs", {})
kwargs["cur"] = cursor
function(*args, **kwargs)
except Exception:
db.conn.rollback()
else:
db.conn.commit()
finally:
backend.put_db(db)
for message in messages:
if not message.acknowledged:
message.ack()
actions["queue"] = []
actions["last_send"] = utcnow()
def queue_action(action, actions=actions):
actions["queue"].append(action)
try_perform_actions()
def catchall_event(event, message):
logger.debug("event: %s %s", event["type"], event.get("name", "N/A"))
if not message.acknowledged:
message.ack()
try_perform_actions()
def task_started(event, message):
logger.debug("task_started: %s %s", event["type"], event.get("name", "N/A"))
queue_action(
{
"action": "start_task_run",
"args": [event["uuid"]],
"kwargs": {
"timestamp": utcnow(),
"metadata": {"worker": event["hostname"],},
},
"message": message,
}
)
def task_succeeded(event, message):
logger.debug("task_succeeded: event: %s" % event)
logger.debug(" message: %s" % message)
result = event["result"]
logger.debug("task_succeeded: result: %s" % result)
try:
status = result.get("status")
if status == "success":
status = "eventful" if result.get("eventful") else "uneventful"
except Exception:
status = "eventful" if result else "uneventful"
queue_action(
{
"action": "end_task_run",
"args": [event["uuid"]],
"kwargs": {"timestamp": utcnow(), "status": status, "result": result,},
"message": message,
}
)
def task_failed(event, message):
logger.debug("task_failed: event: %s" % event)
logger.debug(" message: %s" % message)
queue_action(
{
"action": "end_task_run",
"args": [event["uuid"]],
"kwargs": {"timestamp": utcnow(), "status": "failed",},
"message": message,
}
)
recv = ReliableEventReceiver(
celery.current_app.connection(),
app=celery.current_app,
handlers={
"task-started": task_started,
"task-result": task_succeeded,
"task-failed": task_failed,
"*": catchall_event,
},
node_id="listener",
)
errors = 0
while True:
try:
recv.capture(limit=None, timeout=None, wakeup=True)
errors = 0
except KeyboardInterrupt:
logger.exception("Keyboard interrupt, exiting")
break
except Exception:
logger.exception("Unexpected exception")
if errors < 5:
time.sleep(errors)
errors += 1
else:
logger.error("Too many consecutive errors, exiting")
sys.exit(1)
@click.command()
@click.pass_context
def main(ctx):
click.echo("Deprecated! Use 'swh-scheduler listener' instead.", err=True)
ctx.exit(1)
if __name__ == "__main__":
main()
diff --git a/swh/scheduler/celery_backend/runner.py b/swh/scheduler/celery_backend/runner.py
index b8592d0..957c1b7 100644
--- a/swh/scheduler/celery_backend/runner.py
+++ b/swh/scheduler/celery_backend/runner.py
@@ -1,131 +1,131 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import arrow
import logging
+
+import arrow
from kombu.utils.uuid import uuid
from swh.core.statsd import statsd
-from swh.scheduler import get_scheduler, compute_nb_tasks_from
-
+from swh.scheduler import compute_nb_tasks_from, get_scheduler
logger = logging.getLogger(__name__)
# Max batch size for tasks
MAX_NUM_TASKS = 10000
def run_ready_tasks(backend, app):
"""Run tasks that are ready
Args:
backend (Scheduler): backend to read tasks to schedule
app (App): Celery application to send tasks to
Returns:
A list of dictionaries::
{
'task': the scheduler's task id,
'backend_id': Celery's task id,
'scheduled': arrow.utcnow()
}
The result can be used to block-wait for the tasks' results::
backend_tasks = run_ready_tasks(self.scheduler, app)
for task in backend_tasks:
AsyncResult(id=task['backend_id']).get()
"""
all_backend_tasks = []
while True:
task_types = {}
pending_tasks = []
for task_type in backend.get_task_types():
task_type_name = task_type["type"]
task_types[task_type_name] = task_type
max_queue_length = task_type["max_queue_length"]
if max_queue_length is None:
max_queue_length = 0
backend_name = task_type["backend_name"]
if max_queue_length:
try:
queue_length = app.get_queue_length(backend_name)
except ValueError:
queue_length = None
if queue_length is None:
# Running without RabbitMQ (probably a test env).
num_tasks = MAX_NUM_TASKS
else:
num_tasks = min(max_queue_length - queue_length, MAX_NUM_TASKS)
else:
num_tasks = MAX_NUM_TASKS
# only pull tasks if the buffer is at least 1/5th empty (= 80%
# full), to help postgresql use properly indexed queries.
if num_tasks > min(MAX_NUM_TASKS, max_queue_length) // 5:
num_tasks, num_tasks_priority = compute_nb_tasks_from(num_tasks)
grabbed_tasks = backend.grab_ready_tasks(
task_type_name,
num_tasks=num_tasks,
num_tasks_priority=num_tasks_priority,
)
if grabbed_tasks:
pending_tasks.extend(grabbed_tasks)
logger.info(
"Grabbed %s tasks %s", len(grabbed_tasks), task_type_name
)
statsd.increment(
"swh_scheduler_runner_scheduled_task_total",
len(grabbed_tasks),
tags={"task_type": task_type_name},
)
if not pending_tasks:
return all_backend_tasks
backend_tasks = []
celery_tasks = []
for task in pending_tasks:
args = task["arguments"]["args"]
kwargs = task["arguments"]["kwargs"]
backend_name = task_types[task["type"]]["backend_name"]
backend_id = uuid()
celery_tasks.append((backend_name, backend_id, args, kwargs))
data = {
"task": task["id"],
"backend_id": backend_id,
"scheduled": arrow.utcnow(),
}
backend_tasks.append(data)
logger.debug("Sent %s celery tasks", len(backend_tasks))
backend.mass_schedule_task_runs(backend_tasks)
for backend_name, backend_id, args, kwargs in celery_tasks:
app.send_task(
backend_name, task_id=backend_id, args=args, kwargs=kwargs,
)
all_backend_tasks.extend(backend_tasks)
def main():
from .config import app as main_app
for module in main_app.conf.CELERY_IMPORTS:
__import__(module)
main_backend = get_scheduler("local")
try:
run_ready_tasks(main_backend, main_app)
except Exception:
main_backend.rollback()
raise
if __name__ == "__main__":
main()
diff --git a/swh/scheduler/cli/__init__.py b/swh/scheduler/cli/__init__.py
index 4cd81c6..eb4db7c 100644
--- a/swh/scheduler/cli/__init__.py
+++ b/swh/scheduler/cli/__init__.py
@@ -1,93 +1,93 @@
# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
# WARNING: do not import unnecessary things here to keep cli startup time under
# control
import logging
import click
from swh.core.cli import CONTEXT_SETTINGS, AliasedGroup
@click.group(name="scheduler", context_settings=CONTEXT_SETTINGS, cls=AliasedGroup)
@click.option(
"--config-file",
"-C",
default=None,
type=click.Path(exists=True, dir_okay=False,),
help="Configuration file.",
)
@click.option(
"--database",
"-d",
default=None,
help="Scheduling database DSN (imply cls is 'local')",
)
@click.option(
"--url", "-u", default=None, help="Scheduler's url access (imply cls is 'remote')"
)
@click.option(
"--no-stdout", is_flag=True, default=False, help="Do NOT output logs on the console"
)
@click.pass_context
def cli(ctx, config_file, database, url, no_stdout):
"""Software Heritage Scheduler tools.
Use a local scheduler instance by default (plugged to the
main scheduler db).
"""
try:
from psycopg2 import OperationalError
except ImportError:
class OperationalError(Exception):
pass
from swh.core import config
- from swh.scheduler import get_scheduler, DEFAULT_CONFIG
+ from swh.scheduler import DEFAULT_CONFIG, get_scheduler
ctx.ensure_object(dict)
logger = logging.getLogger(__name__)
scheduler = None
conf = config.read(config_file, DEFAULT_CONFIG)
if "scheduler" not in conf:
raise ValueError("missing 'scheduler' configuration")
if database:
conf["scheduler"]["cls"] = "local"
conf["scheduler"]["args"]["db"] = database
elif url:
conf["scheduler"]["cls"] = "remote"
conf["scheduler"]["args"] = {"url": url}
sched_conf = conf["scheduler"]
try:
logger.debug("Instantiating scheduler with %s" % (sched_conf))
scheduler = get_scheduler(**sched_conf)
except (ValueError, OperationalError):
# it's the subcommand to decide whether not having a proper
# scheduler instance is a problem.
pass
ctx.obj["scheduler"] = scheduler
ctx.obj["config"] = conf
from . import admin, celery_monitor, task, task_type # noqa
def main():
import click.core
click.core.DEPRECATED_HELP_NOTICE = """
DEPRECATED! Please use the command 'swh scheduler'."""
cli.deprecated = True
return cli(auto_envvar_prefix="SWH_SCHEDULER")
if __name__ == "__main__":
main()
diff --git a/swh/scheduler/cli/admin.py b/swh/scheduler/cli/admin.py
index 9d8fdd2..2ce0873 100644
--- a/swh/scheduler/cli/admin.py
+++ b/swh/scheduler/cli/admin.py
@@ -1,110 +1,110 @@
# Copyright (C) 2016-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
# WARNING: do not import unnecessary things here to keep cli startup time under
# control
import logging
import time
import click
from . import cli
@cli.command("start-runner")
@click.option(
"--period",
"-p",
default=0,
help=(
"Period (in s) at witch pending tasks are checked and "
"executed. Set to 0 (default) for a one shot."
),
)
@click.pass_context
def runner(ctx, period):
"""Starts a swh-scheduler runner service.
This process is responsible for checking for ready-to-run tasks and
scheduling them."""
- from swh.scheduler.celery_backend.runner import run_ready_tasks
from swh.scheduler.celery_backend.config import build_app
+ from swh.scheduler.celery_backend.runner import run_ready_tasks
app = build_app(ctx.obj["config"].get("celery"))
app.set_current()
logger = logging.getLogger(__name__ + ".runner")
scheduler = ctx.obj["scheduler"]
logger.debug("Scheduler %s" % scheduler)
try:
while True:
logger.debug("Run ready tasks")
try:
ntasks = len(run_ready_tasks(scheduler, app))
if ntasks:
logger.info("Scheduled %s tasks", ntasks)
except Exception:
logger.exception("Unexpected error in run_ready_tasks()")
if not period:
break
time.sleep(period)
except KeyboardInterrupt:
ctx.exit(0)
@cli.command("start-listener")
@click.pass_context
def listener(ctx):
"""Starts a swh-scheduler listener service.
This service is responsible for listening to task lifecycle events and
handling their workflow status in the database."""
scheduler_backend = ctx.obj["scheduler"]
if not scheduler_backend:
raise ValueError("Scheduler class (local/remote) must be instantiated")
broker = (
ctx.obj["config"]
.get("celery", {})
.get("task_broker", "amqp://guest@localhost/%2f")
)
from swh.scheduler.celery_backend.pika_listener import get_listener
listener = get_listener(broker, "celeryev.listener", scheduler_backend)
try:
listener.start_consuming()
finally:
listener.stop_consuming()
@cli.command("rpc-serve")
@click.option("--host", default="0.0.0.0", help="Host to run the scheduler server api")
@click.option("--port", default=5008, type=click.INT, help="Binding port of the server")
@click.option(
"--debug/--nodebug",
default=None,
help=(
"Indicates if the server should run in debug mode. "
"Defaults to True if log-level is DEBUG, False otherwise."
),
)
@click.pass_context
def rpc_server(ctx, host, port, debug):
"""Starts a swh-scheduler API HTTP server.
"""
if ctx.obj["config"]["scheduler"]["cls"] == "remote":
click.echo(
"The API server can only be started with a 'local' " "configuration",
err=True,
)
ctx.exit(1)
from swh.scheduler.api import server
server.app.config.update(ctx.obj["config"])
if debug is None:
debug = ctx.obj["log_level"] <= logging.DEBUG
server.app.run(host, port=port, debug=bool(debug))
diff --git a/swh/scheduler/cli/task.py b/swh/scheduler/cli/task.py
index e415a64..c304163 100644
--- a/swh/scheduler/cli/task.py
+++ b/swh/scheduler/cli/task.py
@@ -1,763 +1,768 @@
# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
# WARNING: do not import unnecessary things here to keep cli startup time under
# control
import locale
import logging
+from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional
import click
-from typing import Any, Dict, Optional, Iterator, TYPE_CHECKING
-
from . import cli
if TYPE_CHECKING:
# importing swh.storage.interface triggers the load of 300+ modules, so...
from swh.model.model import Origin
from swh.storage.interface import StorageInterface
locale.setlocale(locale.LC_ALL, "")
ARROW_LOCALE = locale.getlocale(locale.LC_TIME)[0]
class DateTimeType(click.ParamType):
name = "time and date"
def convert(self, value, param, ctx):
import arrow
if not isinstance(value, arrow.Arrow):
value = arrow.get(value)
return value
DATETIME = DateTimeType()
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
def format_dict(d):
import datetime
+
import arrow
ret = {}
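# Render Arrow/date/datetime values as formatted strings, recursing into nested
# dicts, so the resulting dict can be pretty-printed.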
for k, v in d.items():
if isinstance(v, (arrow.Arrow, datetime.date, datetime.datetime)):
v = arrow.get(v).format()
elif isinstance(v, dict):
v = format_dict(v)
ret[k] = v
return ret
def pretty_print_list(list, indent=0):
"""Pretty-print a list"""
return "".join("%s%r\n" % (" " * indent, item) for item in list)
def pretty_print_dict(dict, indent=0):
"""Pretty-print a list"""
return "".join(
"%s%s: %r\n" % (" " * indent, click.style(key, bold=True), value)
for key, value in sorted(dict.items())
)
def pretty_print_run(run, indent=4):
fmt = (
"{indent}{backend_id} [{status}]\n"
"{indent} scheduled: {scheduled} [{started}:{ended}]"
)
return fmt.format(indent=" " * indent, **format_dict(run))
def pretty_print_task(task, full=False):
"""Pretty-print a task
If 'full' is True, also print the status and priority fields.
>>> import datetime
>>> task = {
... 'id': 1234,
... 'arguments': {
... 'args': ['foo', 'bar', True],
... 'kwargs': {'key': 'value', 'key2': 42},
... },
... 'current_interval': datetime.timedelta(hours=1),
... 'next_run': datetime.datetime(2019, 2, 21, 13, 52, 35, 407818),
... 'policy': 'oneshot',
... 'priority': None,
... 'status': 'next_run_not_scheduled',
... 'type': 'test_task',
... }
>>> print(click.unstyle(pretty_print_task(task)))
Task 1234
Next run: ... (2019-02-21 13:52:35+00:00)
Interval: 1:00:00
Type: test_task
Policy: oneshot
Args:
'foo'
'bar'
True
Keyword args:
key: 'value'
key2: 42
>>> print(click.unstyle(pretty_print_task(task, full=True)))
Task 1234
Next run: ... (2019-02-21 13:52:35+00:00)
Interval: 1:00:00
Type: test_task
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
'foo'
'bar'
True
Keyword args:
key: 'value'
key2: 42
"""
import arrow
next_run = arrow.get(task["next_run"])
lines = [
"%s %s\n" % (click.style("Task", bold=True), task["id"]),
click.style(" Next run: ", bold=True),
"%s (%s)" % (next_run.humanize(locale=ARROW_LOCALE), next_run.format()),
"\n",
click.style(" Interval: ", bold=True),
str(task["current_interval"]),
"\n",
click.style(" Type: ", bold=True),
task["type"] or "",
"\n",
click.style(" Policy: ", bold=True),
task["policy"] or "",
"\n",
]
if full:
lines += [
click.style(" Status: ", bold=True),
task["status"] or "",
"\n",
click.style(" Priority: ", bold=True),
task["priority"] or "",
"\n",
]
lines += [
click.style(" Args:\n", bold=True),
pretty_print_list(task["arguments"]["args"], indent=4),
click.style(" Keyword args:\n", bold=True),
pretty_print_dict(task["arguments"]["kwargs"], indent=4),
]
return "".join(lines)
@cli.group("task")
@click.pass_context
def task(ctx):
"""Manipulate tasks."""
pass
@task.command("schedule")
@click.option(
"--columns",
"-c",
multiple=True,
default=["type", "args", "kwargs", "next_run"],
type=click.Choice(["type", "args", "kwargs", "policy", "next_run"]),
help="columns present in the CSV file",
)
@click.option("--delimiter", "-d", default=",")
@click.argument("file", type=click.File(encoding="utf-8"))
@click.pass_context
def schedule_tasks(ctx, columns, delimiter, file):
"""Schedule tasks from a CSV input file.
The following columns are expected, and can be set through the -c option:
- type: the type of the task to be scheduled (mandatory)
- args: the arguments passed to the task (JSON list, defaults to an empty
list)
- kwargs: the keyword arguments passed to the task (JSON object, defaults
to an empty dict)
- next_run: the date at which the task should run (datetime, defaults to
now)
The CSV can be read either from a named file, or from stdin (use - as
filename).
Usage sample:
cat scheduling-task.txt | \
python3 -m swh.scheduler.cli \
--database 'service=swh-scheduler-dev' \
task schedule \
--columns type --columns kwargs --columns policy \
--delimiter ';' -
"""
import csv
import json
+
import arrow
tasks = []
now = arrow.utcnow()
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
reader = csv.reader(file, delimiter=delimiter)
for line in reader:
task = dict(zip(columns, line))
args = json.loads(task.pop("args", "[]"))
kwargs = json.loads(task.pop("kwargs", "{}"))
task["arguments"] = {
"args": args,
"kwargs": kwargs,
}
task["next_run"] = DATETIME.convert(task.get("next_run", now), None, None)
tasks.append(task)
created = scheduler.create_tasks(tasks)
output = [
"Created %d tasks\n" % len(created),
]
for task in created:
output.append(pretty_print_task(task))
click.echo_via_pager("\n".join(output))
@task.command("add")
@click.argument("type", nargs=1, required=True)
@click.argument("options", nargs=-1)
@click.option(
"--policy", "-p", default="recurring", type=click.Choice(["recurring", "oneshot"])
)
@click.option(
"--priority", "-P", default=None, type=click.Choice(["low", "normal", "high"])
)
@click.option("--next-run", "-n", default=None)
@click.pass_context
def schedule_task(ctx, type, options, policy, priority, next_run):
"""Schedule one task from arguments.
The first argument is the name of the task type, further ones are
positional and keyword argument(s) of the task, in YAML format.
Keyword args are of the form key=value.
Usage sample:
swh-scheduler --database 'service=swh-scheduler' \
task add list-pypi
swh-scheduler --database 'service=swh-scheduler' \
task add list-debian-distribution --policy=oneshot distribution=stretch
Note: if the priority is not given, the task won't have a priority set,
which is considered the lowest priority level.
"""
import arrow
from .utils import parse_options
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
now = arrow.utcnow()
(args, kw) = parse_options(options)
task = {
"type": type,
"policy": policy,
"priority": priority,
"arguments": {"args": args, "kwargs": kw,},
"next_run": DATETIME.convert(next_run or now, None, None),
}
created = scheduler.create_tasks([task])
output = [
"Created %d tasks\n" % len(created),
]
for task in created:
output.append(pretty_print_task(task))
click.echo("\n".join(output))
def iter_origins( # use string annotations to prevent some pkg loading
storage: "StorageInterface", page_token: "Optional[str]" = None,
) -> "Iterator[Origin]":
"""Iterate over origins in the storage. Optionally starting from page_token.
This logs regularly an info message during pagination with the page_token. This, in
order to feed it back to the cli if the process interrupted.
Yields
origin model objects from the storage
"""
while True:
page_result = storage.origin_list(page_token=page_token)
page_token = page_result.next_page_token
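# next_page_token is None once the last page is reached; the loop then stops
# after yielding that page's results.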
yield from page_result.results
if not page_token:
break
click.echo(f"page_token: {page_token}\n")
@task.command("schedule_origins")
@click.argument("type", nargs=1, required=True)
@click.argument("options", nargs=-1)
@click.option(
"--batch-size",
"-b",
"origin_batch_size",
default=10,
show_default=True,
type=int,
help="Number of origins per task",
)
@click.option(
"--page-token",
default=0,
show_default=True,
type=str,
help="Only schedule tasks for origins whose ID is greater",
)
@click.option(
"--limit",
default=None,
type=int,
help="Limit the tasks scheduling up to this number of tasks",
)
@click.option("--storage-url", "-g", help="URL of the (graph) storage API")
@click.option(
"--dry-run/--no-dry-run",
is_flag=True,
default=False,
help="List only what would be scheduled.",
)
@click.pass_context
def schedule_origin_metadata_index(
ctx, type, options, storage_url, origin_batch_size, page_token, limit, dry_run
):
"""Schedules tasks for origins that are already known.
The first argument is the name of the task type, further ones are
keyword argument(s) of the task in the form key=value, where value is
in YAML format.
Usage sample:
swh-scheduler --database 'service=swh-scheduler' \
task schedule_origins index-origin-metadata
"""
from itertools import islice
+
from swh.storage import get_storage
+
from .utils import parse_options, schedule_origin_batches
scheduler = ctx.obj["scheduler"]
storage = get_storage("remote", url=storage_url)
if dry_run:
scheduler = None
(args, kw) = parse_options(options)
if args:
raise click.ClickException("Only keywords arguments are allowed.")
origins = iter_origins(storage, page_token=page_token)
if limit:
origins = islice(origins, limit)
origin_urls = (origin.url for origin in origins)
schedule_origin_batches(scheduler, type, origin_urls, origin_batch_size, kw)
@task.command("list-pending")
@click.argument("task-types", required=True, nargs=-1)
@click.option(
"--limit",
"-l",
required=False,
type=click.INT,
help="The maximum number of tasks to fetch",
)
@click.option(
"--before",
"-b",
required=False,
type=DATETIME,
help="List all jobs supposed to run before the given date",
)
@click.pass_context
def list_pending_tasks(ctx, task_types, limit, before):
"""List the tasks that are going to be run.
You can override the number of tasks to fetch with the --limit option.
"""
from swh.scheduler import compute_nb_tasks_from
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
num_tasks, num_tasks_priority = compute_nb_tasks_from(limit)
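# compute_nb_tasks_from splits the limit between tasks without and with
# priority; e.g., assuming the default 0.6 priority slot, a limit of 10 gives
# 4 non-priority and 6 priority slots.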
output = []
for task_type in task_types:
pending = scheduler.peek_ready_tasks(
task_type,
timestamp=before,
num_tasks=num_tasks,
num_tasks_priority=num_tasks_priority,
)
output.append("Found %d %s tasks\n" % (len(pending), task_type))
for task in pending:
output.append(pretty_print_task(task))
click.echo("\n".join(output))
@task.command("list")
@click.option(
"--task-id",
"-i",
default=None,
multiple=True,
metavar="ID",
help="List only tasks whose id is ID.",
)
@click.option(
"--task-type",
"-t",
default=None,
multiple=True,
metavar="TYPE",
help="List only tasks of type TYPE",
)
@click.option(
"--limit",
"-l",
required=False,
type=click.INT,
help="The maximum number of tasks to fetch.",
)
@click.option(
"--status",
"-s",
multiple=True,
metavar="STATUS",
type=click.Choice(
("next_run_not_scheduled", "next_run_scheduled", "completed", "disabled")
),
default=None,
help="List tasks whose status is STATUS.",
)
@click.option(
"--policy",
"-p",
default=None,
type=click.Choice(["recurring", "oneshot"]),
help="List tasks whose policy is POLICY.",
)
@click.option(
"--priority",
"-P",
default=None,
multiple=True,
type=click.Choice(["all", "low", "normal", "high"]),
help="List tasks whose priority is PRIORITY.",
)
@click.option(
"--before",
"-b",
required=False,
type=DATETIME,
metavar="DATETIME",
help="Limit to tasks supposed to run before the given date.",
)
@click.option(
"--after",
"-a",
required=False,
type=DATETIME,
metavar="DATETIME",
help="Limit to tasks supposed to run after the given date.",
)
@click.option(
"--list-runs",
"-r",
is_flag=True,
default=False,
help="Also list past executions of each task.",
)
@click.pass_context
def list_tasks(
ctx, task_id, task_type, limit, status, policy, priority, before, after, list_runs
):
"""List tasks.
"""
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
if not task_type:
task_type = [x["type"] for x in scheduler.get_task_types()]
# if task_id is not given, default value for status is
# 'next_run_not_scheduled'
# if task_id is given, default status is 'all'
if task_id is None and status is None:
status = ["next_run_not_scheduled"]
if status and "all" in status:
status = None
if priority and "all" in priority:
priority = None
output = []
tasks = scheduler.search_tasks(
task_id=task_id,
task_type=task_type,
status=status,
priority=priority,
policy=policy,
before=before,
after=after,
limit=limit,
)
if list_runs:
runs = {t["id"]: [] for t in tasks}
for r in scheduler.get_task_runs([task["id"] for task in tasks]):
runs[r["task"]].append(r)
else:
runs = {}
output.append("Found %d tasks\n" % (len(tasks)))
for task in tasks:
output.append(pretty_print_task(task, full=True))
if runs.get(task["id"]):
output.append(click.style(" Executions:", bold=True))
for run in runs[task["id"]]:
output.append(pretty_print_run(run, indent=4))
click.echo("\n".join(output))
@task.command("respawn")
@click.argument("task-ids", required=True, nargs=-1)
@click.option(
"--next-run",
"-n",
required=False,
type=DATETIME,
metavar="DATETIME",
default=None,
help="Re spawn the selected tasks at this date",
)
@click.pass_context
def respawn_tasks(ctx, task_ids, next_run):
"""Respawn tasks.
Respawn tasks given by their ids (see the 'task list' command to
find task ids) at the given date (immediately by default).
E.g.:
swh-scheduler task respawn 1 3 12
"""
import arrow
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
if next_run is None:
next_run = arrow.utcnow()
output = []
scheduler.set_status_tasks(
task_ids, status="next_run_not_scheduled", next_run=next_run
)
output.append("Respawn tasks %s\n" % (task_ids,))
click.echo("\n".join(output))
@task.command("archive")
@click.option(
"--before",
"-b",
default=None,
help="""Task whose ended date is anterior will be archived.
Default to current month's first day.""",
)
@click.option(
"--after",
"-a",
default=None,
help="""Task whose ended date is after the specified date will
be archived. Default to prior month's first day.""",
)
@click.option(
"--batch-index",
default=1000,
type=click.INT,
help="Batch size of tasks to read from db to archive",
)
@click.option(
"--bulk-index",
default=200,
type=click.INT,
help="Batch size of tasks to bulk index",
)
@click.option(
"--batch-clean",
default=1000,
type=click.INT,
help="Batch size of task to clean after archival",
)
@click.option(
"--dry-run/--no-dry-run",
is_flag=True,
default=False,
help="Default to list only what would be archived.",
)
@click.option("--verbose", is_flag=True, default=False, help="Verbose mode")
@click.option(
"--cleanup/--no-cleanup",
is_flag=True,
default=True,
help="Clean up archived tasks (default)",
)
@click.option(
"--start-from",
type=click.STRING,
default=None,
help="(Optional) default page to start from.",
)
@click.pass_context
def archive_tasks(
ctx,
before,
after,
batch_index,
bulk_index,
batch_clean,
dry_run,
verbose,
cleanup,
start_from,
):
"""Archive task/task_run whose (task_type is 'oneshot' and task_status
is 'completed') or (task_type is 'recurring' and task_status is
'disabled').
With --dry-run flag set (default), only list those.
"""
- import arrow
from itertools import groupby
+
+ import arrow
+
from swh.core.utils import grouper
from swh.scheduler.backend_es import ElasticSearchBackend
config = ctx.obj["config"]
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
logger = logging.getLogger(__name__)
logging.getLogger("urllib3").setLevel(logging.WARN)
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
if dry_run:
logger.info("**DRY-RUN** (only reading db)")
if not cleanup:
logger.info("**NO CLEANUP**")
es_storage = ElasticSearchBackend(**config)
now = arrow.utcnow()
# Default to archive tasks from a rolling month starting the week
# prior to the current one
if not before:
before = now.shift(weeks=-1).format("YYYY-MM-DD")
if not after:
after = now.shift(weeks=-1).shift(months=-1).format("YYYY-MM-DD")
logger.debug(
"index: %s; cleanup: %s; period: [%s ; %s]"
% (not dry_run, not dry_run and cleanup, after, before)
)
def get_index_name(
data: Dict[str, Any], es_storage: ElasticSearchBackend = es_storage
) -> str:
"""Given a data record, determine the index's name through its ending
date. This varies greatly depending on the task_run's
status.
"""
date = data.get("started")
if not date:
date = data["scheduled"]
return es_storage.compute_index_name(date.year, date.month)
def index_data(before, page_token, batch_index):
while True:
result = scheduler.filter_task_to_archive(
after, before, page_token=page_token, limit=batch_index
)
tasks_sorted = sorted(result["tasks"], key=get_index_name)
groups = groupby(tasks_sorted, key=get_index_name)
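# groupby needs its input sorted by the grouping key, hence the sort above;
# each group is then indexed into its own monthly index.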
for index_name, tasks_group in groups:
logger.debug("Index tasks to %s" % index_name)
if dry_run:
for task in tasks_group:
yield task
continue
yield from es_storage.streaming_bulk(
index_name,
tasks_group,
source=["task_id", "task_run_id"],
chunk_size=bulk_index,
)
page_token = result.get("next_page_token")
if page_token is None:
break
gen = index_data(before, page_token=start_from, batch_index=batch_index)
if cleanup:
for task_ids in grouper(gen, n=batch_clean):
task_ids = list(task_ids)
logger.info("Clean up %s tasks: [%s, ...]" % (len(task_ids), task_ids[0]))
if dry_run: # no clean up
continue
ctx.obj["scheduler"].delete_archived_tasks(task_ids)
else:
for task_ids in grouper(gen, n=batch_index):
task_ids = list(task_ids)
logger.info("Indexed %s tasks: [%s, ...]" % (len(task_ids), task_ids[0]))
logger.debug("Done!")
diff --git a/swh/scheduler/cli/task_type.py b/swh/scheduler/cli/task_type.py
index 4320d44..c6368eb 100644
--- a/swh/scheduler/cli/task_type.py
+++ b/swh/scheduler/cli/task_type.py
@@ -1,233 +1,232 @@
# Copyright (C) 2016-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-# WARNING: do not import unnecessary things here to keep cli startup time under
-# control
-import click
-import logging
-
from importlib import import_module
+import logging
from typing import Mapping
+# WARNING: do not import unnecessary things here to keep cli startup time under
+# control
+import click
from pkg_resources import iter_entry_points
-from . import cli
+from . import cli
logger = logging.getLogger(__name__)
DEFAULT_TASK_TYPE = {
"full": { # for tasks like 'list_xxx_full()'
"default_interval": "90 days",
"min_interval": "90 days",
"max_interval": "90 days",
"backoff_factor": 1,
},
"*": { # value if not suffix matches
"default_interval": "1 day",
"min_interval": "1 day",
"max_interval": "1 day",
"backoff_factor": 1,
},
}
PLUGIN_WORKER_DESCRIPTIONS = {
entry_point.name: entry_point for entry_point in iter_entry_points("swh.workers")
}
@cli.group("task-type")
@click.pass_context
def task_type(ctx):
"""Manipulate task types."""
pass
@task_type.command("list")
@click.option("--verbose", "-v", is_flag=True, default=False, help="Verbose mode")
@click.option(
"--task_type",
"-t",
multiple=True,
default=None,
help="List task types of given type",
)
@click.option(
"--task_name",
"-n",
multiple=True,
default=None,
help="List task types of given backend task name",
)
@click.pass_context
def list_task_types(ctx, verbose, task_type, task_name):
click.echo("Known task types:")
if verbose:
tmpl = (
click.style("{type}: ", bold=True)
+ """{backend_name}
{description}
interval: {default_interval} [{min_interval}, {max_interval}]
backoff_factor: {backoff_factor}
max_queue_length: {max_queue_length}
num_retries: {num_retries}
retry_delay: {retry_delay}
"""
)
else:
tmpl = "{type}:\n {description}"
for tasktype in sorted(
ctx.obj["scheduler"].get_task_types(), key=lambda x: x["type"]
):
if task_type and tasktype["type"] not in task_type:
continue
if task_name and tasktype["backend_name"] not in task_name:
continue
click.echo(tmpl.format(**tasktype))
@task_type.command("register")
@click.option(
"--plugins",
"-p",
"plugins",
multiple=True,
default=("all",),
type=click.Choice(["all"] + list(PLUGIN_WORKER_DESCRIPTIONS)),
help="Registers task-types for provided plugins. " "Defaults to all",
)
@click.pass_context
def register_task_types(ctx, plugins):
"""Register missing task-type entries in the scheduler.
According to declared tasks in each loaded worker (e.g. lister, loader,
...) plugins.
"""
import celery.app.task
scheduler = ctx.obj["scheduler"]
if plugins == ("all",):
plugins = list(PLUGIN_WORKER_DESCRIPTIONS)
for plugin in plugins:
entrypoint = PLUGIN_WORKER_DESCRIPTIONS[plugin]
logger.info("Loading entrypoint for plugin %s", plugin)
registry_entry = entrypoint.load()()
for task_module in registry_entry["task_modules"]:
mod = import_module(task_module)
for task_name in (x for x in dir(mod) if not x.startswith("_")):
logger.debug("Loading task name %s", task_name)
taskobj = getattr(mod, task_name)
if isinstance(taskobj, celery.app.task.Task):
tt_name = task_name.replace("_", "-")
task_cfg = registry_entry.get("task_types", {}).get(tt_name, {})
ensure_task_type(task_module, tt_name, taskobj, task_cfg, scheduler)
def ensure_task_type(
task_module: str, task_type: str, swhtask, task_config: Mapping, scheduler
):
"""Ensure a given task-type (for the task_module) exists in the scheduler.
Args:
task_module: task module we are currently checking for task type
consistency
task_type: the type of the task to check/insert (corresponds to
the 'type' field in the db)
swhtask (SWHTask): the SWHTask instance the task-type corresponds to
task_config: a dict with specific/overloaded values for the
task-type to be created
scheduler: the scheduler object used to access the scheduler db
"""
for suffix, defaults in DEFAULT_TASK_TYPE.items():
if task_type.endswith("-" + suffix):
task_type_dict = defaults.copy()
break
else:
task_type_dict = DEFAULT_TASK_TYPE["*"].copy()
task_type_dict["type"] = task_type
task_type_dict["backend_name"] = swhtask.name
if swhtask.__doc__:
task_type_dict["description"] = swhtask.__doc__.splitlines()[0]
task_type_dict.update(task_config)
current_task_type = scheduler.get_task_type(task_type)
if current_task_type:
# Ensure the existing task_type is consistent in the scheduler
if current_task_type["backend_name"] != task_type_dict["backend_name"]:
logger.warning(
"Existing task type %s for module %s has a "
"different backend name than current "
"code version provides (%s vs. %s)",
task_type,
task_module,
current_task_type["backend_name"],
task_type_dict["backend_name"],
)
else:
logger.info("Create task type %s in scheduler", task_type)
logger.debug(" %s", task_type_dict)
scheduler.create_task_type(task_type_dict)
@task_type.command("add")
@click.argument("type", required=True)
@click.argument("task-name", required=True)
@click.argument("description", required=True)
@click.option(
"--default-interval",
"-i",
default="90 days",
help='Default interval ("90 days" by default)',
)
@click.option(
"--min-interval",
default=None,
help="Minimum interval (default interval if not set)",
)
@click.option(
"--max-interval",
"-i",
default=None,
help="Maximal interval (default interval if not set)",
)
@click.option("--backoff-factor", "-f", type=float, default=1, help="Backoff factor")
@click.pass_context
def add_task_type(
ctx,
type,
task_name,
description,
default_interval,
min_interval,
max_interval,
backoff_factor,
):
"""Create a new task type
"""
scheduler = ctx.obj["scheduler"]
if not scheduler:
raise ValueError("Scheduler class (local/remote) must be instantiated")
task_type = dict(
type=type,
backend_name=task_name,
description=description,
default_interval=default_interval,
min_interval=min_interval,
max_interval=max_interval,
backoff_factor=backoff_factor,
max_queue_length=None,
num_retries=None,
retry_delay=None,
)
scheduler.create_task_type(task_type)
click.echo("OK")
diff --git a/swh/scheduler/cli/utils.py b/swh/scheduler/cli/utils.py
index 2fb02a5..688965a 100644
--- a/swh/scheduler/cli/utils.py
+++ b/swh/scheduler/cli/utils.py
@@ -1,90 +1,90 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
# WARNING: do not import unnecessary things here to keep cli startup time under
# control
import click
-
TASK_BATCH_SIZE = 1000 # Number of tasks per query to the scheduler
def schedule_origin_batches(scheduler, task_type, origins, origin_batch_size, kwargs):
from itertools import islice
+
from swh.scheduler.utils import create_task_dict
nb_origins = 0
nb_tasks = 0
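# Outer loop: build up to TASK_BATCH_SIZE tasks, each wrapping up to
# origin_batch_size origins, then submit that batch to the scheduler in one call.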
while True:
task_batch = []
for _ in range(TASK_BATCH_SIZE):
# Group origins
origin_batch = []
for origin in islice(origins, origin_batch_size):
origin_batch.append(origin)
nb_origins += len(origin_batch)
if not origin_batch:
break
# Create a task for these origins
args = [origin_batch]
task_dict = create_task_dict(task_type, "oneshot", *args, **kwargs)
task_batch.append(task_dict)
# Schedule a batch of tasks
if not task_batch:
break
nb_tasks += len(task_batch)
if scheduler:
scheduler.create_tasks(task_batch)
click.echo("Scheduled %d tasks (%d origins)." % (nb_tasks, nb_origins))
# Print final status.
if nb_tasks:
click.echo("Done.")
else:
click.echo("Nothing to do (no origin metadata matched the criteria).")
def parse_argument(option):
import yaml
try:
return yaml.safe_load(option)
except Exception:
raise click.ClickException("Invalid argument: {}".format(option))
def parse_options(options):
"""Parses options from a CLI as YAML and turns it into Python
args and kwargs.
>>> parse_options([])
([], {})
>>> parse_options(['foo', 'bar'])
(['foo', 'bar'], {})
>>> parse_options(['[foo, bar]'])
([['foo', 'bar']], {})
>>> parse_options(['"foo"', '"bar"'])
(['foo', 'bar'], {})
>>> parse_options(['foo="bar"'])
([], {'foo': 'bar'})
>>> parse_options(['"foo"', 'bar="baz"'])
(['foo'], {'bar': 'baz'})
>>> parse_options(['42', 'bar=False'])
([42], {'bar': False})
>>> parse_options(['42', 'bar=false'])
([42], {'bar': False})
>>> parse_options(['42', '"foo'])
Traceback (most recent call last):
...
click.exceptions.ClickException: Invalid argument: "foo
"""
kw_pairs = [x.split("=", 1) for x in options if "=" in x]
args = [parse_argument(x) for x in options if "=" not in x]
kw = {k: parse_argument(v) for (k, v) in kw_pairs}
return (args, kw)
diff --git a/swh/scheduler/elasticsearch_memory.py b/swh/scheduler/elasticsearch_memory.py
index da59852..2dd958c 100644
--- a/swh/scheduler/elasticsearch_memory.py
+++ b/swh/scheduler/elasticsearch_memory.py
@@ -1,146 +1,144 @@
# Copyright (C) 2018-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
"""Memory Elastic Search backend
"""
+from ast import literal_eval
import datetime # noqa serialization purposes
import hashlib
import logging
-
-from ast import literal_eval
from typing import Optional
import psycopg2 # noqa serialization purposes
-
logger = logging.getLogger(__name__)
class BasicSerializer:
"""For memory elastic search implementation (not for production)
"""
def __init__(self, *args, **kwargs):
pass
def dumps(self, *args, **kwargs):
return str(*args)
class BasicTransport:
"""For memory elastic search implementation, (not for production)
"""
def __init__(self, *args, **kwargs):
self.serializer = BasicSerializer()
class MemoryElasticsearch:
"""Memory Elasticsearch instance (for test purposes)
Partial implementation oriented towards index storage (and not search)
For now, its sole client is the scheduler for task archival purposes.
"""
def __init__(self, *args, **kwargs):
self.index = {}
self.mapping = {}
self.settings = {}
self.indices = self # HACK
self.main_mapping_key: Optional[str] = None
self.main_settings_key: Optional[str] = None
self.transport = BasicTransport()
def create(self, index, **kwargs):
logger.debug(f"create index {index}")
logger.debug(f"indices: {self.index}")
logger.debug(f"mapping: {self.mapping}")
logger.debug(f"settings: {self.settings}")
self.index[index] = {
"status": "opened",
"data": {},
"mapping": self.get_mapping(self.main_mapping_key),
"settings": self.get_settings(self.main_settings_key),
}
logger.debug(f"index {index} created")
def close(self, index, **kwargs):
"""Close index"""
idx = self.index.get(index)
if idx:
idx["status"] = "closed"
def open(self, index, **kwargs):
"""Open index"""
idx = self.index.get(index)
if idx:
idx["status"] = "opened"
def bulk(self, body, **kwargs):
"""Bulk insert document in index"""
assert isinstance(body, str)
all_data = body.split("\n")
if all_data[-1] == "":
all_data = all_data[:-1] # drop the empty line if any
ids = []
# data is sent as consecutive line pairs: (index action, document to index)
for i in range(0, len(all_data), 2):
# The first entry is about the index to use
# not about a data to index
# find the index
index_data = literal_eval(all_data[i])
idx_name = index_data["index"]["_index"]
# associated data to index
data = all_data[i + 1]
_id = hashlib.sha1(data.encode("utf-8")).hexdigest()
parsed_data = eval(data) # for datetime
self.index[idx_name]["data"][_id] = parsed_data
ids.append(_id)
# everything is indexed fine
return {"items": [{"index": {"status": 200, "_id": _id,}} for _id in ids]}
def mget(self, *args, body, index, **kwargs):
"""Bulk indexed documents retrieval"""
idx = self.index[index]
docs = []
idx_docs = idx["data"]
for _id in body["ids"]:
doc = idx_docs.get(_id)
if doc:
d = {
"found": True,
"_source": doc,
}
docs.append(d)
return {"docs": docs}
def stats(self, index, **kwargs):
idx = self.index[index] # will raise if it does not exist
if not idx or idx["status"] == "closed":
raise ValueError("Closed index") # simulate issue if index closed
def exists(self, index, **kwargs):
return self.index.get(index) is not None
def put_mapping(self, index, body, **kwargs):
self.mapping[index] = body
self.main_mapping_key = index
def get_mapping(self, index, **kwargs):
return self.mapping.get(index) or self.index.get(index, {}).get("mapping", {})
def put_settings(self, index, body, **kwargs):
self.settings[index] = body
self.main_settings_key = index
def get_settings(self, index, **kwargs):
return self.settings.get(index) or self.index.get(index, {}).get("settings", {})
diff --git a/swh/scheduler/interface.py b/swh/scheduler/interface.py
index 69787ea..0496093 100644
--- a/swh/scheduler/interface.py
+++ b/swh/scheduler/interface.py
@@ -1,312 +1,311 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Any, Dict, Iterable, List, Optional
from uuid import UUID
from swh.core.api import remote_api_endpoint
-
from swh.scheduler.model import (
ListedOrigin,
ListedOriginPageToken,
Lister,
PaginatedListedOriginList,
)
class SchedulerInterface:
@remote_api_endpoint("task_type/create")
def create_task_type(self, task_type):
"""Create a new task type ready for scheduling.
Args:
task_type (dict): a dictionary with the following keys:
- type (str): an identifier for the task type
- description (str): a human-readable description of what the
task does
- backend_name (str): the name of the task in the
job-scheduling backend
- default_interval (datetime.timedelta): the default interval
between two task runs
- min_interval (datetime.timedelta): the minimum interval
between two task runs
- max_interval (datetime.timedelta): the maximum interval
between two task runs
- backoff_factor (float): the factor by which the interval
changes at each run
- max_queue_length (int): the maximum length of the task queue
for this task type
"""
...
@remote_api_endpoint("task_type/get")
def get_task_type(self, task_type_name):
"""Retrieve the task type with id task_type_name"""
...
@remote_api_endpoint("task_type/get_all")
def get_task_types(self):
"""Retrieve all registered task types"""
...
@remote_api_endpoint("task/create")
def create_tasks(self, tasks, policy="recurring"):
"""Create new tasks.
Args:
tasks (list): each task is a dictionary with the following keys:
- type (str): the task type
- arguments (dict): the arguments for the task runner, keys:
- args (list of str): arguments
- kwargs (dict str -> str): keyword arguments
- next_run (datetime.datetime): the next scheduled run for the
task
Returns:
a list of created tasks.
"""
...
@remote_api_endpoint("task/set_status")
def set_status_tasks(self, task_ids, status="disabled", next_run=None):
"""Set the tasks' status whose ids are listed.
If given, also set the next_run date.
"""
...
@remote_api_endpoint("task/disable")
def disable_tasks(self, task_ids):
"""Disable the tasks whose ids are listed."""
...
@remote_api_endpoint("task/search")
def search_tasks(
self,
task_id=None,
task_type=None,
status=None,
priority=None,
policy=None,
before=None,
after=None,
limit=None,
):
"""Search tasks from selected criterions"""
...
@remote_api_endpoint("task/get")
def get_tasks(self, task_ids):
"""Retrieve the info of tasks whose ids are listed."""
...
@remote_api_endpoint("task/peek_ready")
def peek_ready_tasks(
self, task_type, timestamp=None, num_tasks=None, num_tasks_priority=None,
):
"""Fetch the list of ready tasks
Args:
task_type (str): filter tasks by their type
timestamp (datetime.datetime): peek tasks that need to be executed
before that timestamp
num_tasks (int): only peek at num_tasks tasks (with no priority)
num_tasks_priority (int): only peek at num_tasks_priority
tasks (with priority)
Returns:
a list of tasks
"""
...
@remote_api_endpoint("task/grab_ready")
def grab_ready_tasks(
self, task_type, timestamp=None, num_tasks=None, num_tasks_priority=None,
):
"""Fetch the list of ready tasks, and mark them as scheduled
Args:
task_type (str): filter tasks by their type
timestamp (datetime.datetime): grab tasks that need to be executed
before that timestamp
num_tasks (int): only grab num_tasks tasks (with no priority)
num_tasks_priority (int): only grab num_tasks_priority tasks (with
priority)
Returns:
a list of tasks
"""
...
@remote_api_endpoint("task_run/schedule_one")
def schedule_task_run(self, task_id, backend_id, metadata=None, timestamp=None):
"""Mark a given task as scheduled, adding a task_run entry in the database.
Args:
task_id (int): the identifier for the task being scheduled
backend_id (str): the identifier of the job in the backend
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
a fresh task_run entry
"""
...
@remote_api_endpoint("task_run/schedule")
def mass_schedule_task_runs(self, task_runs):
"""Schedule a bunch of task runs.
Args:
task_runs (list): a list of dicts with keys:
- task (int): the identifier for the task being scheduled
- backend_id (str): the identifier of the job in the backend
- metadata (dict): metadata to add to the task_run entry
- scheduled (datetime.datetime): the instant the event occurred
Returns:
None
"""
...
@remote_api_endpoint("task_run/start")
def start_task_run(self, backend_id, metadata=None, timestamp=None):
"""Mark a given task as started, updating the corresponding task_run
entry in the database.
Args:
backend_id (str): the identifier of the job in the backend
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
the updated task_run entry
"""
...
@remote_api_endpoint("task_run/end")
def end_task_run(
self, backend_id, status, metadata=None, timestamp=None, result=None,
):
"""Mark a given task as ended, updating the corresponding task_run entry in the
database.
Args:
backend_id (str): the identifier of the job in the backend
status (str): how the task ended; one of: 'eventful', 'uneventful',
'failed'
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
the updated task_run entry
"""
...
@remote_api_endpoint("task/filter_for_archive")
def filter_task_to_archive(
self,
after_ts: str,
before_ts: str,
limit: int = 10,
page_token: Optional[str] = None,
) -> Dict[str, Any]:
"""Compute the tasks to archive within the datetime interval
[after_ts, before_ts[. The method returns a paginated result.
Returns:
dict with the following keys:
- **next_page_token**: opaque token to be used as
`page_token` to retrieve the next page of results. If absent,
there are no more pages to gather.
- **tasks**: list of task dictionaries with the following keys:
**id** (str): origin task id
**started** (Optional[datetime]): started date
**scheduled** (datetime): scheduled date
**arguments** (json dict): task's arguments
...
"""
...
@remote_api_endpoint("task/delete_archived")
def delete_archived_tasks(self, task_ids):
"""Delete archived tasks as much as possible. Only the task_ids whose
complete associated task_run have been cleaned up will be.
"""
...
@remote_api_endpoint("task_run/get")
def get_task_runs(self, task_ids, limit=None):
"""Search task run for a task id"""
...
@remote_api_endpoint("lister/get_or_create")
def get_or_create_lister(
self, name: str, instance_name: Optional[str] = None
) -> Lister:
"""Retrieve information about the given instance of the lister from the
database, or create the entry if it did not exist.
"""
...
@remote_api_endpoint("lister/update")
def update_lister(self, lister: Lister) -> Lister:
"""Update the state for the given lister instance in the database.
Returns:
a new Lister object, with all fields updated from the database
Raises:
StaleData if the `updated` timestamp for the lister instance in
database doesn't match the one passed by the user.
"""
...
@remote_api_endpoint("origins/record")
def record_listed_origins(
self, listed_origins: Iterable[ListedOrigin]
) -> List[ListedOrigin]:
"""Record a set of origins that a lister has listed.
This performs an "upsert": origins with the same (lister_id, url,
visit_type) values are updated with new values for
extra_loader_arguments, last_update and last_seen.
"""
...
@remote_api_endpoint("origins/get")
def get_listed_origins(
self,
lister_id: Optional[UUID] = None,
url: Optional[str] = None,
limit: int = 1000,
page_token: Optional[ListedOriginPageToken] = None,
) -> PaginatedListedOriginList:
"""Get information on the listed origins matching either the `url` or
`lister_id`, or both arguments.
Use the `limit` and `page_token` arguments for continuation. The next
page token, if any, is returned in the PaginatedListedOriginList object.
"""
...
@remote_api_endpoint("priority_ratios/get")
def get_priority_ratios(self):
...
diff --git a/swh/scheduler/model.py b/swh/scheduler/model.py
index 904aaf9..e3827ff 100644
--- a/swh/scheduler/model.py
+++ b/swh/scheduler/model.py
@@ -1,193 +1,193 @@
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
-from uuid import UUID
from typing import Any, Dict, List, Optional, Tuple, Union
+from uuid import UUID
import attr
import attr.converters
from attrs_strict import type_validator
@attr.s
class BaseSchedulerModel:
"""Base class for database-backed objects.
These database-backed objects are defined through attrs-based attributes
that match the columns of the database 1:1. This is a (very) lightweight
ORM.
These attrs-based attributes have metadata specific to the functionality
expected from these fields in the database:
- `primary_key`: the column is a primary key; it should be filtered out
when doing an `update` of the object
- `auto_primary_key`: the column is a primary key, which is automatically handled
by the database. It will not be inserted to. This must be matched with a
database-side default value.
- `auto_now_add`: the column is a timestamp that is set to the current time when
the object is inserted, and never updated afterwards. This must be matched with
a database-side default value.
- `auto_now`: the column is a timestamp that is set to the current time when
the object is inserted or updated.
"""
_pk_cols: Optional[Tuple[str, ...]] = None
_select_cols: Optional[Tuple[str, ...]] = None
_insert_cols_and_metavars: Optional[Tuple[Tuple[str, ...], Tuple[str, ...]]] = None
@classmethod
def primary_key_columns(cls) -> Tuple[str, ...]:
"""Get the primary key columns for this object type"""
if cls._pk_cols is None:
columns: List[str] = []
for field in attr.fields(cls):
if any(
field.metadata.get(flag)
for flag in ("auto_primary_key", "primary_key")
):
columns.append(field.name)
cls._pk_cols = tuple(sorted(columns))
return cls._pk_cols
@classmethod
def select_columns(cls) -> Tuple[str, ...]:
"""Get all the database columns needed for a `select` on this object type"""
if cls._select_cols is None:
columns: List[str] = []
for field in attr.fields(cls):
columns.append(field.name)
cls._select_cols = tuple(sorted(columns))
return cls._select_cols
@classmethod
def insert_columns_and_metavars(cls) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:
"""Get the database columns and metavars needed for an `insert` or `update` on
this object type.
This implements support for the `auto_*` field metadata attributes.
"""
if cls._insert_cols_and_metavars is None:
zipped_cols_and_metavars: List[Tuple[str, str]] = []
for field in attr.fields(cls):
if any(
field.metadata.get(flag)
for flag in ("auto_now_add", "auto_primary_key")
):
continue
elif field.metadata.get("auto_now"):
zipped_cols_and_metavars.append((field.name, "now()"))
else:
zipped_cols_and_metavars.append((field.name, f"%({field.name})s"))
zipped_cols_and_metavars.sort()
cols, metavars = zip(*zipped_cols_and_metavars)
cls._insert_cols_and_metavars = cols, metavars
return cls._insert_cols_and_metavars
@attr.s
class Lister(BaseSchedulerModel):
name = attr.ib(type=str, validator=[type_validator()])
instance_name = attr.ib(type=str, validator=[type_validator()])
# Populated by database
id = attr.ib(
type=Optional[UUID],
validator=type_validator(),
default=None,
metadata={"auto_primary_key": True},
)
current_state = attr.ib(
type=Dict[str, Any], validator=[type_validator()], factory=dict
)
created = attr.ib(
type=Optional[datetime.datetime],
validator=[type_validator()],
default=None,
metadata={"auto_now_add": True},
)
updated = attr.ib(
type=Optional[datetime.datetime],
validator=[type_validator()],
default=None,
metadata={"auto_now": True},
)
@attr.s
class ListedOrigin(BaseSchedulerModel):
"""Basic information about a listed origin, output by a lister"""
lister_id = attr.ib(
type=UUID, validator=[type_validator()], metadata={"primary_key": True}
)
url = attr.ib(
type=str, validator=[type_validator()], metadata={"primary_key": True}
)
visit_type = attr.ib(
type=str, validator=[type_validator()], metadata={"primary_key": True}
)
extra_loader_arguments = attr.ib(
type=Dict[str, str], validator=[type_validator()], factory=dict
)
last_update = attr.ib(
type=Optional[datetime.datetime], validator=[type_validator()], default=None,
)
enabled = attr.ib(type=bool, validator=[type_validator()], default=True)
first_seen = attr.ib(
type=Optional[datetime.datetime],
validator=[type_validator()],
default=None,
metadata={"auto_now_add": True},
)
last_seen = attr.ib(
type=Optional[datetime.datetime],
validator=[type_validator()],
default=None,
metadata={"auto_now": True},
)
ListedOriginPageToken = Tuple[UUID, str]
def convert_listed_origin_page_token(
input: Union[None, ListedOriginPageToken, List[Union[UUID, str]]]
) -> Optional[ListedOriginPageToken]:
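# Deserialization (e.g. from JSON/msgpack) may hand us a 2-element list instead
# of a tuple; normalize it back to a (UUID, str) tuple.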
if input is None:
return None
if isinstance(input, tuple):
return input
x, y = input
assert isinstance(x, UUID)
assert isinstance(y, str)
return (x, y)
@attr.s
class PaginatedListedOriginList(BaseSchedulerModel):
"""A list of listed origins, with a continuation token"""
origins = attr.ib(type=List[ListedOrigin], validator=[type_validator()])
next_page_token = attr.ib(
type=Optional[ListedOriginPageToken],
validator=[type_validator()],
converter=convert_listed_origin_page_token,
default=None,
)
diff --git a/swh/scheduler/pytest_plugin.py b/swh/scheduler/pytest_plugin.py
index 723bc3a..a7ae36c 100644
--- a/swh/scheduler/pytest_plugin.py
+++ b/swh/scheduler/pytest_plugin.py
@@ -1,111 +1,108 @@
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import timedelta
import glob
import os
from celery.contrib.testing import worker
from celery.contrib.testing.app import TestApp, setup_default_app
-
import pkg_resources
import pytest
from swh.core.utils import numfile_sortkey as sortkey
-
import swh.scheduler
from swh.scheduler import get_scheduler
-
SQL_DIR = os.path.join(os.path.dirname(swh.scheduler.__file__), "sql")
DUMP_FILES = os.path.join(SQL_DIR, "*.sql")
# celery tasks for testing purposes; tasks themselves should be
# in swh/scheduler/tests/tasks.py
TASK_NAMES = ["ping", "multiping", "add", "error", "echo"]
@pytest.fixture
def swh_scheduler_config(request, postgresql):
scheduler_config = {
"db": postgresql.dsn,
}
all_dump_files = sorted(glob.glob(DUMP_FILES), key=sortkey)
cursor = postgresql.cursor()
for fname in all_dump_files:
with open(fname) as fobj:
cursor.execute(fobj.read())
postgresql.commit()
return scheduler_config
@pytest.fixture
def swh_scheduler(swh_scheduler_config):
scheduler = get_scheduler("local", swh_scheduler_config)
for taskname in TASK_NAMES:
scheduler.create_task_type(
{
"type": "swh-test-{}".format(taskname),
"description": "The {} testing task".format(taskname),
"backend_name": "swh.scheduler.tests.tasks.{}".format(taskname),
"default_interval": timedelta(days=1),
"min_interval": timedelta(hours=6),
"max_interval": timedelta(days=12),
}
)
return scheduler
# this alias is used to be able to easily instantiate a db-backed Scheduler
# eg. for the RPC client/server test suite.
swh_db_scheduler = swh_scheduler
@pytest.fixture(scope="session")
def swh_scheduler_celery_app():
"""Set up a Celery app as swh.scheduler and swh worker tests would expect it"""
test_app = TestApp(
set_as_current=True,
enable_logging=True,
task_cls="swh.scheduler.task:SWHTask",
config={
"accept_content": ["application/x-msgpack", "application/json"],
"task_serializer": "msgpack",
"result_serializer": "json",
},
)
with setup_default_app(test_app, use_trap=False):
from swh.scheduler.celery_backend import config
config.app = test_app
test_app.set_default()
test_app.set_current()
yield test_app
@pytest.fixture(scope="session")
def swh_scheduler_celery_includes():
"""List of task modules that should be loaded by the swh_scheduler_celery_worker on
startup."""
task_modules = ["swh.scheduler.tests.tasks"]
for entrypoint in pkg_resources.iter_entry_points("swh.workers"):
task_modules.extend(entrypoint.load()().get("task_modules", []))
return task_modules
@pytest.fixture(scope="session")
def swh_scheduler_celery_worker(
swh_scheduler_celery_app, swh_scheduler_celery_includes,
):
"""Spawn a worker"""
for module in swh_scheduler_celery_includes:
swh_scheduler_celery_app.loader.import_task_module(module)
with worker.start_worker(swh_scheduler_celery_app, pool="solo") as w:
yield w
diff --git a/swh/scheduler/tests/common.py b/swh/scheduler/tests/common.py
index 9c2c463..13c38d3 100644
--- a/swh/scheduler/tests/common.py
+++ b/swh/scheduler/tests/common.py
@@ -1,104 +1,103 @@
# Copyright (C) 2017-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
import datetime
-
TEMPLATES = {
"git": {
"type": "update-git",
"arguments": {"args": [], "kwargs": {},},
"next_run": None,
},
"hg": {
"type": "update-hg",
"arguments": {"args": [], "kwargs": {},},
"next_run": None,
"policy": "oneshot",
},
}
TASK_TYPES = {
"git": {
"type": "update-git",
"description": "Update a git repository",
"backend_name": "swh.loader.git.tasks.UpdateGitRepository",
"default_interval": datetime.timedelta(days=64),
"min_interval": datetime.timedelta(hours=12),
"max_interval": datetime.timedelta(days=64),
"backoff_factor": 2,
"max_queue_length": None,
"num_retries": 7,
"retry_delay": datetime.timedelta(hours=2),
},
"hg": {
"type": "update-hg",
"description": "Update a mercurial repository",
"backend_name": "swh.loader.mercurial.tasks.UpdateHgRepository",
"default_interval": datetime.timedelta(days=64),
"min_interval": datetime.timedelta(hours=12),
"max_interval": datetime.timedelta(days=64),
"backoff_factor": 2,
"max_queue_length": None,
"num_retries": 7,
"retry_delay": datetime.timedelta(hours=2),
},
}
def tasks_from_template(template, max_timestamp, num, num_priority=0, priorities=None):
"""Build tasks from template
"""
def _task_from_template(template, next_run, priority, *args, **kwargs):
ret = copy.deepcopy(template)
ret["next_run"] = next_run
if priority:
ret["priority"] = priority
if args:
ret["arguments"]["args"] = list(args)
if kwargs:
ret["arguments"]["kwargs"] = kwargs
return ret
def _pop_priority(priorities):
if not priorities:
return None
for priority, remains in priorities.items():
if remains > 0:
priorities[priority] = remains - 1
return priority
return None
if num_priority and priorities:
priorities = {
priority: ratio * num_priority for priority, ratio in priorities.items()
}
tasks = []
for i in range(num + num_priority):
priority = _pop_priority(priorities)
tasks.append(
_task_from_template(
template,
max_timestamp - datetime.timedelta(microseconds=i),
priority,
"argument-%03d" % i,
**{"kwarg%03d" % i: "bogus-kwarg"},
)
)
return tasks
LISTERS = (
{"name": "github"},
{"name": "gitlab", "instance_name": "gitlab"},
{"name": "gitlab", "instance_name": "freedesktop"},
{"name": "npm"},
{"name": "pypi"},
)
diff --git a/swh/scheduler/tests/conftest.py b/swh/scheduler/tests/conftest.py
index d187c42..91e92bc 100644
--- a/swh/scheduler/tests/conftest.py
+++ b/swh/scheduler/tests/conftest.py
@@ -1,42 +1,41 @@
# Copyright (C) 2016-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import os
from datetime import datetime, timezone
+import os
from typing import List
import pytest
from swh.scheduler.model import ListedOrigin, Lister
from swh.scheduler.tests.common import LISTERS
-
# make sure we are not fooled by CELERY_ config environment vars
for var in [x for x in os.environ.keys() if x.startswith("CELERY")]:
os.environ.pop(var)
# test_cli tests depend on an en/C locale, so ensure it
os.environ["LC_ALL"] = "C.UTF-8"
@pytest.fixture
def stored_lister(swh_scheduler) -> Lister:
"""Store a lister in the scheduler and return its information"""
return swh_scheduler.get_or_create_lister(**LISTERS[0])
@pytest.fixture
def listed_origins(stored_lister) -> List[ListedOrigin]:
"""Return a (fixed) set of 1000 listed origins"""
return [
ListedOrigin(
lister_id=stored_lister.id,
url=f"https://example.com/{i:04d}.git",
visit_type="git",
last_update=datetime(2020, 6, 15, 16, 0, 0, i, tzinfo=timezone.utc),
)
for i in range(1000)
]
diff --git a/swh/scheduler/tests/es/conftest.py b/swh/scheduler/tests/es/conftest.py
index 8d86522..6b3028d 100644
--- a/swh/scheduler/tests/es/conftest.py
+++ b/swh/scheduler/tests/es/conftest.py
@@ -1,49 +1,48 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import yaml
-
import pytest
+import yaml
from swh.scheduler import get_scheduler
@pytest.fixture
def swh_sched_config(swh_scheduler_config):
return {
"scheduler": {"cls": "local", "args": swh_scheduler_config,},
"elasticsearch": {
"cls": "memory",
"args": {"index_name_prefix": "swh-tasks",},
},
}
@pytest.fixture
def swh_sched_config_file(swh_sched_config, monkeypatch, tmp_path):
conffile = str(tmp_path / "elastic.yml")
with open(conffile, "w") as f:
f.write(yaml.dump(swh_sched_config))
monkeypatch.setenv("SWH_CONFIG_FILENAME", conffile)
return conffile
@pytest.fixture
def swh_sched(swh_sched_config):
return get_scheduler(**swh_sched_config["scheduler"])
@pytest.fixture
def swh_elasticsearch_backend(swh_sched_config):
from swh.scheduler.backend_es import ElasticSearchBackend
backend = ElasticSearchBackend(**swh_sched_config)
backend.initialize()
return backend
@pytest.fixture
def swh_elasticsearch_memory(swh_elasticsearch_backend):
return swh_elasticsearch_backend.storage
diff --git a/swh/scheduler/tests/es/test_backend_es.py b/swh/scheduler/tests/es/test_backend_es.py
index 6b332ad..1c7e2c0 100644
--- a/swh/scheduler/tests/es/test_backend_es.py
+++ b/swh/scheduler/tests/es/test_backend_es.py
@@ -1,80 +1,79 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
-import pytest
-
import elasticsearch
+import pytest
from swh.scheduler.backend_es import get_elasticsearch
-from ..common import tasks_from_template, TEMPLATES
+from ..common import TEMPLATES, tasks_from_template
def test_get_elasticsearch():
with pytest.raises(ValueError, match="Unknown elasticsearch class"):
get_elasticsearch("unknown")
es = get_elasticsearch("memory")
assert es
from swh.scheduler.elasticsearch_memory import MemoryElasticsearch
assert isinstance(es, MemoryElasticsearch)
es = get_elasticsearch("local")
assert es
assert isinstance(es, elasticsearch.Elasticsearch)
def test_backend_setup_basic(swh_elasticsearch_backend):
"""Elastic search instance should allow to create/close/check index
"""
index_name = "swh-tasks-2010-01"
try:
swh_elasticsearch_backend.storage.indices.get_mapping(index_name)
except (elasticsearch.exceptions.NotFoundError, KeyError):
pass
assert not swh_elasticsearch_backend.storage.indices.exists(index_name)
swh_elasticsearch_backend.create(index_name)
assert swh_elasticsearch_backend.storage.indices.exists(index_name)
assert swh_elasticsearch_backend.is_index_opened(index_name)
# index exists with a mapping
mapping = swh_elasticsearch_backend.storage.indices.get_mapping(index_name)
assert mapping != {}
def test_backend_setup_index(swh_elasticsearch_backend):
"""Elastic search instance should allow to bulk index
"""
template_git = TEMPLATES["git"]
next_run_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
tasks = tasks_from_template(template_git, next_run_date, 1)
index_name = swh_elasticsearch_backend.compute_index_name(
next_run_date.year, next_run_date.month
)
assert not swh_elasticsearch_backend.storage.indices.exists(index_name)
tasks = list(swh_elasticsearch_backend.streaming_bulk(index_name, tasks))
assert len(tasks) > 0
for output_task in tasks:
assert output_task is not None
assert output_task["type"] == template_git["type"]
assert output_task["arguments"] is not None
next_run = output_task["next_run"]
if isinstance(next_run, str): # real elasticsearch
assert next_run == next_run_date.isoformat()
else: # the memory implementation does not really index
assert next_run == next_run_date
assert swh_elasticsearch_backend.storage.indices.exists(index_name)
assert swh_elasticsearch_backend.is_index_opened(index_name)
mapping = swh_elasticsearch_backend.storage.indices.get_mapping(index_name)
assert mapping != {}
diff --git a/swh/scheduler/tests/es/test_cli_task.py b/swh/scheduler/tests/es/test_cli_task.py
index ff8ba46..d0182fd 100644
--- a/swh/scheduler/tests/es/test_cli_task.py
+++ b/swh/scheduler/tests/es/test_cli_task.py
@@ -1,114 +1,111 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import arrow
import datetime
import logging
-import uuid
import random
+import uuid
-import pytest
-
+import arrow
from click.testing import CliRunner
+import pytest
from swh.scheduler.cli import cli
-
-from ..common import tasks_from_template, TASK_TYPES, TEMPLATES
-
+from ..common import TASK_TYPES, TEMPLATES, tasks_from_template
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures("swh_elasticsearch_backend")
def test_cli_archive_tasks(swh_sched, swh_sched_config_file):
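"""Check that 'task archive --cleanup' archives the filtered tasks and removes
the archivable (here, disabled) ones from the scheduler."""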
scheduler = swh_sched
template_git = TEMPLATES["git"]
template_hg = TEMPLATES["hg"]
# first initialize scheduler's db (is this still needed?)
for tt in TASK_TYPES.values():
scheduler.create_task_type(tt)
next_run_start = arrow.utcnow().datetime - datetime.timedelta(days=1)
recurring = tasks_from_template(template_git, next_run_start, 100)
oneshots = tasks_from_template(
template_hg, next_run_start - datetime.timedelta(days=1), 50
)
past_time = next_run_start - datetime.timedelta(days=7)
all_tasks = recurring + oneshots
result = scheduler.create_tasks(all_tasks)
assert len(result) == len(all_tasks)
# simulate task run
backend_tasks = [
{
"task": task["id"],
"backend_id": str(uuid.uuid4()),
"scheduled": next_run_start - datetime.timedelta(minutes=i % 60),
}
for i, task in enumerate(result)
]
scheduler.mass_schedule_task_runs(backend_tasks)
# Disable some tasks
tasks_to_disable = set()
for task in result:
status = random.choice(["disabled", "completed"])
if status == "disabled":
tasks_to_disable.add(task["id"])
scheduler.disable_tasks(tasks_to_disable)
git_tasks = scheduler.search_tasks(task_type=template_git["type"])
hg_tasks = scheduler.search_tasks(task_type=template_hg["type"])
assert len(git_tasks) + len(hg_tasks) == len(all_tasks)
# Ensure the task runs are in the expected state
task_runs = scheduler.get_task_runs([t["id"] for t in git_tasks + hg_tasks])
# Same for the tasks
for t in git_tasks + hg_tasks:
if t["id"] in tasks_to_disable:
assert t["status"] == "disabled"
future_time = next_run_start + datetime.timedelta(days=1)
for tr in task_runs:
assert past_time <= tr["scheduled"]
assert tr["scheduled"] < future_time
runner = CliRunner()
result = runner.invoke(
cli,
[
"--config-file",
swh_sched_config_file,
"task",
"archive",
"--after",
past_time.isoformat(),
"--before",
future_time.isoformat(),
"--cleanup",
],
obj={"log_level": logging.DEBUG,},
)
assert result.exit_code == 0, result.output
# disabled tasks should no longer be in the scheduler
git_tasks = scheduler.search_tasks(task_type=template_git["type"])
hg_tasks = scheduler.search_tasks(task_type=template_hg["type"])
remaining_tasks = git_tasks + hg_tasks
count_disabled = 0
for task in remaining_tasks:
logger.debug(f"task status: {task['status']}")
if task["status"] == "disabled":
count_disabled += 1
assert count_disabled == 0
assert len(remaining_tasks) == len(all_tasks) - len(tasks_to_disable)
diff --git a/swh/scheduler/tests/es/test_elasticsearch_memory.py b/swh/scheduler/tests/es/test_elasticsearch_memory.py
index ada5ef0..5c79622 100644
--- a/swh/scheduler/tests/es/test_elasticsearch_memory.py
+++ b/swh/scheduler/tests/es/test_elasticsearch_memory.py
@@ -1,151 +1,150 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import hashlib
import logging
import random
+from typing import Any, Dict
import pytest
from swh.scheduler.elasticsearch_memory import BasicSerializer, BasicTransport
-from ..common import tasks_from_template, TEMPLATES
-from typing import Any, Dict
-
+from ..common import TEMPLATES, tasks_from_template
logger = logging.getLogger(__name__)
def test_serializer():
s = BasicSerializer()
assert s
data = {"something": [1, 2, 3], "cool": {"1": "2"}}
actual_data = s.dumps(data)
assert isinstance(actual_data, str)
assert actual_data == str(data)
def test_basic_transport():
b = BasicTransport()
assert b
assert isinstance(b.serializer, BasicSerializer)
def test_index_manipulation(swh_elasticsearch_memory):
index_name = "swh-tasks-xxxx"
indices = swh_elasticsearch_memory.index
assert not swh_elasticsearch_memory.exists(index_name)
assert index_name not in indices
# so stats raises
with pytest.raises(Exception):
swh_elasticsearch_memory.stats(index_name)
# we create the index
swh_elasticsearch_memory.create(index_name)
# now the index exists
assert swh_elasticsearch_memory.exists(index_name)
assert index_name in indices
# it's opened
assert indices[index_name]["status"] == "opened"
# so stats is happy
swh_elasticsearch_memory.stats(index_name)
# open the index, nothing changes
swh_elasticsearch_memory.open(index_name)
assert indices[index_name]["status"] == "opened"
# close the index
swh_elasticsearch_memory.close(index_name)
assert indices[index_name]["status"] == "closed"
# reopen the index (fun times)
swh_elasticsearch_memory.open(index_name)
assert indices[index_name]["status"] == "opened"
def test_bulk_and_mget(swh_elasticsearch_memory):
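"""Bulk-index serialized tasks into the in-memory backend, then retrieve them
back with mget."""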
# initialize tasks
template_git = TEMPLATES["git"]
next_run_start = datetime.datetime.utcnow() - datetime.timedelta(days=1)
tasks = tasks_from_template(template_git, next_run_start, 100)
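# helper mirroring how the in-memory backend is expected to derive document
# ids (sha1 hex digest of the serialized task)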
def compute_id(stask):
return hashlib.sha1(stask.encode("utf-8")).hexdigest()
body = []
ids_to_task = {}
for task in tasks:
date = task["next_run"]
index_name = f"swh-tasks-{date.year}-{date.month}"
idx = {"index": {"_index": index_name}}
sidx = swh_elasticsearch_memory.transport.serializer.dumps(idx)
body.append(sidx)
stask = swh_elasticsearch_memory.transport.serializer.dumps(task)
body.append(stask)
_id = compute_id(stask)
ids_to_task[_id] = task
logger.debug(f"_id: {_id}, task: {task}")
# store
# create the index first
swh_elasticsearch_memory.create(index_name)
# then bulk insert new data
result = swh_elasticsearch_memory.bulk("\n".join(body))
# no guarantee on the order of the result items
assert result
actual_items = result["items"]
assert len(actual_items) == len(ids_to_task)
def get_id(data: Dict[str, Any]) -> str:
return data["index"]["_id"]
actual_items = sorted(actual_items, key=get_id)
expected_items = {
"items": [{"index": {"status": 200, "_id": _id}} for _id in list(ids_to_task)]
}
expected_items = sorted(expected_items["items"], key=get_id)
assert actual_items == expected_items
# retrieve
nb_docs = 10
ids = list(ids_to_task)
random_ids = []
# add some nonexistent ids
for i in range(16):
noisy_id = f"{i}" * 40
random_ids.append(noisy_id)
random_ids.extend(random.sample(ids, nb_docs)) # add relevant ids
for i in range(16, 32):
noisy_id = f"{i}" * 40
random_ids.append(noisy_id)
result = swh_elasticsearch_memory.mget(index=index_name, body={"ids": random_ids})
assert result["docs"]
assert len(result["docs"]) == nb_docs, "no random and inexistent id found"
for doc in result["docs"]:
assert doc["found"]
actual_task = doc["_source"]
_id = compute_id(str(actual_task))
expected_task = ids_to_task[_id]
assert actual_task == expected_task
diff --git a/swh/scheduler/tests/test_api_client.py b/swh/scheduler/tests/test_api_client.py
index d931ac0..eae6b0c 100644
--- a/swh/scheduler/tests/test_api_client.py
+++ b/swh/scheduler/tests/test_api_client.py
@@ -1,75 +1,75 @@
# Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import pytest
from flask import url_for
+import pytest
-import swh.scheduler.api.server as server
from swh.scheduler.api.client import RemoteScheduler
+import swh.scheduler.api.server as server
from swh.scheduler.tests.test_scheduler import TestScheduler # noqa
# tests are executed using the imported TestScheduler class, with the
# swh_scheduler fixture overloaded below
# the Flask app used as server in these tests
@pytest.fixture
def app(swh_db_scheduler):
assert hasattr(server, "scheduler")
server.scheduler = swh_db_scheduler
yield server.app
# the RPCClient class used as client in these tests
@pytest.fixture
def swh_rpc_client_class():
return RemoteScheduler
@pytest.fixture
def swh_scheduler(swh_rpc_client, app):
yield swh_rpc_client
def test_site_map(flask_app_client):
sitemap = flask_app_client.get(url_for("site_map"))
assert sitemap.headers["Content-Type"] == "application/json"
rules = set(x["rule"] for x in sitemap.json)
# we expect exactly these rules
expected_rules = set(
"/" + rule
for rule in (
"lister/get_or_create",
"lister/update",
"origins/get",
"origins/record",
"priority_ratios/get",
"task/create",
"task/delete_archived",
"task/disable",
"task/filter_for_archive",
"task/get",
"task/grab_ready",
"task/peek_ready",
"task/search",
"task/set_status",
"task_run/end",
"task_run/get",
"task_run/schedule",
"task_run/schedule_one",
"task_run/start",
"task_type/create",
"task_type/get",
"task_type/get_all",
)
)
assert rules == expected_rules
def test_root(flask_app_client):
root = flask_app_client.get("/")
assert root.status_code == 200
assert b"Software Heritage scheduler RPC server" in root.data
diff --git a/swh/scheduler/tests/test_celery_tasks.py b/swh/scheduler/tests/test_celery_tasks.py
index b61cea1..ab8be43 100644
--- a/swh/scheduler/tests/test_celery_tasks.py
+++ b/swh/scheduler/tests/test_celery_tasks.py
@@ -1,176 +1,174 @@
-from time import sleep
from itertools import count
+from time import sleep
-from celery.result import GroupResult
-from celery.result import AsyncResult
-
+from celery.result import AsyncResult, GroupResult
import pytest
-from swh.scheduler.utils import create_task_dict
from swh.scheduler.celery_backend.runner import run_ready_tasks
+from swh.scheduler.utils import create_task_dict
def test_ping(swh_scheduler_celery_app, swh_scheduler_celery_worker):
res = swh_scheduler_celery_app.send_task("swh.scheduler.tests.tasks.ping")
assert res
res.wait()
assert res.successful()
assert res.result == "OK"
def test_ping_with_kw(swh_scheduler_celery_app, swh_scheduler_celery_worker):
res = swh_scheduler_celery_app.send_task(
"swh.scheduler.tests.tasks.ping", kwargs={"a": 1}
)
assert res
res.wait()
assert res.successful()
assert res.result == "OK (kw={'a': 1})"
def test_multiping(swh_scheduler_celery_app, swh_scheduler_celery_worker):
"Test that a task that spawns subtasks (group) works"
res = swh_scheduler_celery_app.send_task(
"swh.scheduler.tests.tasks.multiping", kwargs={"n": 5}
)
assert res
res.wait()
assert res.successful()
# retrieve the GroupResult for this task and wait for all the subtasks
# to complete
promise_id = res.result
assert promise_id
promise = GroupResult.restore(promise_id, app=swh_scheduler_celery_app)
for i in range(5):
if promise.ready():
break
sleep(1)
results = [x.get() for x in promise.results]
assert len(results) == 5
for i in range(5):
assert ("OK (kw={'i': %s})" % i) in results
def test_scheduler_fixture(
swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_scheduler
):
"Test that the scheduler fixture works properly"
task_type = swh_scheduler.get_task_type("swh-test-ping")
assert task_type
assert task_type["backend_name"] == "swh.scheduler.tests.tasks.ping"
swh_scheduler.create_tasks([create_task_dict("swh-test-ping", "oneshot")])
backend_tasks = run_ready_tasks(swh_scheduler, swh_scheduler_celery_app)
assert backend_tasks
for task in backend_tasks:
# Make sure the task completed
AsyncResult(id=task["backend_id"]).get()
def test_task_return_value(
swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_scheduler
):
task_type = swh_scheduler.get_task_type("swh-test-add")
assert task_type
assert task_type["backend_name"] == "swh.scheduler.tests.tasks.add"
swh_scheduler.create_tasks([create_task_dict("swh-test-add", "oneshot", 12, 30)])
backend_tasks = run_ready_tasks(swh_scheduler, swh_scheduler_celery_app)
assert len(backend_tasks) == 1
task = backend_tasks[0]
value = AsyncResult(id=task["backend_id"]).get()
assert value == 42
def test_task_exception(
swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_scheduler
):
task_type = swh_scheduler.get_task_type("swh-test-error")
assert task_type
assert task_type["backend_name"] == "swh.scheduler.tests.tasks.error"
swh_scheduler.create_tasks([create_task_dict("swh-test-error", "oneshot")])
backend_tasks = run_ready_tasks(swh_scheduler, swh_scheduler_celery_app)
assert len(backend_tasks) == 1
task = backend_tasks[0]
result = AsyncResult(id=task["backend_id"])
with pytest.raises(NotImplementedError):
result.get()
def test_statsd(swh_scheduler_celery_app, swh_scheduler_celery_worker, mocker):
m = mocker.patch("swh.scheduler.task.Statsd._send_to_server")
mocker.patch("swh.scheduler.task.ts", side_effect=count())
mocker.patch("swh.core.statsd.monotonic", side_effect=count())
res = swh_scheduler_celery_app.send_task("swh.scheduler.tests.tasks.echo")
assert res
res.wait()
assert res.successful()
assert res.result == {}
m.assert_any_call(
"swh_task_called_count:1|c|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
m.assert_any_call(
"swh_task_start_ts:0|g|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
m.assert_any_call(
"swh_task_end_ts:1|g|"
"#status:uneventful,task:swh.scheduler.tests.tasks.echo,"
"worker:unknown worker"
)
m.assert_any_call(
"swh_task_duration_seconds:1000|ms|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
m.assert_any_call(
"swh_task_success_count:1|c|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
def test_statsd_with_status(
swh_scheduler_celery_app, swh_scheduler_celery_worker, mocker
):
m = mocker.patch("swh.scheduler.task.Statsd._send_to_server")
mocker.patch("swh.scheduler.task.ts", side_effect=count())
mocker.patch("swh.core.statsd.monotonic", side_effect=count())
res = swh_scheduler_celery_app.send_task(
"swh.scheduler.tests.tasks.echo", kwargs={"status": "eventful"}
)
assert res
res.wait()
assert res.successful()
assert res.result == {"status": "eventful"}
m.assert_any_call(
"swh_task_called_count:1|c|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
m.assert_any_call(
"swh_task_start_ts:0|g|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
m.assert_any_call(
"swh_task_end_ts:1|g|"
"#status:eventful,task:swh.scheduler.tests.tasks.echo,"
"worker:unknown worker"
)
m.assert_any_call(
"swh_task_duration_seconds:1000|ms|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
m.assert_any_call(
"swh_task_success_count:1|c|"
"#task:swh.scheduler.tests.tasks.echo,worker:unknown worker"
)
diff --git a/swh/scheduler/tests/test_cli.py b/swh/scheduler/tests/test_cli.py
index d124dfa..4619119 100644
--- a/swh/scheduler/tests/test_cli.py
+++ b/swh/scheduler/tests/test_cli.py
@@ -1,797 +1,795 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
+from itertools import islice
+import logging
import re
import tempfile
-import logging
-
-from itertools import islice
-
from unittest.mock import patch
-from click.testing import CliRunner
+from click.testing import CliRunner
import pytest
+from swh.core.api.classes import stream_results
from swh.model.model import Origin
-from swh.storage import get_storage
from swh.scheduler.cli import cli
from swh.scheduler.utils import create_task_dict
-from swh.core.api.classes import stream_results
+from swh.storage import get_storage
CLI_CONFIG = """
scheduler:
cls: foo
args: {}
"""
def invoke(scheduler, catch_exceptions, args):
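"""Invoke the scheduler CLI with a mocked get_scheduler and a temporary
configuration file; helper shared by the tests below."""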
runner = CliRunner()
with patch(
"swh.scheduler.get_scheduler"
) as get_scheduler_mock, tempfile.NamedTemporaryFile(
"a", suffix=".yml"
) as config_fd:
config_fd.write(CLI_CONFIG)
config_fd.seek(0)
get_scheduler_mock.return_value = scheduler
args = ["-C" + config_fd.name,] + args
result = runner.invoke(cli, args, obj={"log_level": logging.WARNING})
if not catch_exceptions and result.exception:
print(result.output)
raise result.exception
return result
def test_schedule_tasks(swh_scheduler):
csv_data = (
b'swh-test-ping;[["arg1", "arg2"]];{"key": "value"};'
+ datetime.datetime.utcnow().isoformat().encode()
+ b"\n"
+ b'swh-test-ping;[["arg3", "arg4"]];{"key": "value"};'
+ datetime.datetime.utcnow().isoformat().encode()
+ b"\n"
)
with tempfile.NamedTemporaryFile(suffix=".csv") as csv_fd:
csv_fd.write(csv_data)
csv_fd.seek(0)
result = invoke(
swh_scheduler, False, ["task", "schedule", "-d", ";", csv_fd.name]
)
expected = r"""
Created 2 tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: recurring
Args:
\['arg1', 'arg2'\]
Keyword args:
key: 'value'
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: recurring
Args:
\['arg3', 'arg4'\]
Keyword args:
key: 'value'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_schedule_tasks_columns(swh_scheduler):
with tempfile.NamedTemporaryFile(suffix=".csv") as csv_fd:
csv_fd.write(b'swh-test-ping;oneshot;["arg1", "arg2"];{"key": "value"}\n')
csv_fd.seek(0)
result = invoke(
swh_scheduler,
False,
[
"task",
"schedule",
"-c",
"type",
"-c",
"policy",
"-c",
"args",
"-c",
"kwargs",
"-d",
";",
csv_fd.name,
],
)
expected = r"""
Created 1 tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
'arg1'
'arg2'
Keyword args:
key: 'value'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_schedule_task(swh_scheduler):
result = invoke(
swh_scheduler,
False,
["task", "add", "swh-test-ping", "arg1", "arg2", "key=value",],
)
expected = r"""
Created 1 tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: recurring
Args:
'arg1'
'arg2'
Keyword args:
key: 'value'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_pending_tasks_none(swh_scheduler):
result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",])
expected = r"""
Found 0 swh-test-ping tasks
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_pending_tasks(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task2["next_run"] += datetime.timedelta(days=1)
swh_scheduler.create_tasks([task1, task2])
result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",])
expected = r"""
Found 1 swh-test-ping tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
Keyword args:
key: 'value1'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
swh_scheduler.grab_ready_tasks("swh-test-ping")
result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",])
expected = r"""
Found 0 swh-test-ping tasks
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_pending_tasks_filter(swh_scheduler):
task = create_task_dict("swh-test-multiping", "oneshot", key="value")
swh_scheduler.create_tasks([task])
result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",])
expected = r"""
Found 0 swh-test-ping tasks
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_pending_tasks_filter_2(swh_scheduler):
swh_scheduler.create_tasks(
[
create_task_dict("swh-test-multiping", "oneshot", key="value"),
create_task_dict("swh-test-ping", "oneshot", key="value2"),
]
)
result = invoke(swh_scheduler, False, ["task", "list-pending", "swh-test-ping",])
expected = r"""
Found 1 swh-test-ping tasks
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
# Fails because "task list-pending --limit 3" only returns 2 tasks, because
# of how compute_nb_tasks_from works.
@pytest.mark.xfail
def test_list_pending_tasks_limit(swh_scheduler):
swh_scheduler.create_tasks(
[
create_task_dict("swh-test-ping", "oneshot", key="value%d" % i)
for i in range(10)
]
)
result = invoke(
swh_scheduler, False, ["task", "list-pending", "swh-test-ping", "--limit", "3",]
)
expected = r"""
Found 2 swh-test-ping tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
Keyword args:
key: 'value0'
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
Keyword args:
key: 'value1'
Task 3
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_pending_tasks_before(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task1["next_run"] += datetime.timedelta(days=3)
task2["next_run"] += datetime.timedelta(days=1)
swh_scheduler.create_tasks([task1, task2])
result = invoke(
swh_scheduler,
False,
[
"task",
"list-pending",
"swh-test-ping",
"--before",
(datetime.date.today() + datetime.timedelta(days=2)).isoformat(),
],
)
expected = r"""
Found 1 swh-test-ping tasks
Task 2
Next run: in a day \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task1["next_run"] += datetime.timedelta(days=3, hours=2)
swh_scheduler.create_tasks([task1, task2])
swh_scheduler.grab_ready_tasks("swh-test-ping")
result = invoke(swh_scheduler, False, ["task", "list",])
expected = r"""
Found 2 tasks
Task 1
Next run: in 3 days \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value1'
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks_id(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task3 = create_task_dict("swh-test-ping", "oneshot", key="value3")
swh_scheduler.create_tasks([task1, task2, task3])
result = invoke(swh_scheduler, False, ["task", "list", "--task-id", "2",])
expected = r"""
Found 1 tasks
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks_id_2(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task3 = create_task_dict("swh-test-ping", "oneshot", key="value3")
swh_scheduler.create_tasks([task1, task2, task3])
result = invoke(
swh_scheduler, False, ["task", "list", "--task-id", "2", "--task-id", "3"]
)
expected = r"""
Found 2 tasks
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value2'
Task 3
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value3'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks_type(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-multiping", "oneshot", key="value2")
task3 = create_task_dict("swh-test-ping", "oneshot", key="value3")
swh_scheduler.create_tasks([task1, task2, task3])
result = invoke(
swh_scheduler, False, ["task", "list", "--task-type", "swh-test-ping"]
)
expected = r"""
Found 2 tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value1'
Task 3
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value3'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks_limit(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task3 = create_task_dict("swh-test-ping", "oneshot", key="value3")
swh_scheduler.create_tasks([task1, task2, task3])
result = invoke(swh_scheduler, False, ["task", "list", "--limit", "2",])
expected = r"""
Found 2 tasks
Task 1
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value1'
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks_before(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task1["next_run"] += datetime.timedelta(days=3, hours=2)
swh_scheduler.create_tasks([task1, task2])
swh_scheduler.grab_ready_tasks("swh-test-ping")
result = invoke(
swh_scheduler,
False,
[
"task",
"list",
"--before",
(datetime.date.today() + datetime.timedelta(days=2)).isoformat(),
],
)
expected = r"""
Found 1 tasks
Task 2
Next run: just now \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value2'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def test_list_tasks_after(swh_scheduler):
task1 = create_task_dict("swh-test-ping", "oneshot", key="value1")
task2 = create_task_dict("swh-test-ping", "oneshot", key="value2")
task1["next_run"] += datetime.timedelta(days=3, hours=2)
swh_scheduler.create_tasks([task1, task2])
swh_scheduler.grab_ready_tasks("swh-test-ping")
result = invoke(
swh_scheduler,
False,
[
"task",
"list",
"--after",
(datetime.date.today() + datetime.timedelta(days=2)).isoformat(),
],
)
expected = r"""
Found 1 tasks
Task 1
Next run: in 3 days \(.*\)
Interval: 1 day, 0:00:00
Type: swh-test-ping
Policy: oneshot
Status: next_run_not_scheduled
Priority:\x20
Args:
Keyword args:
key: 'value1'
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), result.output
def _fill_storage_with_origins(storage, nb_origins):
origins = [Origin(url=f"http://example.com/{i}") for i in range(nb_origins)]
storage.origin_add(origins)
return origins
@pytest.fixture
def storage():
"""An instance of in-memory storage that gets injected
into the CLI functions."""
storage = get_storage(cls="memory")
with patch("swh.storage.get_storage") as get_storage_mock:
get_storage_mock.return_value = storage
yield storage
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
def test_task_schedule_origins_dry_run(swh_scheduler, storage):
"""Tests the scheduling when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
_fill_storage_with_origins(storage, 90)
result = invoke(
swh_scheduler,
False,
["task", "schedule_origins", "--dry-run", "swh-test-ping",],
)
# Check the output
expected = r"""
Scheduled 3 tasks \(30 origins\).
Scheduled 6 tasks \(60 origins\).
Scheduled 9 tasks \(90 origins\).
Done.
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output)
# Check scheduled tasks
tasks = swh_scheduler.search_tasks()
assert len(tasks) == 0
def _assert_origin_tasks_contraints(tasks, max_tasks, max_task_size, expected_origins):
# check there are not too many tasks
assert len(tasks) <= max_tasks
# check tasks are not too large
assert all(len(task["arguments"]["args"][0]) <= max_task_size for task in tasks)
# check the tasks are exhaustive
assert sum([len(task["arguments"]["args"][0]) for task in tasks]) == len(
expected_origins
)
assert set.union(*(set(task["arguments"]["args"][0]) for task in tasks)) == {
origin.url for origin in expected_origins
}
@patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3)
def test_task_schedule_origins(swh_scheduler, storage):
"""Tests the scheduling when neither origin_batch_size or
task_batch_size is a divisor of nb_origins."""
origins = _fill_storage_with_origins(storage, 70)
result = invoke(
swh_scheduler,
False,
["task", "schedule_origins", "swh-test-ping", "--batch-size", "20",],
)
# Check the output
expected = r"""
Scheduled 3 tasks \(60 origins\).
Scheduled 4 tasks \(70 origins\).
Done.
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output)
# Check tasks
tasks = swh_scheduler.search_tasks()
_assert_origin_tasks_contraints(tasks, 4, 20, origins)
assert all(task["arguments"]["kwargs"] == {} for task in tasks)
def test_task_schedule_origins_kwargs(swh_scheduler, storage):
"""Tests support of extra keyword-arguments."""
origins = _fill_storage_with_origins(storage, 30)
result = invoke(
swh_scheduler,
False,
[
"task",
"schedule_origins",
"swh-test-ping",
"--batch-size",
"20",
'key1="value1"',
'key2="value2"',
],
)
# Check the output
expected = r"""
Scheduled 2 tasks \(30 origins\).
Done.
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output)
# Check tasks
tasks = swh_scheduler.search_tasks()
_assert_origin_tasks_contraints(tasks, 2, 20, origins)
assert all(
task["arguments"]["kwargs"] == {"key1": "value1", "key2": "value2"}
for task in tasks
)
def test_task_schedule_origins_with_limit(swh_scheduler, storage):
"""Tests support of extra keyword-arguments."""
_fill_storage_with_origins(storage, 50)
limit = 20
expected_origins = list(islice(stream_results(storage.origin_list), limit))
nb_origins = len(expected_origins)
assert nb_origins == limit
max_task_size = 5
nb_tasks, remainder = divmod(nb_origins, max_task_size)
assert remainder == 0 # the numbers were chosen to divide evenly
result = invoke(
swh_scheduler,
False,
[
"task",
"schedule_origins",
"swh-test-ping",
"--batch-size",
max_task_size,
"--limit",
limit,
],
)
# Check the output
expected = rf"""
Scheduled {nb_tasks} tasks \({nb_origins} origins\).
Done.
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output)
tasks = swh_scheduler.search_tasks()
_assert_origin_tasks_contraints(tasks, max_task_size, nb_origins, expected_origins)
def test_task_schedule_origins_with_page_token(swh_scheduler, storage):
"""Tests support of extra keyword-arguments."""
nb_total_origins = 50
origins = _fill_storage_with_origins(storage, nb_total_origins)
# prepare the page_token and the expected origins
page_result = storage.origin_list(limit=10)
assert len(page_result.results) == 10
page_token = page_result.next_page_token
assert page_token is not None
# remove the first 10 origins listed as we won't see those in tasks
expected_origins = [o for o in origins if o not in page_result.results]
nb_origins = len(expected_origins)
assert nb_origins == nb_total_origins - len(page_result.results)
max_task_size = 10
nb_tasks, remainder = divmod(nb_origins, max_task_size)
assert remainder == 0
result = invoke(
swh_scheduler,
False,
[
"task",
"schedule_origins",
"swh-test-ping",
"--batch-size",
max_task_size,
"--page-token",
page_token,
],
)
# Check the output
expected = rf"""
Scheduled {nb_tasks} tasks \({nb_origins} origins\).
Done.
""".lstrip()
assert result.exit_code == 0, result.output
assert re.fullmatch(expected, result.output, re.MULTILINE), repr(result.output)
# Check tasks
tasks = swh_scheduler.search_tasks()
_assert_origin_tasks_contraints(tasks, max_task_size, nb_origins, expected_origins)
diff --git a/swh/scheduler/tests/test_cli_task_type.py b/swh/scheduler/tests/test_cli_task_type.py
index c158760..adf2ebe 100644
--- a/swh/scheduler/tests/test_cli_task_type.py
+++ b/swh/scheduler/tests/test_cli_task_type.py
@@ -1,128 +1,127 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import pytest
import traceback
-import yaml
-import pkg_resources
from click.testing import CliRunner
+import pkg_resources
+import pytest
+import yaml
from swh.scheduler import get_scheduler
from swh.scheduler.cli import cli
-
FAKE_MODULE_ENTRY_POINTS = {
"lister.gnu=swh.lister.gnu:register",
"lister.pypi=swh.lister.pypi:register",
}
@pytest.fixture
def mock_pkg_resources(monkeypatch):
"""Monkey patch swh.scheduler's mock_pkg_resources.iter_entry_point call
"""
def fake_iter_entry_points(*args, **kwargs):
"""Substitute fake function to return a fixed set of entrypoints
"""
- from pkg_resources import EntryPoint, Distribution
+ from pkg_resources import Distribution, EntryPoint
d = Distribution()
return [EntryPoint.parse(entry, dist=d) for entry in FAKE_MODULE_ENTRY_POINTS]
original_method = pkg_resources.iter_entry_points
monkeypatch.setattr(pkg_resources, "iter_entry_points", fake_iter_entry_points)
yield
# reset monkeypatch: is that needed?
monkeypatch.setattr(pkg_resources, "iter_entry_points", original_method)
@pytest.fixture
def local_sched_config(swh_scheduler_config):
"""Expose the local scheduler configuration
"""
return {"scheduler": {"cls": "local", "args": swh_scheduler_config}}
@pytest.fixture
def local_sched_configfile(local_sched_config, tmp_path):
"""Write in temporary location the local scheduler configuration
"""
configfile = tmp_path / "config.yml"
configfile.write_text(yaml.dump(local_sched_config))
return configfile.as_posix()
def test_register_ttypes_all(
mock_pkg_resources, local_sched_config, local_sched_configfile
):
"""Registering all task types"""
for command in [
["--config-file", local_sched_configfile, "task-type", "register"],
["--config-file", local_sched_configfile, "task-type", "register", "-p", "all"],
[
"--config-file",
local_sched_configfile,
"task-type",
"register",
"-p",
"lister.gnu",
"-p",
"lister.pypi",
],
]:
result = CliRunner().invoke(cli, command)
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
scheduler = get_scheduler(**local_sched_config["scheduler"])
all_tasks = [
"list-gnu-full",
"list-pypi",
]
for task in all_tasks:
task_type_desc = scheduler.get_task_type(task)
assert task_type_desc
assert task_type_desc["type"] == task
assert task_type_desc["backoff_factor"] == 1
def test_register_ttypes_filter(
mock_pkg_resources, local_sched_config, local_sched_configfile
):
"""Filtering on one worker should only register its associated task type
"""
result = CliRunner().invoke(
cli,
[
"--config-file",
local_sched_configfile,
"task-type",
"register",
"--plugins",
"lister.gnu",
],
)
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
scheduler = get_scheduler(**local_sched_config["scheduler"])
all_tasks = [
"list-gnu-full",
]
for task in all_tasks:
task_type_desc = scheduler.get_task_type(task)
assert task_type_desc
assert task_type_desc["type"] == task
assert task_type_desc["backoff_factor"] == 1
diff --git a/swh/scheduler/tests/test_common.py b/swh/scheduler/tests/test_common.py
index b2bc8d4..7a0de09 100644
--- a/swh/scheduler/tests/test_common.py
+++ b/swh/scheduler/tests/test_common.py
@@ -1,65 +1,65 @@
# Copyright (C) 2017-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
-from .common import tasks_from_template, TEMPLATES
+from .common import TEMPLATES, tasks_from_template
def test_tasks_from_template_no_priority():
nb_tasks = 3
template = TEMPLATES["git"]
next_run = datetime.datetime.utcnow()
tasks = tasks_from_template(template, next_run, nb_tasks)
assert len(tasks) == nb_tasks
for i, t in enumerate(tasks):
assert t["type"] == template["type"]
assert t["arguments"] is not None
assert t.get("policy") is None # not defined in template
assert len(t["arguments"]["args"]) == 1
assert len(t["arguments"]["kwargs"].keys()) == 1
assert t["next_run"] == next_run - datetime.timedelta(microseconds=i)
assert t.get("priority") is None
def test_tasks_from_template_priority():
nb_tasks_no_priority = 3
nb_tasks_priority = 10
template = TEMPLATES["hg"]
priorities = {
"high": 0.5,
"normal": 0.3,
"low": 0.2,
}
next_run = datetime.datetime.utcnow()
tasks = tasks_from_template(
template,
next_run,
nb_tasks_no_priority,
num_priority=nb_tasks_priority,
priorities=priorities,
)
assert len(tasks) == nb_tasks_no_priority + nb_tasks_priority
repartition_priority = {k: 0 for k in priorities.keys()}
for i, t in enumerate(tasks):
assert t["type"] == template["type"]
assert t["arguments"] is not None
assert t["policy"] == template["policy"]
assert len(t["arguments"]["args"]) == 1
assert len(t["arguments"]["kwargs"].keys()) == 1
assert t["next_run"] == next_run - datetime.timedelta(microseconds=i)
priority = t.get("priority")
if priority:
assert priority in priorities
repartition_priority[priority] += 1
assert repartition_priority == {
k: v * nb_tasks_priority for k, v in priorities.items()
}
diff --git a/swh/scheduler/tests/test_scheduler.py b/swh/scheduler/tests/test_scheduler.py
index 8ffa85e..bdc8e57 100644
--- a/swh/scheduler/tests/test_scheduler.py
+++ b/swh/scheduler/tests/test_scheduler.py
@@ -1,730 +1,729 @@
# Copyright (C) 2017-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
+from collections import defaultdict
import copy
import datetime
-import random
-import uuid
-
-from collections import defaultdict
import inspect
+import random
from typing import Any, Dict, List, Optional
+import uuid
from arrow import utcnow
import attr
import pytest
from swh.scheduler.exc import StaleData
from swh.scheduler.interface import SchedulerInterface
from swh.scheduler.model import ListedOrigin, ListedOriginPageToken
-from .common import tasks_from_template, TEMPLATES, TASK_TYPES, LISTERS
+from .common import LISTERS, TASK_TYPES, TEMPLATES, tasks_from_template
def subdict(d, keys=None, excl=()):
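"""Project dict d onto keys (all of them by default), excluding keys in excl."""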
if keys is None:
keys = [k for k in d.keys()]
return {k: d[k] for k in keys if k not in excl}
class TestScheduler:
def test_interface(self, swh_scheduler):
"""Checks all methods of SchedulerInterface are implemented by this
backend, and that they have the same signature."""
# Create an instance of the protocol (which cannot be instantiated
# directly, so this creates a subclass, then instantiates it)
interface = type("_", (SchedulerInterface,), {})()
assert "create_task_type" in dir(interface)
missing_methods = []
for meth_name in dir(interface):
if meth_name.startswith("_"):
continue
interface_meth = getattr(interface, meth_name)
try:
concrete_meth = getattr(swh_scheduler, meth_name)
except AttributeError:
if not getattr(interface_meth, "deprecated_endpoint", False):
# The backend is missing a (non-deprecated) endpoint
missing_methods.append(meth_name)
continue
expected_signature = inspect.signature(interface_meth)
actual_signature = inspect.signature(concrete_meth)
assert expected_signature == actual_signature, meth_name
assert missing_methods == []
def test_get_priority_ratios(self, swh_scheduler):
assert swh_scheduler.get_priority_ratios() == {
"high": 0.5,
"normal": 0.3,
"low": 0.2,
}
def test_add_task_type(self, swh_scheduler):
tt = TASK_TYPES["git"]
swh_scheduler.create_task_type(tt)
assert tt == swh_scheduler.get_task_type(tt["type"])
tt2 = TASK_TYPES["hg"]
swh_scheduler.create_task_type(tt2)
assert tt == swh_scheduler.get_task_type(tt["type"])
assert tt2 == swh_scheduler.get_task_type(tt2["type"])
def test_create_task_type_idempotence(self, swh_scheduler):
tt = TASK_TYPES["git"]
swh_scheduler.create_task_type(tt)
swh_scheduler.create_task_type(tt)
assert tt == swh_scheduler.get_task_type(tt["type"])
def test_get_task_types(self, swh_scheduler):
tt, tt2 = TASK_TYPES["git"], TASK_TYPES["hg"]
swh_scheduler.create_task_type(tt)
swh_scheduler.create_task_type(tt2)
actual_task_types = swh_scheduler.get_task_types()
assert tt in actual_task_types
assert tt2 in actual_task_types
def test_create_tasks(self, swh_scheduler):
priority_ratio = self._priority_ratio(swh_scheduler)
self._create_task_types(swh_scheduler)
num_tasks_priority = 100
tasks_1 = tasks_from_template(TEMPLATES["git"], utcnow(), 100)
tasks_2 = tasks_from_template(
TEMPLATES["hg"],
utcnow(),
100,
num_tasks_priority,
priorities=priority_ratio,
)
tasks = tasks_1 + tasks_2
# tasks are returned only once with their ids
ret1 = swh_scheduler.create_tasks(tasks + tasks_1 + tasks_2)
set_ret1 = set([t["id"] for t in ret1])
# creating the same set results in the same ids
ret = swh_scheduler.create_tasks(tasks)
set_ret = set([t["id"] for t in ret])
# Idempotence results
assert set_ret == set_ret1
assert len(ret) == len(ret1)
ids = set()
actual_priorities = defaultdict(int)
for task, orig_task in zip(ret, tasks):
task = copy.deepcopy(task)
task_type = TASK_TYPES[orig_task["type"].split("-")[-1]]
assert task["id"] not in ids
assert task["status"] == "next_run_not_scheduled"
assert task["current_interval"] == task_type["default_interval"]
assert task["policy"] == orig_task.get("policy", "recurring")
priority = task.get("priority")
if priority:
actual_priorities[priority] += 1
assert task["retries_left"] == (task_type["num_retries"] or 0)
ids.add(task["id"])
del task["id"]
del task["status"]
del task["current_interval"]
del task["retries_left"]
if "policy" not in orig_task:
del task["policy"]
if "priority" not in orig_task:
del task["priority"]
assert task == orig_task
assert dict(actual_priorities) == {
priority: int(ratio * num_tasks_priority)
for priority, ratio in priority_ratio.items()
}
def test_peek_ready_tasks_no_priority(self, swh_scheduler):
self._create_task_types(swh_scheduler)
t = utcnow()
task_type = TEMPLATES["git"]["type"]
tasks = tasks_from_template(TEMPLATES["git"], t, 100)
random.shuffle(tasks)
swh_scheduler.create_tasks(tasks)
ready_tasks = swh_scheduler.peek_ready_tasks(task_type)
assert len(ready_tasks) == len(tasks)
for i in range(len(ready_tasks) - 1):
assert ready_tasks[i]["next_run"] <= ready_tasks[i + 1]["next_run"]
# Only get the first few ready tasks
limit = random.randrange(5, 5 + len(tasks) // 2)
ready_tasks_limited = swh_scheduler.peek_ready_tasks(task_type, num_tasks=limit)
assert len(ready_tasks_limited) == limit
assert ready_tasks_limited == ready_tasks[:limit]
# Limit by timestamp
max_ts = tasks[limit - 1]["next_run"]
ready_tasks_timestamped = swh_scheduler.peek_ready_tasks(
task_type, timestamp=max_ts
)
for ready_task in ready_tasks_timestamped:
assert ready_task["next_run"] <= max_ts
# Make sure we get proper behavior for the first ready tasks
assert ready_tasks[: len(ready_tasks_timestamped)] == ready_tasks_timestamped
# Limit by both
ready_tasks_both = swh_scheduler.peek_ready_tasks(
task_type, timestamp=max_ts, num_tasks=limit // 3
)
assert len(ready_tasks_both) <= limit // 3
for ready_task in ready_tasks_both:
assert ready_task["next_run"] <= max_ts
assert ready_task in ready_tasks[: limit // 3]
def _priority_ratio(self, swh_scheduler):
return swh_scheduler.get_priority_ratios()
def test_peek_ready_tasks_mixed_priorities(self, swh_scheduler):
priority_ratio = self._priority_ratio(swh_scheduler)
self._create_task_types(swh_scheduler)
t = utcnow()
task_type = TEMPLATES["git"]["type"]
num_tasks_priority = 100
num_tasks_no_priority = 100
# Create tasks with and without priorities
tasks = tasks_from_template(
TEMPLATES["git"],
t,
num=num_tasks_no_priority,
num_priority=num_tasks_priority,
priorities=priority_ratio,
)
random.shuffle(tasks)
swh_scheduler.create_tasks(tasks)
# take all available tasks
ready_tasks = swh_scheduler.peek_ready_tasks(task_type)
assert len(ready_tasks) == len(tasks)
assert num_tasks_priority + num_tasks_no_priority == len(ready_tasks)
count_tasks_per_priority = defaultdict(int)
for task in ready_tasks:
priority = task.get("priority")
if priority:
count_tasks_per_priority[priority] += 1
assert dict(count_tasks_per_priority) == {
priority: int(ratio * num_tasks_priority)
for priority, ratio in priority_ratio.items()
}
# Only get some ready tasks
num_tasks = random.randrange(5, 5 + num_tasks_no_priority // 2)
num_tasks_priority = random.randrange(5, num_tasks_priority // 2)
ready_tasks_limited = swh_scheduler.peek_ready_tasks(
task_type, num_tasks=num_tasks, num_tasks_priority=num_tasks_priority
)
count_tasks_per_priority = defaultdict(int)
for task in ready_tasks_limited:
priority = task.get("priority")
count_tasks_per_priority[priority] += 1
import math
for priority, ratio in priority_ratio.items():
expected_count = math.ceil(ratio * num_tasks_priority)
actual_prio = count_tasks_per_priority[priority]
assert actual_prio == expected_count or actual_prio == expected_count + 1
assert count_tasks_per_priority[None] == num_tasks
def test_grab_ready_tasks(self, swh_scheduler):
priority_ratio = self._priority_ratio(swh_scheduler)
self._create_task_types(swh_scheduler)
t = utcnow()
task_type = TEMPLATES["git"]["type"]
num_tasks_priority = 100
num_tasks_no_priority = 100
# Create tasks with and without priorities
tasks = tasks_from_template(
TEMPLATES["git"],
t,
num=num_tasks_no_priority,
num_priority=num_tasks_priority,
priorities=priority_ratio,
)
random.shuffle(tasks)
swh_scheduler.create_tasks(tasks)
first_ready_tasks = swh_scheduler.peek_ready_tasks(
task_type, num_tasks=10, num_tasks_priority=10
)
grabbed_tasks = swh_scheduler.grab_ready_tasks(
task_type, num_tasks=10, num_tasks_priority=10
)
for peeked, grabbed in zip(first_ready_tasks, grabbed_tasks):
assert peeked["status"] == "next_run_not_scheduled"
del peeked["status"]
assert grabbed["status"] == "next_run_scheduled"
del grabbed["status"]
assert peeked == grabbed
assert peeked["priority"] == grabbed["priority"]
def test_get_tasks(self, swh_scheduler):
self._create_task_types(swh_scheduler)
t = utcnow()
tasks = tasks_from_template(TEMPLATES["git"], t, 100)
tasks = swh_scheduler.create_tasks(tasks)
random.shuffle(tasks)
while len(tasks) > 1:
length = random.randrange(1, len(tasks))
cur_tasks = sorted(tasks[:length], key=lambda x: x["id"])
tasks[:length] = []
ret = swh_scheduler.get_tasks(task["id"] for task in cur_tasks)
# result is not guaranteed to be sorted
ret.sort(key=lambda x: x["id"])
assert ret == cur_tasks
def test_search_tasks(self, swh_scheduler):
def make_real_dicts(lst):
"""RealDictRow is not a real dict."""
return [dict(d.items()) for d in lst]
self._create_task_types(swh_scheduler)
t = utcnow()
tasks = tasks_from_template(TEMPLATES["git"], t, 100)
tasks = swh_scheduler.create_tasks(tasks)
assert make_real_dicts(swh_scheduler.search_tasks()) == make_real_dicts(tasks)
def assert_filtered_task_ok(
self, task: Dict[str, Any], after: datetime.datetime, before: datetime.datetime
) -> None:
"""Ensure filtered tasks have the right expected properties
(within the range, recurring disabled, etc.)
"""
started = task["started"]
date = started if started is not None else task["scheduled"]
assert after <= date and date <= before
if task["task_policy"] == "oneshot":
assert task["task_status"] in ["completed", "disabled"]
if task["task_policy"] == "recurring":
assert task["task_status"] in ["disabled"]
def test_filter_task_to_archive(self, swh_scheduler):
"""Filtering only list disabled recurring or completed oneshot tasks
"""
self._create_task_types(swh_scheduler)
_time = utcnow()
recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
total_tasks = len(recurring) + len(oneshots)
# simulate scheduling tasks
pending_tasks = swh_scheduler.create_tasks(recurring + oneshots)
backend_tasks = [
{
"task": task["id"],
"backend_id": str(uuid.uuid4()),
"scheduled": utcnow(),
}
for task in pending_tasks
]
swh_scheduler.mass_schedule_task_runs(backend_tasks)
# we simulate the tasks being done
_tasks = []
for task in backend_tasks:
t = swh_scheduler.end_task_run(task["backend_id"], status="eventful")
_tasks.append(t)
# Randomly update the tasks' statuses per policy
status_per_policy = {"recurring": 0, "oneshot": 0}
status_choice = {
# policy: [tuple (1-for-filtering, 'associated-status')]
"recurring": [
(1, "disabled"),
(0, "completed"),
(0, "next_run_not_scheduled"),
],
"oneshot": [
(0, "next_run_not_scheduled"),
(1, "disabled"),
(1, "completed"),
],
}
tasks_to_update = defaultdict(list)
_task_ids = defaultdict(list)
# randomly 'disable' recurring tasks or 'complete' oneshot tasks
for task in pending_tasks:
policy = task["policy"]
_task_ids[policy].append(task["id"])
status = random.choice(status_choice[policy])
if status[0] != 1:
continue
# elected for filtering
status_per_policy[policy] += status[0]
tasks_to_update[policy].append(task["id"])
swh_scheduler.disable_tasks(tasks_to_update["recurring"])
# hack: change the status to something other than completed/disabled
swh_scheduler.set_status_tasks(
_task_ids["oneshot"], status="next_run_not_scheduled"
)
# complete the tasks to update
swh_scheduler.set_status_tasks(tasks_to_update["oneshot"], status="completed")
total_tasks_filtered = (
status_per_policy["recurring"] + status_per_policy["oneshot"]
)
# no pagination scenario
# retrieve tasks to archive
after = _time.shift(days=-1)
after_ts = after.format("YYYY-MM-DD")
before = utcnow().shift(days=1)
before_ts = before.format("YYYY-MM-DD")
tasks_result = swh_scheduler.filter_task_to_archive(
after_ts=after_ts, before_ts=before_ts, limit=total_tasks
)
tasks_to_archive = tasks_result["tasks"]
assert len(tasks_to_archive) == total_tasks_filtered
assert tasks_result.get("next_page_token") is None
actual_filtered_per_status = {"recurring": 0, "oneshot": 0}
for task in tasks_to_archive:
self.assert_filtered_task_ok(task, after, before)
actual_filtered_per_status[task["task_policy"]] += 1
assert actual_filtered_per_status == status_per_policy
# pagination scenario
nb_tasks = 3
tasks_result = swh_scheduler.filter_task_to_archive(
after_ts=after_ts, before_ts=before_ts, limit=nb_tasks
)
tasks_to_archive2 = tasks_result["tasks"]
assert len(tasks_to_archive2) == nb_tasks
next_page_token = tasks_result["next_page_token"]
assert next_page_token is not None
all_tasks = tasks_to_archive2
while next_page_token is not None: # Retrieve paginated results
tasks_result = swh_scheduler.filter_task_to_archive(
after_ts=after_ts,
before_ts=before_ts,
limit=nb_tasks,
page_token=next_page_token,
)
tasks_to_archive2 = tasks_result["tasks"]
assert len(tasks_to_archive2) <= nb_tasks
all_tasks.extend(tasks_to_archive2)
next_page_token = tasks_result.get("next_page_token")
actual_filtered_per_status = {"recurring": 0, "oneshot": 0}
for task in all_tasks:
self.assert_filtered_task_ok(task, after, before)
actual_filtered_per_status[task["task_policy"]] += 1
assert actual_filtered_per_status == status_per_policy
def test_delete_archived_tasks(self, swh_scheduler):
self._create_task_types(swh_scheduler)
_time = utcnow()
recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
total_tasks = len(recurring) + len(oneshots)
pending_tasks = swh_scheduler.create_tasks(recurring + oneshots)
backend_tasks = [
{
"task": task["id"],
"backend_id": str(uuid.uuid4()),
"scheduled": utcnow(),
}
for task in pending_tasks
]
swh_scheduler.mass_schedule_task_runs(backend_tasks)
_tasks = []
percent = random.randint(0, 100) # random threshold for electing task runs for removal
for task in backend_tasks:
t = swh_scheduler.end_task_run(task["backend_id"], status="eventful")
c = random.randint(0, 100)
if c <= percent:
_tasks.append({"task_id": t["task"], "task_run_id": t["id"]})
swh_scheduler.delete_archived_tasks(_tasks)
all_tasks = [task["id"] for task in swh_scheduler.search_tasks()]
tasks_count = len(all_tasks)
tasks_run_count = len(swh_scheduler.get_task_runs(all_tasks))
assert tasks_count == total_tasks - len(_tasks)
assert tasks_run_count == total_tasks - len(_tasks)
def test_get_task_runs_no_task(self, swh_scheduler):
"""No task exist in the scheduler's db, get_task_runs() should always return an
empty list.
"""
assert not swh_scheduler.get_task_runs(task_ids=())
assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3))
assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3), limit=10)
def test_get_task_runs_no_task_executed(self, swh_scheduler):
"""No task has been executed yet, get_task_runs() should always return an empty
list.
"""
self._create_task_types(swh_scheduler)
_time = utcnow()
recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
swh_scheduler.create_tasks(recurring + oneshots)
assert not swh_scheduler.get_task_runs(task_ids=())
assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3))
assert not swh_scheduler.get_task_runs(task_ids=(1, 2, 3), limit=10)
def test_get_task_runs_with_scheduled(self, swh_scheduler):
"""Some tasks have been scheduled but not executed yet, get_task_runs() should
not return an empty list. limit should behave as expected.
"""
self._create_task_types(swh_scheduler)
_time = utcnow()
recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
total_tasks = len(recurring) + len(oneshots)
pending_tasks = swh_scheduler.create_tasks(recurring + oneshots)
backend_tasks = [
{
"task": task["id"],
"backend_id": str(uuid.uuid4()),
"scheduled": utcnow(),
}
for task in pending_tasks
]
swh_scheduler.mass_schedule_task_runs(backend_tasks)
assert not swh_scheduler.get_task_runs(task_ids=[total_tasks + 1])
btask = backend_tasks[0]
runs = swh_scheduler.get_task_runs(task_ids=[btask["task"]])
assert len(runs) == 1
run = runs[0]
assert subdict(run, excl=("id",)) == {
"task": btask["task"],
"backend_id": btask["backend_id"],
"scheduled": btask["scheduled"],
"started": None,
"ended": None,
"metadata": None,
"status": "scheduled",
}
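# limit caps the number of returned runs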
runs = swh_scheduler.get_task_runs(
task_ids=[bt["task"] for bt in backend_tasks], limit=2
)
assert len(runs) == 2
runs = swh_scheduler.get_task_runs(
task_ids=[bt["task"] for bt in backend_tasks]
)
assert len(runs) == total_tasks
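# Compare only the fields set at scheduling time against the original backend_tasks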
keys = ("task", "backend_id", "scheduled")
assert (
sorted([subdict(x, keys) for x in runs], key=lambda x: x["task"])
== backend_tasks
)
def test_get_task_runs_with_executed(self, swh_scheduler):
"""Some tasks have been executed, get_task_runs() should
not return an empty list. limit should behave as expected.
"""
self._create_task_types(swh_scheduler)
_time = utcnow()
recurring = tasks_from_template(TEMPLATES["git"], _time, 12)
oneshots = tasks_from_template(TEMPLATES["hg"], _time, 12)
pending_tasks = swh_scheduler.create_tasks(recurring + oneshots)
backend_tasks = [
{
"task": task["id"],
"backend_id": str(uuid.uuid4()),
"scheduled": utcnow(),
}
for task in pending_tasks
]
swh_scheduler.mass_schedule_task_runs(backend_tasks)
btask = backend_tasks[0]
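# Start one run and check that the start timestamp, metadata and status are recorded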
ts = utcnow()
swh_scheduler.start_task_run(
btask["backend_id"], metadata={"something": "stupid"}, timestamp=ts
)
runs = swh_scheduler.get_task_runs(task_ids=[btask["task"]])
assert len(runs) == 1
assert subdict(runs[0], excl=("id")) == {
"task": btask["task"],
"backend_id": btask["backend_id"],
"scheduled": btask["scheduled"],
"started": ts,
"ended": None,
"metadata": {"something": "stupid"},
"status": "started",
}
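# End the run: the metadata dicts are merged and the final status is recorded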
ts2 = utcnow()
swh_scheduler.end_task_run(
btask["backend_id"],
metadata={"other": "stuff"},
timestamp=ts2,
status="eventful",
)
runs = swh_scheduler.get_task_runs(task_ids=[btask["task"]])
assert len(runs) == 1
assert subdict(runs[0], excl=("id")) == {
"task": btask["task"],
"backend_id": btask["backend_id"],
"scheduled": btask["scheduled"],
"started": ts,
"ended": ts2,
"metadata": {"something": "stupid", "other": "stuff"},
"status": "eventful",
}
def test_get_or_create_lister(self, swh_scheduler):
db_listers = []
for lister_args in LISTERS:
db_listers.append(swh_scheduler.get_or_create_lister(**lister_args))
for lister, lister_args in zip(db_listers, LISTERS):
assert lister.name == lister_args["name"]
assert lister.instance_name == lister_args.get("instance_name", "")
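# Fetching the same lister again must return the already-stored instance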
lister_get_again = swh_scheduler.get_or_create_lister(
lister.name, lister.instance_name
)
assert lister == lister_get_again
def test_update_lister(self, swh_scheduler, stored_lister):
lister = attr.evolve(stored_lister, current_state={"updated": "now"})
updated_lister = swh_scheduler.update_lister(lister)
assert updated_lister.updated > lister.updated
assert updated_lister == attr.evolve(lister, updated=updated_lister.updated)
def test_update_lister_stale(self, swh_scheduler, stored_lister):
swh_scheduler.update_lister(stored_lister)
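# Updating again with the now-stale object must be rejected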
with pytest.raises(StaleData) as exc:
swh_scheduler.update_lister(stored_lister)
assert "state not updated" in exc.value.args[0]
def test_record_listed_origins(self, swh_scheduler, listed_origins):
ret = swh_scheduler.record_listed_origins(listed_origins)
assert set(returned.url for returned in ret) == set(
origin.url for origin in listed_origins
)
assert all(origin.first_seen == origin.last_seen for origin in ret)
def test_record_listed_origins_upsert(self, swh_scheduler, listed_origins):
# First, insert `cutoff` origins
cutoff = 100
assert cutoff < len(listed_origins)
ret = swh_scheduler.record_listed_origins(listed_origins[:cutoff])
assert len(ret) == cutoff
# Then, insert all origins, including the `cutoff` first.
ret = swh_scheduler.record_listed_origins(listed_origins)
assert len(ret) == len(listed_origins)
# Two different "first seen" values
assert len(set(origin.first_seen for origin in ret)) == 2
# But a single "last seen" value
assert len(set(origin.last_seen for origin in ret)) == 1
def test_get_listed_origins_exact(self, swh_scheduler, listed_origins):
swh_scheduler.record_listed_origins(listed_origins)
for i, origin in enumerate(listed_origins):
ret = swh_scheduler.get_listed_origins(
lister_id=origin.lister_id, url=origin.url
)
assert ret.next_page_token is None
assert len(ret.origins) == 1
assert ret.origins[0].lister_id == origin.lister_id
assert ret.origins[0].url == origin.url
@pytest.mark.parametrize("num_origins,limit", [(20, 6), (5, 42), (20, 20)])
def test_get_listed_origins_limit(
self, swh_scheduler, listed_origins, num_origins, limit
) -> None:
added_origins = sorted(
listed_origins[:num_origins], key=lambda o: (o.lister_id, o.url)
)
swh_scheduler.record_listed_origins(added_origins)
returned_origins: List[ListedOrigin] = []
call_count = 0
next_page_token: Optional[ListedOriginPageToken] = None
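# Page through the results until no next_page_token is returned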
while True:
call_count += 1
ret = swh_scheduler.get_listed_origins(
lister_id=listed_origins[0].lister_id,
limit=limit,
page_token=next_page_token,
)
returned_origins.extend(ret.origins)
next_page_token = ret.next_page_token
if next_page_token is None:
break
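# Note: when num_origins is an exact multiple of limit, one extra call fetches an empty last page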
assert call_count == (num_origins // limit) + 1
assert len(returned_origins) == num_origins
assert [(origin.lister_id, origin.url) for origin in returned_origins] == [
(origin.lister_id, origin.url) for origin in added_origins
]
def test_get_listed_origins_all(self, swh_scheduler, listed_origins) -> None:
swh_scheduler.record_listed_origins(listed_origins)
ret = swh_scheduler.get_listed_origins(limit=len(listed_origins) + 1)
assert ret.next_page_token is None
assert len(ret.origins) == len(listed_origins)
def _create_task_types(self, scheduler):
for tt in TASK_TYPES.values():
scheduler.create_task_type(tt)
diff --git a/swh/scheduler/tests/test_server.py b/swh/scheduler/tests/test_server.py
index e41c80c..c291d81 100644
--- a/swh/scheduler/tests/test_server.py
+++ b/swh/scheduler/tests/test_server.py
@@ -1,110 +1,111 @@
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
+
import pytest
import yaml
from swh.scheduler.api.server import load_and_check_config
def prepare_config_file(tmpdir, content, name="config.yml"):
"""Prepare configuration file in `$tmpdir/name` with content `content`.
Args:
tmpdir (LocalPath): root directory
content (str/dict): Content of the file either as string or as a dict.
If a dict, converts the dict into a yaml string.
name (str): configuration filename
Returns
path (str) of the configuration file prepared.
"""
config_path = tmpdir / name
if isinstance(content, dict): # convert if needed
content = yaml.dump(content)
config_path.write_text(content, encoding="utf-8")
# pytest on python3.5 does not support LocalPath manipulation, so
# convert path to string
return str(config_path)
def test_load_and_check_config_no_configuration():
"""Inexistent configuration files raises"""
with pytest.raises(EnvironmentError) as e:
load_and_check_config(None)
assert e.value.args[0] == "Configuration file must be defined"
config_path = "/some/inexistent/config.yml"
with pytest.raises(FileNotFoundError) as e:
load_and_check_config(config_path)
assert e.value.args[0] == "Configuration file %s does not exist" % (config_path,)
def test_load_and_check_config_wrong_configuration(tmpdir):
"""Wrong configuration raises"""
config_path = prepare_config_file(tmpdir, "something: useless")
with pytest.raises(KeyError) as e:
load_and_check_config(config_path)
assert e.value.args[0] == "Missing '%scheduler' configuration"
def test_load_and_check_config_remote_config_local_type_raise(tmpdir):
"""'local' configuration without 'local' storage raises"""
config = {"scheduler": {"cls": "remote", "args": {}}}
config_path = prepare_config_file(tmpdir, config)
with pytest.raises(ValueError) as e:
load_and_check_config(config_path, type="local")
assert (
e.value.args[0] == "The scheduler backend can only be started with a 'local'"
" configuration"
)
def test_load_and_check_config_local_incomplete_configuration(tmpdir):
"""Incomplete 'local' configuration should raise"""
config = {
"scheduler": {
"cls": "local",
"args": {"db": "database", "something": "needed-for-test",},
}
}
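# Drop either the inner 'db' entry or the whole 'args' entry and check the reported error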
for key in ["db", "args"]:
c = copy.deepcopy(config)
if key == "db":
source = c["scheduler"]["args"]
else:
source = c["scheduler"]
source.pop(key)
config_path = prepare_config_file(tmpdir, c)
with pytest.raises(KeyError) as e:
load_and_check_config(config_path)
assert (
e.value.args[0] == "Invalid configuration; missing '%s' config entry" % key
)
def test_load_and_check_config_local_config_fine(tmpdir):
"""Local configuration is fine"""
config = {"scheduler": {"cls": "local", "args": {"db": "db",}}}
config_path = prepare_config_file(tmpdir, config)
cfg = load_and_check_config(config_path, type="local")
assert cfg == config
def test_load_and_check_config_remote_config_fine(tmpdir):
"""'Remote configuration is fine"""
config = {"scheduler": {"cls": "remote", "args": {}}}
config_path = prepare_config_file(tmpdir, config)
cfg = load_and_check_config(config_path, type="any")
assert cfg == config
diff --git a/swh/scheduler/tests/test_utils.py b/swh/scheduler/tests/test_utils.py
index 655f644..85aecc6 100644
--- a/swh/scheduler/tests/test_utils.py
+++ b/swh/scheduler/tests/test_utils.py
@@ -1,79 +1,79 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
-import unittest
from datetime import timezone
+import unittest
from unittest.mock import patch
from swh.scheduler import utils
class UtilsTest(unittest.TestCase):
@patch("swh.scheduler.utils.datetime")
def test_create_oneshot_task_dict_simple(self, mock_datetime):
mock_datetime.now.return_value = "some-date"
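# datetime.now is mocked so the generated next_run value is deterministic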
actual_task = utils.create_oneshot_task_dict("some-task-type")
expected_task = {
"policy": "oneshot",
"type": "some-task-type",
"next_run": "some-date",
"arguments": {"args": [], "kwargs": {},},
}
self.assertEqual(actual_task, expected_task)
mock_datetime.now.assert_called_once_with(tz=timezone.utc)
@patch("swh.scheduler.utils.datetime")
def test_create_oneshot_task_dict_other_call(self, mock_datetime):
mock_datetime.now.return_value = "some-other-date"
actual_task = utils.create_oneshot_task_dict(
"some-task-type", "arg0", "arg1", priority="high", other_stuff="normal"
)
expected_task = {
"policy": "oneshot",
"type": "some-task-type",
"next_run": "some-other-date",
"arguments": {
"args": ("arg0", "arg1"),
"kwargs": {"other_stuff": "normal"},
},
"priority": "high",
}
self.assertEqual(actual_task, expected_task)
mock_datetime.now.assert_called_once_with(tz=timezone.utc)
@patch("swh.scheduler.utils.datetime")
def test_create_task_dict(self, mock_datetime):
mock_datetime.now.return_value = "date"
actual_task = utils.create_task_dict(
"task-type",
"recurring",
"arg0",
"arg1",
priority="low",
other_stuff="normal",
retries_left=3,
)
expected_task = {
"policy": "recurring",
"type": "task-type",
"next_run": "date",
"arguments": {
"args": ("arg0", "arg1"),
"kwargs": {"other_stuff": "normal"},
},
"priority": "low",
"retries_left": 3,
}
self.assertEqual(actual_task, expected_task)
mock_datetime.now.assert_called_once_with(tz=timezone.utc)