diff --git a/sql/updater/sql/Makefile b/sql/updater/sql/Makefile
new file mode 100644
index 0000000..cd976e0
--- /dev/null
+++ b/sql/updater/sql/Makefile
@@ -0,0 +1,50 @@
+# Depends: postgresql-client, postgresql-autodoc
+
+DBNAME = softwareheritage-scheduler-updater-dev
+DOCDIR = autodoc
+
+SQL_INIT = swh-init.sql
+SQL_SCHEMA = swh-schema.sql
+SQL_FUNC = swh-func.sql
+SQLS = $(SQL_INIT) $(SQL_SCHEMA) $(SQL_FUNC)
+
+PSQL_BIN = psql
+PSQL_FLAGS = --echo-all -X -v ON_ERROR_STOP=1
+PSQL = $(PSQL_BIN) $(PSQL_FLAGS)
+
+all:
+
+createdb: createdb-stamp
+createdb-stamp: $(SQL_INIT)
+ createdb $(DBNAME)
+ touch $@
+
+filldb: filldb-stamp
+filldb-stamp: createdb-stamp
+ cat $(SQLS) | $(PSQL) $(DBNAME)
+ touch $@
+
+dropdb:
+ -dropdb $(DBNAME)
+
+dumpdb: swh-scheduler-updater.dump
+swh-scheduler-updater.dump: filldb-stamp
+ pg_dump -Fc $(DBNAME) > $@
+
+doc: autodoc-stamp $(DOCDIR)/swh-scheduler-updater.pdf
+autodoc-stamp: filldb-stamp
+ test -d $(DOCDIR)/ || mkdir $(DOCDIR)
+ postgresql_autodoc -d $(DBNAME) -f $(DOCDIR)/swh-scheduler-updater
+ cp -a $(DOCDIR)/swh-scheduler-updater.dot $(DOCDIR)/swh-scheduler-updater.dot.orig
+ touch $@
+
+$(DOCDIR)/swh-scheduler-updater.pdf: autodoc-stamp
+ dot -T pdf $(DOCDIR)/swh-scheduler-updater.dot > $(DOCDIR)/swh-scheduler-updater.pdf
+
+clean:
+ rm -rf *-stamp $(DOCDIR)/
+
+distclean: clean dropdb
+ rm -f swh-scheduler-updater.dump
+
+.PHONY: all createdb filldb dropdb dumpdb doc clean distclean
diff --git a/sql/updater/sql/swh-func.sql b/sql/updater/sql/swh-func.sql
new file mode 100644
index 0000000..81d38ce
--- /dev/null
+++ b/sql/updater/sql/swh-func.sql
@@ -0,0 +1,34 @@
+-- Postgresql index helper function
+create or replace function hash_sha1(text)
+ returns sha1
+as $$
+ select public.digest($1, 'sha1') :: sha1
+$$ language sql strict immutable;
+
+comment on function hash_sha1(text) is 'Compute the sha1 hash of a text input';
+
+-- create a temporary table tmp_cache, mirroring the cache table
+create or replace function swh_mktemp_cache()
+ returns void
+ language sql
+as $$
+ create temporary table tmp_cache (
+ like cache including defaults
+ ) on commit drop;
+ alter table tmp_cache drop column id;
+$$;
+
+create or replace function swh_cache_put()
+ returns void
+ language plpgsql
+as $$
+begin
+ insert into cache (id, url, last_seen)
+ select hash_sha1(url), url, last_seen
+ from tmp_cache t
+ on conflict(id)
+ do update set rate = (select rate from cache where id=excluded.id) + 1,
+ last_seen = excluded.last_seen;
+ return;
+end
+$$;
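
The function pair above implements a copy-then-upsert protocol: callers load rows into the transient tmp_cache, then swh_cache_put folds them into cache, bumping rate and refreshing last_seen on conflict. A minimal sketch of driving it by hand with psycopg2 (the DSN matches the Makefile's dev database; the URL is an illustrative assumption):

import psycopg2

db = psycopg2.connect('dbname=softwareheritage-scheduler-updater-dev')
with db:  # one transaction: tmp_cache is "on commit drop"
    with db.cursor() as cur:
        cur.execute('select swh_mktemp_cache()')
        # tmp_cache mirrors cache minus the id column; the id is
        # computed from url by swh_cache_put() via hash_sha1()
        cur.execute(
            'insert into tmp_cache (url, last_seen) values (%s, now())',
            ('https://example.org/repo',))
        # fold tmp_cache into cache; a second run with the same url
        # increments cache.rate instead of inserting a new row
        cur.execute('select swh_cache_put()')
db.close()
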
diff --git a/sql/updater/sql/swh-init.sql b/sql/updater/sql/swh-init.sql
new file mode 100644
index 0000000..43774e3
--- /dev/null
+++ b/sql/updater/sql/swh-init.sql
@@ -0,0 +1,4 @@
+create extension if not exists btree_gist;
+create extension if not exists pgcrypto;
+
+create or replace language plpgsql;
diff --git a/sql/updater/sql/swh-schema.sql b/sql/updater/sql/swh-schema.sql
new file mode 100644
index 0000000..56a7391
--- /dev/null
+++ b/sql/updater/sql/swh-schema.sql
@@ -0,0 +1,24 @@
+create table dbversion
+(
+ version int primary key,
+ release timestamptz not null,
+ description text not null
+);
+
+comment on table dbversion is 'Schema update tracking';
+
+-- a SHA1 checksum (not necessarily originating from Git)
+create domain sha1 as bytea check (length(value) = 20);
+
+insert into dbversion (version, release, description)
+ values (1, now(), 'Work In Progress');
+
+create table cache (
+ id sha1 primary key,
+ url text not null,
+ rate int default 1,
+ last_seen timestamptz not null
+);
+
+create index on cache(url);
+create index on cache(last_seen);
diff --git a/swh/scheduler/backend.py b/swh/scheduler/backend.py
index 517310c..4395382 100644
--- a/swh/scheduler/backend.py
+++ b/swh/scheduler/backend.py
@@ -1,498 +1,506 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import binascii
import datetime
from functools import wraps
import json
import tempfile
from arrow import Arrow, utcnow
import psycopg2
import psycopg2.extras
from psycopg2.extensions import AsIs
from swh.core.config import SWHConfig
def adapt_arrow(arrow):
return AsIs("'%s'::timestamptz" % arrow.isoformat())
psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json)
psycopg2.extensions.register_adapter(Arrow, adapt_arrow)
def autocommit(fn):
@wraps(fn)
def wrapped(self, *args, **kwargs):
autocommit = False
if 'cursor' not in kwargs or not kwargs['cursor']:
autocommit = True
kwargs['cursor'] = self.cursor()
try:
ret = fn(self, *args, **kwargs)
except Exception:
if autocommit:
self.rollback()
raise
if autocommit:
self.commit()
return ret
return wrapped
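# Editorial note (sketch): a method decorated with @autocommit gets its
# own cursor, commit and rollback when called without a cursor; passing
# an explicit cursor joins the caller's transaction instead, e.g.
# (task type name assumed for illustration):
#   backend.get_task_type('origin-update-git')             # self-committing
#   cur = backend.cursor()
#   backend.get_task_type('origin-update-git', cursor=cur)
#   backend.commit()                                       # caller commits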
-class SchedulerBackend(SWHConfig):
- """Backend for the Software Heritage scheduling database.
-
- """
- CONFIG_BASE_FILENAME = 'scheduler'
- DEFAULT_CONFIG = {
- 'scheduling_db': ('str', 'dbname=softwareheritage-scheduler-dev'),
- }
+class DbBackend:
+ """Mixin intended to be used within scheduling db backend classes
- def __init__(self, **override_config):
- self.config = self.parse_config_file(global_config=False)
- self.config.update(override_config)
-
- self.db = None
-
- self.reconnect()
+ cf. swh.scheduler.backend.SchedulerBackend, and
+ swh.scheduler.updater.backend.SchedulerUpdaterBackend
+ """
def reconnect(self):
if not self.db or self.db.closed:
self.db = psycopg2.connect(
- dsn=self.config['scheduling_db'],
+ dsn=self.db_conn_dsn,
cursor_factory=psycopg2.extras.RealDictCursor,
)
def cursor(self):
"""Return a fresh cursor on the database, with auto-reconnection in
case of failure
"""
cur = None
# Get a fresh cursor and reconnect at most three times
tries = 0
while True:
tries += 1
try:
cur = self.db.cursor()
cur.execute('select 1')
break
except psycopg2.OperationalError:
if tries < 3:
self.reconnect()
else:
raise
return cur
def commit(self):
"""Commit a transaction"""
self.db.commit()
def rollback(self):
"""Rollback a transaction"""
self.db.rollback()
def close_connection(self):
"""Close db connection"""
if self.db and not self.db.closed:
self.db.close()
+ def _format_query(self, query, keys):
+ """Format a query with the given keys"""
+
+ query_keys = ', '.join(keys)
+ placeholders = ', '.join(['%s'] * len(keys))
+
+ return query.format(keys=query_keys, placeholders=placeholders)
+
+ def _format_multiquery(self, query, keys, values):
+ """Format a query with placeholders generated for multiple values"""
+ query_keys = ', '.join(keys)
+ placeholders = '), ('.join(
+ [', '.join(['%s'] * len(keys))] * len(values)
+ )
+ ret_values = sum([[value[key] for key in keys]
+ for value in values], [])
+
+ return (
+ query.format(keys=query_keys, placeholders=placeholders),
+ ret_values,
+ )
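+ # Illustrative expansion (editorial sketch, hypothetical template):
+ #   _format_multiquery(
+ #       'insert into t ({keys}) values ({placeholders})',
+ #       ['a', 'b'], [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
+ # returns:
+ #   ('insert into t (a, b) values (%s, %s), (%s, %s)', [1, 2, 3, 4])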
+
def copy_to(self, items, tblname, columns, cursor=None, item_cb=None):
def escape(data):
if data is None:
return ''
if isinstance(data, bytes):
return '\\x%s' % binascii.hexlify(data).decode('ascii')
elif isinstance(data, str):
return '"%s"' % data.replace('"', '""')
elif isinstance(data, (datetime.datetime, Arrow)):
# We escape twice to make sure the string generated by
# isoformat gets escaped
return escape(data.isoformat())
elif isinstance(data, dict):
return escape(json.dumps(data))
elif isinstance(data, list):
return escape("{%s}" % ','.join(escape(d) for d in data))
elif isinstance(data, psycopg2.extras.Range):
# We escape twice here too, so that we make sure
# everything gets passed to copy properly
return escape(
'%s%s,%s%s' % (
'[' if data.lower_inc else '(',
'-infinity' if data.lower_inf else escape(data.lower),
'infinity' if data.upper_inf else escape(data.upper),
']' if data.upper_inc else ')',
)
)
else:
# We don't escape here to make sure we pass literals properly
return str(data)
with tempfile.TemporaryFile('w+') as f:
for d in items:
if item_cb is not None:
item_cb(d)
line = [escape(d.get(k)) for k in columns]
f.write(','.join(line))
f.write('\n')
f.seek(0)
cursor.copy_expert('COPY %s (%s) FROM STDIN CSV' % (
tblname, ', '.join(columns)), f)
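# Editorial note: escape() above renders values for COPY ... CSV:
# None becomes an empty field, bytes become hex ('\x...'), strings are
# quoted with doubled inner quotes, and datetimes, dicts, lists and
# ranges are serialized first, then escaped again as strings.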
+
+class SchedulerBackend(SWHConfig, DbBackend):
+ """Backend for the Software Heritage scheduling database.
+
+ """
+ CONFIG_BASE_FILENAME = 'scheduler'
+ DEFAULT_CONFIG = {
+ 'scheduling_db': ('str', 'dbname=softwareheritage-scheduler-dev'),
+ }
+
+ def __init__(self, **override_config):
+ super().__init__()
+ self.config = self.parse_config_file(global_config=False)
+ self.config.update(override_config)
+ self.db = None
+ self.db_conn_dsn = self.config['scheduling_db']
+ self.reconnect()
+
task_type_keys = [
'type', 'description', 'backend_name', 'default_interval',
'min_interval', 'max_interval', 'backoff_factor', 'max_queue_length',
'num_retries', 'retry_delay',
]
- def _format_query(self, query, keys):
- """Format a query with the given keys"""
-
- query_keys = ', '.join(keys)
- placeholders = ', '.join(['%s'] * len(keys))
-
- return query.format(keys=query_keys, placeholders=placeholders)
-
- def _format_multiquery(self, query, keys, values):
- """Format a query with placeholders generated for multiple values"""
- query_keys = ', '.join(keys)
- placeholders = '), ('.join(
- [', '.join(['%s'] * len(keys))] * len(values)
- )
- ret_values = sum([[value[key] for key in keys]
- for value in values], [])
-
- return (
- query.format(keys=query_keys, placeholders=placeholders),
- ret_values,
- )
-
@autocommit
def create_task_type(self, task_type, cursor=None):
"""Create a new task type ready for scheduling.
Args:
task_type (dict): a dictionary with the following keys:
- type (str): an identifier for the task type
- description (str): a human-readable description of what the
task does
- backend_name (str): the name of the task in the
job-scheduling backend
- default_interval (datetime.timedelta): the default interval
between two task runs
- min_interval (datetime.timedelta): the minimum interval
between two task runs
- max_interval (datetime.timedelta): the maximum interval
between two task runs
- backoff_factor (float): the factor by which the interval
changes at each run
- max_queue_length (int): the maximum length of the task queue
for this task type
"""
query = self._format_query(
"""insert into task_type ({keys}) values ({placeholders})""",
self.task_type_keys,
)
cursor.execute(query, [task_type[key] for key in self.task_type_keys])
@autocommit
def get_task_type(self, task_type_name, cursor=None):
"""Retrieve the task type with id task_type_name"""
query = self._format_query(
"select {keys} from task_type where type=%s",
self.task_type_keys,
)
cursor.execute(query, (task_type_name,))
ret = cursor.fetchone()
return ret
@autocommit
def get_task_types(self, cursor=None):
query = self._format_query(
"select {keys} from task_type",
self.task_type_keys,
)
cursor.execute(query)
ret = cursor.fetchall()
return ret
task_create_keys = [
'type', 'arguments', 'next_run', 'policy', 'retries_left', 'priority',
]
task_keys = task_create_keys + ['id', 'current_interval', 'status']
@autocommit
def create_tasks(self, tasks, cursor=None):
"""Create new tasks.
Args:
tasks (list): each task is a dictionary with the following keys:
- type (str): the task type
- arguments (dict): the arguments for the task runner, keys:
- args (list of str): arguments
- kwargs (dict str -> str): keyword arguments
- next_run (datetime.datetime): the next scheduled run for the
task
Returns:
a list of created tasks.
"""
cursor.execute('select swh_scheduler_mktemp_task()')
self.copy_to(tasks, 'tmp_task', self.task_create_keys, cursor)
query = self._format_query(
'select {keys} from swh_scheduler_create_tasks_from_temp()',
self.task_keys,
)
cursor.execute(query)
return cursor.fetchall()
@autocommit
def set_status_tasks(self, task_ids, status='disabled', cursor=None):
"""Set the tasks' status whose ids are listed."""
query = "UPDATE task SET status = %s WHERE id IN %s"
cursor.execute(query, (status, tuple(task_ids),))
return None
@autocommit
def disable_tasks(self, task_ids, cursor=None):
"""Disable the tasks whose ids are listed."""
return self.set_status_tasks(task_ids)
@autocommit
def get_tasks(self, task_ids, cursor=None):
"""Retrieve the info of tasks whose ids are listed."""
query = self._format_query('select {keys} from task where id in %s',
self.task_keys)
cursor.execute(query, (tuple(task_ids),))
return cursor.fetchall()
@autocommit
def peek_ready_tasks(self, task_type, timestamp=None, num_tasks=None,
num_tasks_priority=None,
cursor=None):
"""Fetch the list of ready tasks
Args:
task_type (str): filter tasks by their type
timestamp (datetime.datetime): peek tasks that need to be executed
before that timestamp
num_tasks (int): only peek at num_tasks tasks (with no priority)
num_tasks_priority (int): only peek at num_tasks_priority
tasks (with priority)
Returns:
a list of tasks
"""
if timestamp is None:
timestamp = utcnow()
cursor.execute(
'select * from swh_scheduler_peek_ready_tasks(%s, %s, %s, %s)',
(task_type, timestamp, num_tasks, num_tasks_priority)
)
return cursor.fetchall()
@autocommit
def grab_ready_tasks(self, task_type, timestamp=None, num_tasks=None,
num_tasks_priority=None, cursor=None):
"""Fetch the list of ready tasks, and mark them as scheduled
Args:
task_type (str): filter tasks by their type
timestamp (datetime.datetime): grab tasks that need to be executed
before that timestamp
num_tasks (int): only grab num_tasks tasks (with no priority)
num_tasks_priority (int): only grab num_tasks_priority oneshot
tasks (with priority)
Returns:
a list of tasks
"""
if timestamp is None:
timestamp = utcnow()
cursor.execute(
'select * from swh_scheduler_grab_ready_tasks(%s, %s, %s, %s)',
(task_type, timestamp, num_tasks, num_tasks_priority)
)
return cursor.fetchall()
task_run_create_keys = ['task', 'backend_id', 'scheduled', 'metadata']
@autocommit
def schedule_task_run(self, task_id, backend_id, metadata=None,
timestamp=None, cursor=None):
"""Mark a given task as scheduled, adding a task_run entry in the database.
Args:
task_id (int): the identifier for the task being scheduled
backend_id (str): the identifier of the job in the backend
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
a fresh task_run entry
"""
if metadata is None:
metadata = {}
if timestamp is None:
timestamp = utcnow()
cursor.execute(
'select * from swh_scheduler_schedule_task_run(%s, %s, %s, %s)',
(task_id, backend_id, metadata, timestamp)
)
return cursor.fetchone()
@autocommit
def mass_schedule_task_runs(self, task_runs, cursor=None):
"""Schedule a bunch of task runs.
Args:
task_runs (list): a list of dicts with keys:
- task (int): the identifier for the task being scheduled
- backend_id (str): the identifier of the job in the backend
- metadata (dict): metadata to add to the task_run entry
- scheduled (datetime.datetime): the instant the event occurred
Returns:
None
"""
cursor.execute('select swh_scheduler_mktemp_task_run()')
self.copy_to(task_runs, 'tmp_task_run', self.task_run_create_keys,
cursor)
cursor.execute('select swh_scheduler_schedule_task_run_from_temp()')
@autocommit
def start_task_run(self, backend_id, metadata=None, timestamp=None,
cursor=None):
"""Mark a given task as started, updating the corresponding task_run
entry in the database.
Args:
backend_id (str): the identifier of the job in the backend
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
the updated task_run entry
"""
if metadata is None:
metadata = {}
if timestamp is None:
timestamp = utcnow()
cursor.execute(
'select * from swh_scheduler_start_task_run(%s, %s, %s)',
(backend_id, metadata, timestamp)
)
return cursor.fetchone()
@autocommit
def end_task_run(self, backend_id, status, metadata=None, timestamp=None,
result=None, cursor=None):
"""Mark a given task as ended, updating the corresponding task_run entry in the
database.
Args:
backend_id (str): the identifier of the job in the backend
status (str): how the task ended; one of: 'eventful', 'uneventful',
'failed'
metadata (dict): metadata to add to the task_run entry
timestamp (datetime.datetime): the instant the event occurred
Returns:
the updated task_run entry
"""
if metadata is None:
metadata = {}
if timestamp is None:
timestamp = utcnow()
cursor.execute(
'select * from swh_scheduler_end_task_run(%s, %s, %s, %s)',
(backend_id, status, metadata, timestamp)
)
return cursor.fetchone()
@autocommit
def filter_task_to_archive(self, after_ts, before_ts, limit=10, last_id=-1,
cursor=None):
"""Returns the list of task/task_run prior to a given date to archive.
"""
last_task_run_id = None
while True:
row = None
cursor.execute(
"select * from swh_scheduler_task_to_archive(%s, %s, %s, %s)",
(after_ts, before_ts, last_id, limit)
)
for row in cursor:
# the nested type index does not accept bare values;
# transform the args list into a dict to comply with this
row['arguments']['args'] = {
i: v for i, v in enumerate(row['arguments']['args'])
}
kwargs = row['arguments']['kwargs']
row['arguments']['kwargs'] = json.dumps(kwargs)
yield row
if not row:
break
_id = row.get('task_id')
_task_run_id = row.get('task_run_id')
if last_id == _id and last_task_run_id == _task_run_id:
break
last_id = _id
last_task_run_id = _task_run_id
@autocommit
def delete_archived_tasks(self, task_ids, cursor=None):
"""Delete archived tasks as much as possible. Only the task_ids whose
complete associated task_run have been cleaned up will be.
"""
_task_ids, _task_run_ids = [], []
for task_id in task_ids:
_task_ids.append(task_id['task_id'])
_task_run_ids.append(task_id['task_run_id'])
cursor.execute(
"select * from swh_scheduler_delete_archived_tasks(%s, %s)",
(_task_ids, _task_run_ids))
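
Taken together, these methods expose a create/peek/grab/run task lifecycle. A hedged end-to-end sketch built from the docstrings above (all concrete values are illustrative assumptions, not part of this diff):

import datetime
from arrow import utcnow
from swh.scheduler.backend import SchedulerBackend

backend = SchedulerBackend()  # reads the 'scheduler' config file
backend.create_task_type({
    'type': 'origin-update-git',            # illustrative task type
    'description': 'Update a git origin',
    'backend_name': 'swh.loader.git.tasks.UpdateGitRepository',
    'default_interval': datetime.timedelta(days=64),
    'min_interval': datetime.timedelta(hours=12),
    'max_interval': datetime.timedelta(days=64),
    'backoff_factor': 2.0,
    'max_queue_length': None,
    'num_retries': None,
    'retry_delay': None,
})
[task] = backend.create_tasks([{
    'type': 'origin-update-git',
    'arguments': {'args': ['https://example.org/repo'], 'kwargs': {}},
    'next_run': utcnow(),
    'policy': 'recurring',
    'retries_left': 3,
    'priority': None,
}])
# grab marks tasks as scheduled; then record each run's lifecycle
for t in backend.grab_ready_tasks('origin-update-git',
                                  num_tasks=10, num_tasks_priority=0):
    run = backend.schedule_task_run(t['id'], 'job-%s' % t['id'])
    backend.start_task_run(run['backend_id'])
    backend.end_task_run(run['backend_id'], status='eventful')
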
diff --git a/swh/scheduler/tests/test_events.py b/swh/scheduler/tests/test_events.py
index f62cbcd..8214c9d 100644
--- a/swh/scheduler/tests/test_events.py
+++ b/swh/scheduler/tests/test_events.py
@@ -1,54 +1,56 @@
# Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import unittest
+from arrow import utcnow
+from hypothesis import given
+from hypothesis.strategies import one_of, text, just, sampled_from
from nose.tools import istest
from swh.scheduler.updater.events import SWHEvent, LISTENED_EVENTS
-from hypothesis import given
-from hypothesis.strategies import one_of, text, just, sampled_from
def event_values_ko():
return {
'repos', 'org_members', 'teamadd', 'geo_cache',
'follow', 'issue_comments', 'followers', 'forks', 'pagebuild',
'pullrequest', 'pull_requests', 'commit_comments', 'watch', 'fork',
'forkapply', 'commits', 'release', 'gollum', 'membership', 'watchers',
'pullrequestreviewcomment', 'deployment', 'issuecomment', 'status',
'repo_labels', 'issue_events', 'commitcomment', 'issues', 'member',
'users', 'download', 'repo_collaborators', 'repository',
'deploymentstatus', 'pull_request_comments', 'gist'
}
class EventTest(unittest.TestCase):
def _make_event(self, event_name):
return {
- 'evt': event_name,
- 'url': 'something'
+ 'event': event_name,
+ 'url': 'something',
+ 'last_seen': utcnow(),
}
@istest
@given(sampled_from(LISTENED_EVENTS))
def check_ok(self, event_name):
evt = self._make_event(event_name)
self.assertTrue(SWHEvent(evt).check())
@istest
@given(text())
def check_with_noisy_event_should_be_ko(self, event_name):
if event_name in LISTENED_EVENTS:
# text() may occasionally generate a real, listened event name; skip it
return
evt = self._make_event(event_name)
self.assertFalse(SWHEvent(evt).check())
@istest
@given(one_of(map(just, event_values_ko())))
def check_ko(self, event_name):
evt = self._make_event(event_name)
self.assertFalse(SWHEvent(evt).check())
diff --git a/swh/scheduler/tests/test_scheduler_updater.py b/swh/scheduler/tests/test_scheduler_updater.py
new file mode 100644
index 0000000..37ebab1
--- /dev/null
+++ b/swh/scheduler/tests/test_scheduler_updater.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2018 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+import os
+import unittest
+
+from arrow import utcnow
+from nose.plugins.attrib import attr
+from nose.tools import istest
+from hypothesis import given
+from hypothesis.strategies import sets, from_regex
+from urllib.parse import urlencode
+
+from swh.core.tests.db_testing import SingleDbTestFixture
+from swh.scheduler.updater.backend import SchedulerUpdaterBackend
+from swh.scheduler.updater.events import SWHEvent
+
+TEST_DIR = os.path.dirname(os.path.abspath(__file__))
+TEST_DATA_DIR = os.path.join(TEST_DIR, '../../../../swh-storage-testdata')
+
+
+@attr('db')
+class SchedulerUpdaterTest(SingleDbTestFixture, unittest.TestCase):
+ TEST_DB_NAME = 'softwareheritage-scheduler-updater-test'
+ TEST_DB_DUMP = os.path.join(TEST_DATA_DIR,
+ 'dumps/swh-scheduler-updater.dump')
+
+ def setUp(self):
+ super().setUp()
+ config = {
+ 'scheduling_updater_db': 'dbname=' + self.TEST_DB_NAME,
+ 'time_window': '1 minute',
+ }
+ self.backend = SchedulerUpdaterBackend(**config)
+
+ def _empty_tables(self):
+ self.cursor.execute(
+ """SELECT table_name FROM information_schema.tables
+ WHERE table_schema = %s""", ('public', ))
+ tables = set(table for (table,) in self.cursor.fetchall())
+ for table in tables:
+ self.cursor.execute('truncate table %s cascade' % table)
+ self.conn.commit()
+
+ def tearDown(self):
+ self.backend.close_connection()
+ self._empty_tables()
+ super().tearDown()
+
+ @istest
+ @given(sets(
+ from_regex(
+ r'^https://somwhere[.]org/[a-z0-9]{5,7}/[a-z0-9]{3,10}$'),
+ min_size=10, max_size=15))
+ def cache_read(self, urls):
+ def gen_events(urls):
+ for url in urls:
+ yield SWHEvent({
+ 'url': url,
+ 'event': 'create'
+ })
+
+ self.backend.cache_put(gen_events(urls))
+ r = self.backend.cache_read(timestamp=utcnow())
+
+ self.assertNotEqual(r, [])
diff --git a/swh/scheduler/updater/backend.py b/swh/scheduler/updater/backend.py
new file mode 100644
index 0000000..2b695df
--- /dev/null
+++ b/swh/scheduler/updater/backend.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2018 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+
+from arrow import utcnow
+from swh.core.config import SWHConfig
+from swh.scheduler.backend import DbBackend, autocommit
+
+
+class SchedulerUpdaterBackend(SWHConfig, DbBackend):
+ CONFIG_BASE_FILENAME = 'scheduler-updater'
+ DEFAULT_CONFIG = {
+ 'scheduling_updater_db': (
+ 'str', 'dbname=softwareheritage-scheduler-updater-dev'),
+ 'time_window': ('str', '1 hour'),
+ }
+
+ def __init__(self, **override_config):
+ super().__init__()
+ self.config = self.parse_config_file(global_config=False)
+ self.config.update(override_config)
+ self.db = None
+ self.db_conn_dsn = self.config['scheduling_updater_db']
+ self.time_window = self.config['time_window']
+ self.reconnect()
+
+ cache_put_keys = ['url', 'last_seen']
+
+ @autocommit
+ def cache_put(self, events, timestamp=None, cursor=None):
+ if timestamp is None:
+ timestamp = utcnow()
+
+ def prepare_events(events):
+ for e in events:
+ event = e.get()
+ seen = event.get('last_seen')
+ if seen is None:
+ event['last_seen'] = timestamp
+ yield event
+
+ cursor.execute('select swh_mktemp_cache()')
+ self.copy_to(prepare_events(events),
+ 'tmp_cache', self.cache_put_keys, cursor)
+ cursor.execute('select swh_cache_put()')
+
+ # @autocommit
+ # def cache_get(self, event, cursor=None):
+ # pass
+
+ # @autocommit
+ # def cache_remove(self, event, cursor=None):
+ # pass
+
+ cache_read_keys = ['id', 'url']
+
+ @autocommit
+ def cache_read(self, timestamp, limit=100, cursor=None):
+ q = self._format_query("""select {keys}
+ from cache
+ where %s - interval %s <= last_seen and last_seen <= %s
+ limit %s
+ """, self.cache_read_keys)
+ cursor.execute(q, (timestamp, self.time_window, timestamp, limit))
+ return cursor.fetchall()
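
The updater backend mirrors the test above: events flow in through cache_put and are read back within the configured time window. A hedged usage sketch (the DSN and URL are assumptions; the overrides match the defaults):

from arrow import utcnow
from swh.scheduler.updater.backend import SchedulerUpdaterBackend
from swh.scheduler.updater.events import SWHEvent

backend = SchedulerUpdaterBackend(
    scheduling_updater_db='dbname=softwareheritage-scheduler-updater-dev',
    time_window='1 hour')

backend.cache_put([
    SWHEvent({'event': 'push', 'url': 'https://example.org/repo'}),
])  # last_seen is filled in with "now" when absent
for row in backend.cache_read(timestamp=utcnow(), limit=10):
    print(row['url'])  # each row also carries the sha1 id
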
diff --git a/swh/scheduler/updater/events.py b/swh/scheduler/updater/events.py
index 3e660bf..cf0c7eb 100644
--- a/swh/scheduler/updater/events.py
+++ b/swh/scheduler/updater/events.py
@@ -1,39 +1,47 @@
# Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
LISTENED_EVENTS = [
'create',
'delete',
'public',
'push'
]
class SWHEvent:
"""SWH's interesting event (resulting in an origin update)
"""
def __init__(self, evt):
self.event = evt
def check(self):
- return 'evt' in self.event and self.event['evt'] in LISTENED_EVENTS
+ return 'event' in self.event and self.event['event'] in LISTENED_EVENTS
+
+ def get(self):
+ return {
+ 'event': self.event['event'],
+ 'url': self.event['url'],
+ 'last_seen': self.event.get('last_seen')
+ }
    def __str__(self):
-        return {
-            'evt': self.event['evt'],
-            'url': self.event['url']
-        }
+        return str({
+            'event': self.event['event'],
+            'url': self.event['url'],
+            'last_seen': self.event.get('last_seen')
+        })
class SWHPublisher:
def process(self):
pass
class SWHSubscriber:
def process(self):
pass
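
A quick sketch of the renamed event key in use (the URL is an illustrative assumption; 'gist' is one of the ignored names from the tests):

from arrow import utcnow
from swh.scheduler.updater.events import SWHEvent

evt = SWHEvent({'event': 'push', 'url': 'https://example.org/repo',
                'last_seen': utcnow()})
assert evt.check()                        # 'push' is a listened event
assert not SWHEvent({'event': 'gist', 'url': 'x'}).check()
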