diff --git a/site-modules/profile/files/prometheus/sql/README.md b/site-modules/profile/files/prometheus/sql/README.md
index df061633..4fa082b2 100644
--- a/site-modules/profile/files/prometheus/sql/README.md
+++ b/site-modules/profile/files/prometheus/sql/README.md
@@ -1,10 +1,10 @@
Elephant Shed is an open source project, developed and maintained by credativ.
The Elephant Shed itself is licensed under the [GPLv3](https://www.gnu.org/licenses/gpl-3.0.html).
-The following files have been pulled from commit c2951bad8cbcb58cd8377e5057b1d69bc2d9928b of https://github.com/credativ/elephant-shed/
+The following files have been pulled from commit 8cf0db0b225623adec291019405dca8a00b11e1c of https://github.com/credativ/elephant-shed/
- update-prometheus-sql-exporter-config
- config/activity.yml
- config/queries.yml
- config/replication.yml
- config/wal.yml
diff --git a/site-modules/profile/files/prometheus/sql/update-prometheus-sql-exporter-config b/site-modules/profile/files/prometheus/sql/update-prometheus-sql-exporter-config
index 5744afac..83ef6528 100644
--- a/site-modules/profile/files/prometheus/sql/update-prometheus-sql-exporter-config
+++ b/site-modules/profile/files/prometheus/sql/update-prometheus-sql-exporter-config
@@ -1,84 +1,108 @@
#!/usr/bin/perl
use strict;
use warnings;
use PgCommon;
use Storable qw(dclone);
use YAML;
# from list of queries, remove all that do not satisfy the scope/version/cluster/database constraints
sub filter_queries($$$$;$)
{
    my ($queries, $scope, $version, $cluster, $database) = @_;
    my @result;
    foreach my $query (@$queries) {
        $query->{scope} //= 'database';
        die "scope '$query->{scope}' must be either 'cluster' or 'database'"
            unless ($query->{scope} =~ /^(cluster|database)$/);
        next if $query->{scope} ne $scope;
        next if $query->{min_version} and $version < $query->{min_version};
        next if $query->{max_version} and $version > $query->{max_version};
        next if $query->{version} and $version !~ /$query->{version}/;
        next if $query->{cluster} and $cluster !~ /$query->{cluster}/;
        next if $query->{database} and $database !~ /$query->{database}/;
        my $q = dclone($query);
        delete $q->{scope}; delete $q->{min_version}; delete $q->{max_version};
        delete $q->{version}; delete $q->{cluster}; delete $q->{database};
        push @result, $q;
    }
    return \@result;
}
+# Generate jobs from the given queries. One job is created for all the queries
+# with null interval, and one job is created per query with non-null interval.
+sub push_queries_as_jobs($$$)
+{
+    my ($jobs, $queries, $job_settings) = @_;
+    my @instant;
+    foreach my $query (@$queries) {
+        if (!$query->{interval}) {
+            push @instant, $query;
+        } else {
+            # Generate separate job for query with non-null interval
+            my $job_settings_query = dclone($job_settings);
+            $job_settings_query->{interval} = $query->{interval};
+            $job_settings_query->{name} .= '/' . $query->{name};
+            my $q = dclone($query);
+            delete $q->{interval};
+            $job_settings_query->{queries} = [$q];
+
+            push @$jobs, $job_settings_query;
+        }
+    }
+
+    my $instant_job = dclone($job_settings);
+    $instant_job->{interval} = 0;
+    $instant_job->{queries} = \@instant;
+    push @$jobs, $instant_job;
+}
+
my $queries_directory = $ARGV[0] // die "No directory for *.yml query files specified";
my $output_yaml = $ARGV[1] // die "No output yaml file specified";
# load all *.yml files from input directory and collect the contained query lists
my $queries;
foreach my $yml (glob "$queries_directory/*.yml") {
    my $q;
    eval { $q = YAML::LoadFile($yml); };
    die "Error loading $yml: $@" if ($@);
    next if (ref($q) eq ''); # file is empty
    die "$yml is not a yaml list:" . ref($q) unless (ref($q) eq 'ARRAY');
    push @$queries, @$q;
}
# walk all clusters and databases and produce jobs for them
my $jobs = [];
foreach my $version (get_versions()) {
    foreach my $cluster (get_version_clusters($version)) {
        my %info = cluster_info($version, $cluster);
        next unless $info{running}; # cluster is down, skip it
        my $owner = (getpwuid $info{owneruid})[0] // die "Could not determine owner name of cluster $version $cluster";
        my $socket = get_cluster_socketdir($version, $cluster);
-        # job for cluster-wide queries
-        push @$jobs, {
+        # jobs for cluster-wide queries
+        push_queries_as_jobs $jobs, filter_queries($queries, 'cluster', $version, $cluster), {
            connections => [ "postgres://$owner\@:$info{port}/postgres?sslmode=disable&host=$socket" ],
-            interval => 0,
            name => "$version/$cluster",
-            queries => filter_queries($queries, 'cluster', $version, $cluster),
        };
        # jobs for per-database queries
        my @cluster_databases = get_cluster_databases($version, $cluster);
        foreach my $database (grep { $_ and $_ !~ /^template[01]$/ } @cluster_databases) {
-            push @$jobs, {
+            push_queries_as_jobs $jobs, filter_queries($queries, 'database', $version, $cluster, $database), {
                connections => [ "postgres://$owner\@:$info{port}/$database?sslmode=disable&host=$socket" ],
-                interval => 0,
                name => "$version/$cluster/$database",
-                queries => filter_queries($queries, 'database', $version, $cluster, $database),
            };
        }
        # create pg_stat_statements in "postgres" database if missing
        next if $info{recovery}; # cluster is in recovery mode, skip it
        open PSQL, "|-", "su -c 'psql -q -h $socket -p $info{port} postgres' $owner";
        print PSQL "DO \$\$DECLARE ext name; BEGIN SELECT INTO ext extname FROM pg_extension WHERE extname = 'pg_stat_statements'; IF NOT FOUND THEN RAISE NOTICE 'Creating pg_stat_statements extension in cluster $version/$cluster'; CREATE EXTENSION pg_stat_statements; END IF; END\$\$ LANGUAGE plpgsql;\n";
        close PSQL;
    }
}
# write output yml
YAML::DumpFile($output_yaml, { jobs => $jobs });
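
For context, a minimal sketch of what the new push_queries_as_jobs splitting produces in the generated sql_exporter configuration. Every name and value below is an illustrative assumption (a PostgreSQL 13 cluster "main" with a database "swh", a hypothetical interval-bearing query slow_query and an interval-less query running_queries, a made-up connection string); only the job structure reflects the script's behaviour: each query that declares an interval becomes its own job named after the query, and all remaining queries are grouped into a single job with interval 0.

jobs:
  - name: "13/main/swh/slow_query"        # one job per interval-bearing query (hypothetical)
    interval: '1h'
    connections:
      - "postgres://postgres@:5432/swh?sslmode=disable&host=/var/run/postgresql"
    queries:
      - name: slow_query                  # the interval key is stripped from the query itself
        help: "Hypothetical expensive check"
        values: [count]
        query: "SELECT count(*) FROM some_large_table"
  - name: "13/main/swh"                   # all interval-less queries share one job
    interval: 0
    connections:
      - "postgres://postgres@:5432/swh?sslmode=disable&host=/var/run/postgresql"
    queries:
      - name: running_queries             # hypothetical instant query
        help: "Hypothetical instant check"
        values: [count]
        query: "SELECT count(*) FROM pg_stat_activity WHERE state = 'active'"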
