diff --git a/data/deployments/admin/common.yaml b/data/deployments/admin/common.yaml
index 445af36c..74ff7e56 100644
--- a/data/deployments/admin/common.yaml
+++ b/data/deployments/admin/common.yaml
@@ -1,51 +1,55 @@
 swh::deploy::environment: admin
 
 dns::search_domains:
   - internal.admin.swh.network
 
 swh::postgresql::version: '14'
 swh::postgresql::listen_addresses:
   - 0.0.0.0
 swh::postgresql::network_accesses:
   - 192.168.100.0/24 # Monitoring
 swh::postgresql::shared_buffers: 4GB
 swh::postgresql::port: 5432
 swh::postgresql::cluster_name: "%{lookup('swh::postgresql::version')}/main"
 swh::postgresql::datadir_base: "/srv/postgresql"
 swh::postgresql::datadir: "%{lookup('swh::postgresql::datadir_base')}/%{lookup('swh::postgresql::cluster_name')}"
 
 hedgedoc::db::database: hedgedoc
 hedgedoc::db::username: hedgedoc
 # swh::deploy::hedgedoc::db::password: in private-data
 
 # namespace key `key_name`, lookup will happen on swh::deploy::{key_name}::...
 swh::deploy::reverse_proxy::services:
   - hedgedoc
   - grafana
   - sentry
 
 swh::deploy::hedgedoc::reverse_proxy::backend_http_host: bardo.internal.admin.swh.network
 swh::deploy::hedgedoc::reverse_proxy::backend_http_port: "3000"
 swh::deploy::hedgedoc::reverse_proxy::websocket_support: true
 swh::deploy::hedgedoc::base_url: hedgedoc.softwareheritage.org
 swh::deploy::hedgedoc::vhost::letsencrypt_cert: hedgedoc
 swh::deploy::hedgedoc::icinga_check_string: 'HedgeDoc'
 
 swh::deploy::grafana::vhost::letsencrypt_cert: "%{lookup('grafana::vhost::name')}"
 swh::deploy::grafana::reverse_proxy::backend_http_host: grafana0.internal.admin.swh.network
 swh::deploy::grafana::reverse_proxy::backend_http_port: "3000"
 swh::deploy::grafana::reverse_proxy::websocket_support: true
 swh::deploy::grafana::base_url: "%{lookup('grafana::vhost::name')}"
 
 swh::deploy::sentry::vhost::letsencrypt_cert: "%{lookup('sentry::vhost::name')}"
 swh::deploy::sentry::reverse_proxy::backend_http_host: riverside.internal.admin.swh.network
 swh::deploy::sentry::reverse_proxy::backend_http_port: "9000"
 swh::deploy::sentry::base_url: "%{lookup('sentry::vhost::name')}"
 swh::deploy::sentry::icinga_check_uri: '/auth/login/swh/'
 
 hitch::frontend: "[*]:443"
 hitch::proxy_support: true
 varnish::http_port: 80
 
 grafana::db::host: db1.internal.admin.swh.network
 grafana::db::port: "%{lookup('swh::postgresql::port')}"
+
+syncoid::public_keys::backup01-azure:
+  type: ssh-ed25519
+  key: "AAAAC3NzaC1lZDI1NTE5AAAAIC/IVxmzorYGJH5ThlzjrdHl9KBTsJKEqCAZhhJG6oGO"
diff --git a/data/hostname/backup01.euwest.azure.internal.softwareheritage.org.yaml b/data/hostname/backup01.euwest.azure.internal.softwareheritage.org.yaml
new file mode 100644
index 00000000..0685c0b4
--- /dev/null
+++ b/data/hostname/backup01.euwest.azure.internal.softwareheritage.org.yaml
@@ -0,0 +1,22 @@
+swh::apt_config::enable_non_free: true
+
+packages:
+  - linux-headers-cloud-amd64
+  - zfs-dkms
+
+syncoid::configuration:
+  sources:
+    dali:
+      host: dali.internal.admin.swh.network
+      datasets:
+        postgresql:
+          dataset: data/postgresql
+          frequency: 1h
+          sync_snap: no # snapshots are managed by sanoid
+        postgresql_wal:
+          target: postgresql/wal
+          dataset: data/postgresql/wal
+          frequency: 1h
+          sync_snap: no # snapshots are managed by sanoid
+      target_dataset_base: data/sync
+      ssh_key: backup01-azure
diff --git a/site-modules/profile/manifests/sanoid/sync_destination.pp b/site-modules/profile/manifests/sanoid/sync_destination.pp
index 90fcba02..0f0a8fbf 100644
--- a/site-modules/profile/manifests/sanoid/sync_destination.pp
+++ b/site-modules/profile/manifests/sanoid/sync_destination.pp
@@ -1,62 +1,72 @@
 # Configure a server to be the destination
 # of a zfs snapshot synchronization
 # This server will pull the snasphots from
 # the sources
 class profile::sanoid::sync_destination {
   $configuration = lookup('syncoid::configuration')
   $sources = $configuration["sources"]
 
   if $sources {
     include profile::sanoid::install
 
     $sources.each | $key, $config | {
       $source_host = $config['host']
       $ssh_key_name = $config['ssh_key']
       $ssh_key = lookup("syncoid::ssh_key::${ssh_key_name}")
       $authorized_key = lookup("syncoid::public_keys::${ssh_key_name}")
       $ssh_key_type = $authorized_key["type"]
       # computing a substring of ssh_key_type to remove 'ssh-' of the name
       $ssh_key_filename = "/root/.ssh/id_${ssh_key_type[4,255]}.syncoid_${ssh_key_name}"
 
       ensure_resource(
         'file',
         $ssh_key_filename, {
           ensure  => present,
           owner   => 'root',
           group   => 'root',
           mode    => '0600',
           content => $ssh_key,
         })
 
       @@::profile::sanoid::configure_sync_source { $::fqdn:
         user           => 'root',
         ssh_key_name   => "syncoid_${ssh_key_name}",
         ssh_key_type   => $ssh_key_type,
         authorized_key => $authorized_key['key'],
         tag            => $source_host,
       }
 
       # Create a timer and service for each dataset to sync
       $config['datasets'].each | $name, $props | {
         $dataset = $props['dataset']
-        $destination = "${config['target_dataset_base']}/${key}/${name}"
+        # Allow syncing a dataset under a different name on the destination
+        $target = pick($props['target'], $name)
+        $destination = "${config['target_dataset_base']}/${key}/${target}"
         $service_basename = "syncoid-${key}-${name}"
         $source = "${source_host}:${dataset}"
         $delay = pick($props['delay'], lookup('syncoid::default_delay'))
+        # When snapshots are already managed by sanoid on the source,
+        # tell syncoid not to create its own synchronization snapshots.
+        # Always assign $sync_options so the template variable is defined
+        # (required under strict_variables).
+        $sync_snap = pick($props['sync_snap'], true)
+        if $sync_snap == false {
+          $sync_options = ' --no-sync-snap'
+        } else {
+          $sync_options = ''
+        }
         # templates use:
         # - $ssh_key_filename
         # - $source
         # - $destination
         # - $delay
         # - $service_basename
+        # - $sync_options
         ::systemd::timer { "${service_basename}.timer":
           timer_content   => template('profile/sanoid/syncoid.timer.erb'),
           service_content => template('profile/sanoid/syncoid.service.erb'),
           service_unit    => "${service_basename}.service",
           active          => true,
           enable          => true,
         }
       }
     }
   }
 }
diff --git a/site-modules/profile/templates/sanoid/syncoid.service.erb b/site-modules/profile/templates/sanoid/syncoid.service.erb
index 9c779a05..6f3e7f8b 100644
--- a/site-modules/profile/templates/sanoid/syncoid.service.erb
+++ b/site-modules/profile/templates/sanoid/syncoid.service.erb
@@ -1,17 +1,17 @@
 # Managed by puppet class profile::sanoid::syncoid_destination
 # Changes will be overwritten
 [Unit]
-Description=ZFS dataset synchronization of <% @source %>
+Description=ZFS dataset synchronization of <%= @source %>
 StartLimitIntervalSec=<%= @delay %>
 StartLimitBurst=2
 
 [Service]
 Type=oneshot
 User=root
 Group=root
-ExecStart=syncoid --sshkey <%= @ssh_key_filename %> root@<%= @source %> <%= @destination %>
+ExecStart=syncoid<%= @sync_options %> --sshkey <%= @ssh_key_filename %> root@<%= @source %> <%= @destination %>
 Restart=on-failure
 RestartSec=1s
 
 [Install]
 WantedBy=multi-user.target