diff --git a/data/common/kafka.yaml b/data/common/kafka.yaml
index 4594afa6..96c96a5b 100644
--- a/data/common/kafka.yaml
+++ b/data/common/kafka.yaml
@@ -1,72 +1,93 @@
 ---
 zookeeper::clusters:
   rocquencourt:
     '1': kafka1.internal.softwareheritage.org
     '2': kafka2.internal.softwareheritage.org
     '3': kafka3.internal.softwareheritage.org
     '4': kafka4.internal.softwareheritage.org

 zookeeper::datastore: /var/lib/zookeeper
 zookeeper::client_port: 2181
 zookeeper::election_port: 2888
 zookeeper::leader_port: 3888

 kafka::version: '2.6.0'
 kafka::scala_version: '2.13'
 kafka::mirror_url: https://archive.apache.org/dist/

 kafka::cluster::heap_ops: "-Xmx6G -Xms6G"

 kafka::logdirs:
   - /srv/kafka/logdir
 kafka::broker_config:
   log.dirs: "%{alias('kafka::logdirs')}"
   num.recovery.threads.per.data.dir: 10
   # Increase zookeeper and replication timeouts
   # https://cwiki.apache.org/confluence/display/KAFKA/KIP-537%3A+Increase+default+zookeeper+session+timeout will be default in 2.5.0
   zookeeper.session.timeout.ms: 18000
   replica.lag.time.max.ms: 30000
   # Bump consumer offset retention to 30 days instead of the default of 7 days
   offsets.retention.minutes: 43200
   # Increase the socket request max size to 200 MB
   socket.request.max.bytes: 209715200
   # And the max message size to 100 MB
   message.max.bytes: 104857600
   # For upgrades after 2.6
   inter.broker.protocol.version: "2.6"

 # kafka::broker::password in private-data
 kafka::clusters:
   rocquencourt:
     zookeeper::chroot: '/kafka/softwareheritage'
     zookeeper::servers:
       - kafka1.internal.softwareheritage.org
       - kafka2.internal.softwareheritage.org
       - kafka3.internal.softwareheritage.org
       - kafka4.internal.softwareheritage.org
     brokers:
       kafka1.internal.softwareheritage.org:
         id: 1
         public_hostname: broker1.journal.softwareheritage.org
       kafka2.internal.softwareheritage.org:
         id: 2
         public_hostname: broker2.journal.softwareheritage.org
       kafka3.internal.softwareheritage.org:
         id: 3
         public_hostname: broker3.journal.softwareheritage.org
       kafka4.internal.softwareheritage.org:
         id: 4
         public_hostname: broker4.journal.softwareheritage.org
     superusers:
       - User:swh-admin-olasd
       # Users connecting in the plaintext endpoint are ANONYMOUS
       # TODO: remove when explicit ACLs are given to producers
       - User:ANONYMOUS
     broker::heap_opts: "%{lookup('kafka::cluster::heap_ops')}"
     tls: true
     plaintext_port: 9092
     public_tls_port: 9093
     internal_tls_port: 9094
     public_listener_network: 128.93.166.0/26
+  rocquencourt_staging:
+    zookeeper::chroot: '/kafka/softwareheritage'
+    zookeeper::servers:
+      - journal1.internal.staging.swh.network
+    brokers:
+      storage1.internal.staging.swh.network:
+        id: 2
+        public_hostname: broker1.journal.staging.swh.network
+    broker::heap_opts: "%{alias('kafka::broker::heap_opts')}"
+    superusers:
+      - User:swh-admin-olasd
+      # Users connecting in the plaintext endpoint are ANONYMOUS
+      # TODO: remove when explicit ACLs are given to producers
+      - User:ANONYMOUS
+    tls: true
+    plaintext_port: 9092
+    public_tls_port: 9093
+    internal_tls_port: 9094
+    cluster_config_overrides:
+      offsets.topic.replication.factor: 1  # this is mandatory with only one node
+    public_listener_network: "%{alias('kafka::cluster::public_network')}"
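The staging cluster reuses the production listener layout (plaintext on 9092, public TLS on 9093, internal TLS on 9094). As a post-deployment sanity check, not part of this change, one could verify that both TLS listeners present a certificate; the hostnames below are taken from the broker definition above, and openssl is assumed to be available:

    # Sketch: check the public and internal TLS listeners of the staging broker.
    openssl s_client -connect broker1.journal.staging.swh.network:9093 </dev/null 2>/dev/null \
        | openssl x509 -noout -subject -issuer
    openssl s_client -connect storage1.internal.staging.swh.network:9094 </dev/null 2>/dev/null \
        | openssl x509 -noout -subject -issuer
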
diff --git a/data/deployments/staging/common.yaml b/data/deployments/staging/common.yaml
index 63deae5a..b3101646 100644
--- a/data/deployments/staging/common.yaml
+++ b/data/deployments/staging/common.yaml
@@ -1,321 +1,298 @@
 ---
 swh::deploy::environment: staging

 swh::deploy::storage::db::host: db1.internal.staging.swh.network
 swh::deploy::storage::db::user: swh
 swh::deploy::storage::db::dbname: swh

 swh::deploy::indexer::storage::db::host: db1.internal.staging.swh.network
 swh::deploy::indexer::storage::db::user: swh-indexer
 swh::deploy::indexer::storage::db::dbname: swh-indexer

 swh::deploy::scheduler::db::host: db1.internal.staging.swh.network
 swh::deploy::scheduler::db::dbname: swh-scheduler
 swh::deploy::scheduler::db::user: swh-scheduler

 swh::deploy::deposit::db::host: db1.internal.staging.swh.network
 swh::deploy::deposit::db::dbuser: swh-deposit
 swh::deploy::deposit::db::dbname: swh-deposit

 swh::deploy::vault::db::host: db1.internal.staging.swh.network
 swh::deploy::vault::db::user: swh-vault
 swh::deploy::vault::db::dbname: swh-vault

 swh::deploy::worker::lister::db::host: db1.internal.staging.swh.network
 swh::deploy::worker::lister::db::user: swh-lister
 swh::deploy::worker::lister::db::name: swh-lister

 swh::deploy::webapp::db::host: db1.internal.staging.swh.network
 # swh::deploy::webapp::db::password in private data

 swh::deploy::worker::instances:
   - checker_deposit
   - loader_archive
   - loader_cran
   - loader_cvs
   - loader_debian
   - loader_deposit
   - loader_nixguix
   - loader_git
   - loader_mercurial
   - loader_npm
   - loader_pypi
   - loader_svn
   - vault_cooker
   - lister
   - loader_high_priority
   - loader_opam

 #### Rabbitmq instance to use
 # swh::deploy::worker::task_broker::password in private data
 swh::deploy::worker::task_broker: "amqp://swhconsumer:%{hiera('swh::deploy::worker::task_broker::password')}@scheduler0.internal.staging.swh.network:5672/%2f"

 #### Storage/Indexer/Vault/Scheduler services to use in staging area

 swh::remote_service::storage0::url: "http://storage1.internal.staging.swh.network:%{hiera('swh::remote_service::storage::port')}/"
 swh::remote_service::storage::config::storage0:
   cls: remote
   args:
     url: "%{alias('swh::remote_service::storage0::url')}"
 swh::remote_service::storage::config: "%{alias('swh::remote_service::storage::config::storage0')}"
 swh::remote_service::storage::config::writable: &swh_remote_service_storage_config_writable "%{alias('swh::remote_service::storage::config::storage0')}"

 swh::remote_service::vault::config::vault0:
   cls: remote
   args:
     url: "http://vault.internal.staging.swh.network:%{hiera('swh::remote_service::vault::port')}/"
 swh::remote_service::vault::config: "%{alias('swh::remote_service::vault::config::vault0')}"
 swh::remote_service::vault::config::writable: "%{alias('swh::remote_service::vault::config::vault0')}"

 swh::remote_service::indexer::config::storage0:
   cls: remote
   url: "http://storage1.internal.staging.swh.network:%{hiera('swh::remote_service::indexer::port')}/"
 swh::remote_service::indexer::config: "%{alias('swh::remote_service::indexer::config::storage0')}"
 swh::remote_service::indexer::config::writable: "%{alias('swh::remote_service::indexer::config::storage0')}"

 swh::remote_service::scheduler::config::scheduler0:
   cls: remote
   args:
     url: "http://scheduler0.internal.staging.swh.network:%{hiera('swh::remote_service::scheduler::port')}/"
 swh::remote_service::scheduler::config: "%{alias('swh::remote_service::scheduler::config::scheduler0')}"
 swh::remote_service::scheduler::config::writable: "%{alias('swh::remote_service::scheduler::config::scheduler0')}"

 swh::remote_service::counters::url: "http://counters0.internal.staging.swh.network:%{hiera('swh::remote_service::counters::port')}/"

 swh::deploy::deposit::url: https://deposit.staging.swh.network
 swh::deploy::deposit::internal_url: "https://deposit-rp.internal.staging.swh.network"

 # do not save pack
 swh::deploy::worker::loader_git::save_data_path: ""
 swh::deploy::worker::loader_git::concurrency: 1

 zookeeper::clusters:
   rocquencourt_staging:
     '2': storage1.internal.staging.swh.network

 kafka::broker::heap_opts: "-Xmx3G -Xms3G"

-kafka::clusters:
-  rocquencourt_staging:
-    zookeeper::chroot: '/kafka/softwareheritage'
-    zookeeper::servers:
-      - journal1.internal.staging.swh.network
-    brokers:
-      storage1.internal.staging.swh.network:
-        id: 2
-        public_hostname: broker1.journal.staging.swh.network
-    broker::heap_opts: "%{alias('kafka::broker::heap_opts')}"
-    superusers:
-      - User:swh-admin-olasd
-      # Users connecting in the plaintext endpoint are ANONYMOUS
-      # TODO: remove when explicit ACLs are given to producers
-      - User:ANONYMOUS
-    tls: true
-    plaintext_port: 9092
-    public_tls_port: 9093
-    internal_tls_port: 9094
-    cluster_config_overrides:
-      offsets.topic.replication.factor: 1  # this is mandatory with only one node
-    public_listener_network: "%{alias('kafka::cluster::public_network')}"
-
 swh::deploy::journal::brokers:
   - journal1.internal.staging.swh.network

 swh::deploy::deposit::vhost::letsencrypt_cert: deposit_staging
 swh::deploy::deposit::reverse_proxy::backend_http_host: deposit.internal.staging.swh.network

 swh::deploy::webapp::vhost::letsencrypt_cert: archive_staging
 swh::deploy::webapp::reverse_proxy::backend_http_host: webapp.internal.staging.swh.network

 swh::remote_service::objstorage::config::rw:
   cls: remote
   url: "http://storage1.internal.staging.swh.network:%{hiera('swh::remote_service::objstorage::port')}/"
 swh::remote_service::objstorage::config::ro:
   cls: filtered
   storage_conf: "%{alias('swh::remote_service::objstorage::config::rw')}"
   filters_conf:
     - type: readonly

 swh::deploy::objstorage::vhost::letsencrypt_cert: objstorage_staging
 swh::deploy::objstorage::reverse_proxy::backend_http_host: objstorage0.internal.staging.swh.network
 swh::deploy::objstorage::reverse_proxy::basic_auth::users:
   - swh-stg
   - enea-stg

 swh::deploy::objstorage::backend::public_server_name: objstorage.staging.swh.network objstorage.internal.staging.swh.network

 swh::remote_service::objstorage::config: "%{alias('swh::remote_service::objstorage::config::ro')}"
 swh::remote_service::objstorage::config::writable: "%{alias('swh::remote_service::objstorage::config::rw')}"

 swh::deploy::objstorage::backend::server_names:
   - "%{alias('swh::deploy::objstorage::backend::public_server_name')}"
   - "%{::swh_hostname.internal_fqdn}"
   - "%{::hostname}"
   - 127.0.0.1
   - localhost
   - "::1"

 swh::deploy::reverse_proxy::services:
   - deposit
   - webapp
   - objstorage

 swh::postgresql::version: '12'
 swh::postgresql::port: 5433
 swh::postgresql::cluster_name: "%{lookup('swh::postgresql::version')}/main"
 swh::postgresql::datadir_base: "%{lookup('swh::base_directory')}/postgres"
 swh::postgresql::datadir: "%{lookup('swh::postgresql::datadir_base')}/%{lookup('swh::postgresql::cluster_name')}"
 swh::postgresql::listen_addresses:
   - 0.0.0.0
 swh::postgresql::network_accesses:
   - 192.168.100.0/24  # Monitoring
   - 192.168.130.0/24  # Staging services

 swh::postgresql::shared_buffers: 32GB

 postgresql::server::config_entries:
   shared_buffers: "%{alias('swh::postgresql::shared_buffers')}"
   cluster_name: "%{alias('swh::postgresql::cluster_name')}"

 swh::dbs:
   storage:
     name: swh
     user: swh
   scheduler:
     name: swh-scheduler
     user: swh-scheduler
   vault:
     name: swh-vault
     user: swh-vault
   lister:
     name: swh-lister
     user: swh-lister
   deposit:
     name: swh-deposit
     user: swh-deposit
   indexer::storage:
     name: swh-indexer
     user: swh-indexer
   webapp:
     name: swh-web
     user: swh-web
   mirror:
     name: swh-mirror
     user: swh-mirror
     password: "%{lookup('swh::deploy::mirror::db::password')}"

 pgbouncer::auth_hba_file: "/etc/postgresql/%{lookup('swh::postgresql::cluster_name')}/pg_hba.conf"
 pgbouncer::common::listen_addresses:
   - 0.0.0.0
 pgbouncer::databases:
   - source_db: swh
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh
   - source_db: swh-scheduler
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh-scheduler
   - source_db: swh-vault
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh-vault
   - source_db: swh-lister
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh-lister
   - source_db: swh-deposit
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh-deposit
   - source_db: swh-indexer
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh-indexer
   - source_db: swh-web
     host: localhost
     auth_user: postgres
     port: 5433
     alias: staging-swh-web
   - source_db: swh-mirror
     host: localhost
     auth_user: postgres
     port: 5433
     alias: swh-mirror

 # open objstorage api
 swh::deploy::objstorage::backend::listen::host: 0.0.0.0
 swh::deploy::objstorage::backend::workers: 16
 swh::deploy::objstorage::directory: "%{hiera('swh::deploy::storage::directory')}"
 swh::deploy::objstorage::slicing: 0:1/1:5

 # Deploy the storage server as a public resource
 swh::deploy::storage::backend::listen::host: 0.0.0.0
 swh::deploy::storage::backend::workers: 4
 swh::deploy::storage::backend::max_requests: 100
 swh::deploy::storage::backend::max_requests_jitter: 10

 # Deploy the indexer storage server as a public resource
 swh::deploy::indexer::storage::backend::listen::host: 0.0.0.0
 swh::deploy::indexer::storage::backend::workers: 4

 nginx::worker_processes: 4

 ## Reverse-proxy and frontend
 hitch::frontend: "[*]:443"
 hitch::proxy_support: true

 varnish::http_port: 80

 apache::http_port: 9080
 # Disable default vhost on port 80
 apache::default_vhost: false

 # Elasticsearch
 elasticsearch::config::cluster::name: swh-search
 elasticsearch::config::discovery::seed_hosts:
   - search-esnode0.internal.staging.swh.network
 elasticsearch::config::cluster::initial_master_nodes:
   - search-esnode0
 elasticsearch::jvm_options::heap_size: 16g
 elasticsearch::config::prometheus::indices: true

 swh::elasticsearch::search_nodes:
   - host: search-esnode0.internal.staging.swh.network
     port: 9200

 swh::deploy::search::journal_client::service_types:
   - objects
   - indexed
 swh::deploy::search::journal_client::objects::consumer_group: swh.search.journal_client-v0.11
 swh::deploy::search::journal_client::indexed::consumer_group: swh.search.journal_client.indexed-v0.11

 swh::deploy::webapp::url: "https://webapp.staging.swh.network"

 swh::deploy::vault::e2e::storage: "%{alias('swh::remote_service::storage0::url')}"

 swh::config::keycloak::realm_name: SoftwareHeritageStaging

 # No historical file on staging
 swh::deploy::counters::cache_static_file:
 swh::deploy::counters::live_data_start: 1609462861  # 2021-01-01

 swh::deploy::webapp::snapshot_e2e:
   uri: '/browse/snapshot/48dcf76ec1a3bd57ec117b1dace633691fdfd70d/branches/'
   regexp:
     - 'refs/tags/syslinux-3.20-pre2.*refs/tags/syslinux-3.20-pre3.*'
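Each pgbouncer alias above maps a staging database onto the local PostgreSQL 12 cluster on port 5433. A sketch of how a client would connect through the bouncer; note this data does not set pgbouncer's listen_port, so the stock default of 6432 is assumed:

    # Hypothetical connection test; 6432 is pgbouncer's default port, not set here.
    psql "host=db1.internal.staging.swh.network port=6432 user=swh dbname=staging-swh" -c 'SELECT 1'
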
diff --git a/data/subnets/vagrant.yaml b/data/subnets/vagrant.yaml
index 11cf21a1..172d79de 100644
--- a/data/subnets/vagrant.yaml
+++ b/data/subnets/vagrant.yaml
@@ -1,243 +1,243 @@
 ---
 dns::local_nameservers:
   - 192.168.100.29
   - 192.168.200.22
 # forwarder for :
 # - swh network
 # - Inria network
 # - external network
 dns::forwarders:
   - 192.168.100.29
   - 192.168.200.22
   - 128.93.77.234
   - 1.1.1.1
 dns::forwarder_insecure: true

 bind::zones::masters:
   - 192.168.100.29

 ntp::servers:
   - sesi-ntp1.inria.fr
   - sesi-ntp2.inria.fr

 networks::manage_interfaces: false
 internal_network: 10.168.128.0/16

 puppet::master::codedir: /tmp/puppet

 networks::private_routes: {}

 smtp::relay_hostname: 'none'

 swh::postgresql::network_accesses:
   - 10.168.100.0/24

 swh::deploy::worker::instances:
   - checker_deposit
   - lister
   - loader_archive
   - loader_cran
   - loader_debian
   - loader_deposit
   - loader_git
   - loader_mercurial
   - loader_nixguix
   - loader_opam
   - loader_npm
   - loader_pypi
   - loader_svn
   - loader_high_priority

 dns::forward_zones: { }

 netbox::vhost::letsencrypt_cert: inventory-vagrant
 netbox::vhost::name: inventory-vagrant.internal.softwareheritage.org
 netbox::mail::from: inventory+vagrant@softwareheritage.org
 netbox::admin::email: sysop+vagrant@softwareheritage.org

 kafka::cluster::public_network: 10.168.130.0/24
 kafka::cluster::heap_ops: "-Xmx512m -Xms512m"

 puppet::master::manage_puppetdb: true
 puppetdb::listen_address: 0.0.0.0
 swh::puppetdb::etcdir: /etc/puppetdb
 puppetdb::confdir: "%{lookup('swh::puppetdb::etcdir')}/conf.d"
 puppetdb::ssl_dir: "%{lookup('swh::puppetdb::etcdir')}/ssl"
 swh::puppetdb::ssl_key_path: "%{lookup('puppetdb::ssl_dir')}/key.pem"
 swh::puppetdb::ssl_key: "%{::puppet_vardir}/ssl/private_keys/pergamon.softwareheritage.org.pem"
 swh::puppetdb::ssl_cert: "%{::puppet_vardir}/ssl/certs/pergamon.softwareheritage.org.pem"
 swh::puppetdb::ssl_cert_path: "%{lookup('puppetdb::ssl_dir')}/cert.pem"
 swh::puppetdb::ssl_ca_cert: "%{::puppet_vardir}/ssl/ca/ca_crt.pem"
 swh::puppetdb::ssl_ca_cert_path: "%{lookup('puppetdb::ssl_dir')}/ca_crt.pem"

 puppet::autosign_entries:
   - '*.softwareheritage.org'
   - '*.staging.swh.network'
   - '*.admin.swh.network'

 static_hostnames:
   10.168.50.10:
     host: bardo.internal.admin.swh.network
   10.168.50.20:
     host: rp1.internal.admin.swh.network
     aliases:
       - hedgedoc.softwareheritage.org
       - grafana.softwareheritage.org
   10.168.50.30:
     host: grafana0.internal.admin.swh.network
   10.168.50.50:
     host: dali.internal.admin.swh.network
     aliases:
       - db1.internal.admin.swh.network
   10.168.100.18:
     host: banco.internal.softwareheritage.org
     aliases:
       - backup.internal.softwareheritage.org
       - kibana.internal.softwareheritage.org
   10.168.100.19:
     host: logstash0.internal.softwareheritage.org
     aliases:
       - logstash.internal.softwareheritage.org
   10.168.100.29:
     host: pergamon.internal.softwareheritage.org
     aliases:
       - pergamon.softwareheritage.org
       - icinga.internal.softwareheritage.org
       - stats.export.softwareheritage
   10.168.100.30:
     host: jenkins.softwareheritage.org
   10.168.100.31:
     host: moma.internal.softwareheritage.org
     aliases:
       - archive.internal.softwareheritage.org
       - deposit.internal.softwareheritage.org
       - objstorage.softwareheritage.org
       - objstorage.internal.softwareheritage.org
   10.168.100.32:
     host: beaubourg.internal.softwareheritage.org
   10.168.100.34:
     host: hypervisor3.internal.softwareheritage.org
   10.168.100.52:
     host: riverside.internal.softwareheritage.org
     aliases:
       - sentry.softwareheritage.org
   10.168.100.61:
     host: esnode1.internal.softwareheritage.org
   10.168.100.62:
     host: esnode2.internal.softwareheritage.org
   10.168.100.63:
     host: esnode3.internal.softwareheritage.org
   10.168.100.71:
     host: webapp1.internal.softwareheritage.org
   10.168.100.81:
     host: search-esnode1.internal.softwareheritage.org
   10.168.100.82:
     host: search-esnode2.internal.softwareheritage.org
   10.168.100.83:
     host: search-esnode3.internal.softwareheritage.org
   10.168.100.85:
     host: search1.internal.softwareheritage.org
   10.168.100.86:
     host: search-esnode4.internal.softwareheritage.org
   10.168.100.87:
     host: search-esnode5.internal.softwareheritage.org
   10.168.100.88:
     host: search-esnode6.internal.softwareheritage.org
   10.168.100.95:
     host: counters1.internal.softwareheritage.org
   10.168.100.101:
     host: uffizi.internal.softwareheritage.org
   10.168.100.102:
-    host: gettys.internal.softwareheritage.org
+    host: getty.internal.softwareheritage.org
   10.168.100.103:
     host: somerset.internal.softwareheritage.org
   10.168.100.104:
     host: saatchi.internal.softwareheritage.org
     aliases:
       - rabbitmq.internal.softwareheritage.org
   10.168.100.105:
     host: thyssen.internal.softwareheritage.org
   10.168.100.106:
     host: kelvingrove.internal.softwareheritage.org
     aliases:
       - auth.softwareheritage.org
   10.168.100.108:
     host: branly.internal.softwareheritage.org
   10.168.100.109:
     host: saam.internal.softwareheritage.org
   10.168.100.110:
     host: met.internal.softwareheritage.org
   10.168.100.131:
     host: zookeeper1.internal.softwareheritage.org
   10.168.100.132:
     host: zookeeper2.internal.softwareheritage.org
   10.168.100.133:
     host: zookeeper3.internal.softwareheritage.org
   10.168.100.150:
     host: jenkins1-debian.internal.softwareheritage.org
   10.168.100.170:
     host: pompidou.internal.softwareheritage.org
   10.168.100.210:
     host: belvedere.internal.softwareheritage.org
     aliases:
       - db.internal.softwareheritage.org
   10.168.100.199:
     host: bojimans.internal.softwareheritage.org
     aliases:
       - inventory.internal.softwareheritage.org
   10.168.100.201:
     host: kafka1.internal.softwareheritage.org
     aliases:
       - broker1.journal.softwareheritage.org
   10.168.100.202:
     host: kafka2.internal.softwareheritage.org
     aliases:
       - broker2.journal.softwareheritage.org
   10.168.100.203:
     host: kafka3.internal.softwareheritage.org
     aliases:
       - broker3.journal.softwareheritage.org
   10.168.100.204:
     host: kafka4.internal.softwareheritage.org
     aliases:
       - broker4.journal.softwareheritage.org
   10.168.130.11:
     host: db1.internal.staging.swh.network
   10.168.130.20:
     host: rp1.internal.staging.swh.network
     aliases:
       - webapp.staging.swh.network
       - deposit.staging.swh.network
       - objstorage.staging.swh.network
   10.168.130.30:
     host: webapp.internal.staging.swh.network
   10.168.130.31:
     host: deposit.internal.staging.swh.network
   10.168.130.41:
     host: storage1.internal.staging.swh.network
     aliases:
       - journal1.internal.staging.swh.network
   10.168.130.50:
     host: scheduler0.internal.staging.swh.network
   10.168.130.60:
     host: vault.internal.staging.swh.network
   10.168.130.80:
     host: search-esnode0.internal.staging.swh.network
   10.168.130.90:
     host: search0.internal.staging.swh.network
   10.168.130.95:
     host: counters0.internal.staging.swh.network
   10.168.130.100:
     host: worker0.internal.staging.swh.network
   10.168.130.101:
     host: worker1.internal.staging.swh.network
   10.168.130.102:
     host: worker2.internal.staging.swh.network
   10.168.130.103:
     host: worker3.internal.staging.swh.network
   10.168.130.110:
     host: objstorage0.internal.staging.swh.network
   10.168.130.160:
     host: mirror-test.internal.staging.swh.network
   10.168.200.22:
     host: ns0.euwest.azure.internal.softwareheritage.org
diff --git a/site-modules/profile/manifests/kafka/management_scripts.pp b/site-modules/profile/manifests/kafka/management_scripts.pp
new file mode 100644
index 00000000..ea39802a
--- /dev/null
+++ b/site-modules/profile/manifests/kafka/management_scripts.pp
@@ -0,0 +1,40 @@
+# Journal management scripts
+class profile::kafka::management_scripts {
+  $clusters = lookup('kafka::clusters')
+
+  $zookeeper_port = lookup('zookeeper::client_port', Integer)
+
+  $clusters.each | $cluster, $config | {
+
+    $script_name = "/usr/local/sbin/manage_kafka_user_${cluster}.sh"
+    $kafka_plaintext_port = $config['plaintext_port']
+    $zookeeper_chroot = $config['zookeeper::chroot']
+    $zookeeper_servers = $config['zookeeper::servers']
+
+    $zookeeper_server_string = join(
+      $zookeeper_servers.map |$server| { "${server}:${zookeeper_port}" },
+      ','
+    )
+
+    $zookeeper_connection_string = "${zookeeper_server_string}${zookeeper_chroot}"
+
+    $brokers_connection_string = join(
+      $config['brokers'].map | $broker, $broker_config | { "${broker}:${kafka_plaintext_port}" },
+      ','
+    )
+
+    # the template uses
+    #  - zookeeper_connection_string
+    #  - brokers_connection_string
+    # using an indirection to avoid a parsing bug
+    $filename = "/usr/local/sbin/create_kafka_users_${cluster}.sh"
+    file { $filename:
+      ensure  => 'present',
+      content => template('profile/kafka/create_kafka_users.sh.erb'),
+      owner   => 'root',
+      group   => 'root',
+      mode    => '0700',
+    }
+
+  }
+
+}
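For reference, given the hiera data in this change (zookeeper::client_port 2181, the staging chroot, plaintext_port 9092), the two variables the manifest hands to the template expand, for rocquencourt_staging, to:

    # Values derived from the data files above:
    zookeepers=journal1.internal.staging.swh.network:2181/kafka/softwareheritage
    brokers=storage1.internal.staging.swh.network:9092
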
diff --git a/site-modules/profile/templates/kafka/create_kafka_users.sh.erb b/site-modules/profile/templates/kafka/create_kafka_users.sh.erb
new file mode 100644
index 00000000..ba293dac
--- /dev/null
+++ b/site-modules/profile/templates/kafka/create_kafka_users.sh.erb
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Managed by Puppet (class profile::kafka::management_scripts), changes will be lost.
+#
+set -e
+
+zookeepers=<%= @zookeeper_connection_string %>
+brokers=<%= @brokers_connection_string %>
+
+usage () {
+    echo "$0 [--privileged] [--consumer-group-prefix prefix] username"
+}
+
+if (( $# < 1 )) || (( $# > 4 )); then
+    usage
+    exit 1
+fi
+
+privileged="unprivileged"
+cgrp_prefix=""
+
+while (( $# )); do
+    if [ "$1" = "--privileged" ]; then
+        privileged="privileged"
+        shift
+    elif [ "$1" = "--consumer-group-prefix" ]; then
+        cgrp_prefix="$2"
+        shift
+        shift
+    else
+        username="$1"
+        break
+    fi
+done
+
+if [ -z "$username" ]; then
+    usage
+    exit 1
+fi
+
+if [ -z "$cgrp_prefix" ]; then
+    cgrp_prefix="$username-"
+fi
+
+echo "Creating user $username, with $privileged access to consumer group prefix $cgrp_prefix"
+
+read -r -s -p "Password for user $username: " password
+echo
+
+echo "Setting user credentials"
+
+/opt/kafka/bin/kafka-configs.sh \
+    --zookeeper "$zookeepers" \
+    --alter \
+    --add-config "SCRAM-SHA-256=[iterations=8192,password=$password],SCRAM-SHA-512=[password=$password]" \
+    --entity-type users \
+    --entity-name "$username"
+
+topic_prefixes="swh.journal.objects. swh.journal.indexed."
+
+if [ "$privileged" = "privileged" ]; then
+    topic_prefixes="$topic_prefixes swh.journal.objects_privileged."
+fi
+
+for topic_prefix in $topic_prefixes; do
+    echo "Granting access to topics $topic_prefix to $username"
+    for op in READ DESCRIBE; do
+        /opt/kafka/bin/kafka-acls.sh --bootstrap-server "$brokers" --add --resource-pattern-type PREFIXED --topic "$topic_prefix" --allow-principal "User:$username" --operation "$op"
+    done
+done
+
+echo "Granting access to consumer group prefix $cgrp_prefix to $username"
+/opt/kafka/bin/kafka-acls.sh --bootstrap-server "$brokers" --add --resource-pattern-type PREFIXED --group "$cgrp_prefix" --allow-principal "User:$username" --operation READ
diff --git a/site-modules/role/manifests/swh_journal_orchestrator.pp b/site-modules/role/manifests/swh_journal_orchestrator.pp
index 7041f272..7048a979 100644
--- a/site-modules/role/manifests/swh_journal_orchestrator.pp
+++ b/site-modules/role/manifests/swh_journal_orchestrator.pp
@@ -1,4 +1,5 @@
 class role::swh_journal_orchestrator inherits role::swh_base {
   include profile::kafka
   include profile::kafka::prometheus_consumer_group_exporter
+  include profile::kafka::management_scripts
 }
diff --git a/site-modules/role/manifests/swh_storage_with_journal.pp b/site-modules/role/manifests/swh_storage_with_journal.pp
index 03a8cfa9..7e81c073 100644
--- a/site-modules/role/manifests/swh_storage_with_journal.pp
+++ b/site-modules/role/manifests/swh_storage_with_journal.pp
@@ -1,9 +1,8 @@
 class role::swh_storage_with_journal inherits role::swh_base_storage {
   include profile::postgresql::client
   include profile::swh::deploy::journal::backfill

   # journal
   include profile::zookeeper
   include profile::kafka::broker
-  include profile::kafka::prometheus_consumer_group_exporter
 }
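A sketch of how the generated script would be invoked on the journal orchestrator once Puppet has deployed it; the usernames below are illustrative, not part of this change:

    # Grants READ/DESCRIBE on topics prefixed swh.journal.objects. and
    # swh.journal.indexed., plus READ on consumer groups prefixed mirror-test-:
    /usr/local/sbin/create_kafka_users_rocquencourt_staging.sh \
        --consumer-group-prefix mirror-test- mirror-test

    # With --privileged, topics prefixed swh.journal.objects_privileged. are
    # also granted:
    /usr/local/sbin/create_kafka_users_rocquencourt_staging.sh --privileged some-admin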