diff --git a/data/common/kafka.yaml b/data/common/kafka.yaml
index 486e68ea..5a7fdd4a 100644
--- a/data/common/kafka.yaml
+++ b/data/common/kafka.yaml
@@ -1,97 +1,99 @@
 ---
 zookeeper::clusters:
   rocquencourt:
     '1': kafka1.internal.softwareheritage.org
     '2': kafka2.internal.softwareheritage.org
     '3': kafka3.internal.softwareheritage.org
     '4': kafka4.internal.softwareheritage.org

 zookeeper::datastore: /var/lib/zookeeper
 zookeeper::client_port: 2181
 zookeeper::election_port: 2888
 zookeeper::leader_port: 3888

 kafka::version: '2.6.0'
 kafka::scala_version: '2.13'
 kafka::mirror_url: https://archive.apache.org/dist/

 kafka::cluster::heap_ops: "-Xmx6G -Xms6G"
+kafka::inter_broker_protocol_version: "2.6"
+kafka::log_message_format_version: "2.6"

 kafka::logdirs:
   - /srv/kafka/logdir

 kafka::broker_config:
   log.dirs: "%{alias('kafka::logdirs')}"
   num.recovery.threads.per.data.dir: 10
   # Increase zookeeper and replication timeouts
   # https://cwiki.apache.org/confluence/display/KAFKA/KIP-537%3A+Increase+default+zookeeper+session+timeout will be default in 2.5.0
   zookeeper.session.timeout.ms: 18000
   replica.lag.time.max.ms: 30000
   # Bump consumer offset retention to 30 days instead of the default of 7 days
   offsets.retention.minutes: 43200
   # Increase the socket request max size to 200 MB
   socket.request.max.bytes: 209715200
   # And the max message size to 100 MB
   message.max.bytes: 104857600
-  # For upgrades after 2.6
-  inter.broker.protocol.version: "2.6"
+  inter.broker.protocol.version: "%{alias('kafka::inter_broker_protocol_version')}"
+  log.message.format.version: "%{alias('kafka::log_message_format_version')}"

 # kafka::broker::password in private-data
 kafka::clusters:
   rocquencourt:
     zookeeper::chroot: '/kafka/softwareheritage'
     zookeeper::servers:
       - kafka1.internal.softwareheritage.org
       - kafka2.internal.softwareheritage.org
       - kafka3.internal.softwareheritage.org
       - kafka4.internal.softwareheritage.org
     brokers:
       kafka1.internal.softwareheritage.org:
         id: 1
         public_hostname: broker1.journal.softwareheritage.org
       kafka2.internal.softwareheritage.org:
         id: 2
         public_hostname: broker2.journal.softwareheritage.org
       kafka3.internal.softwareheritage.org:
         id: 3
         public_hostname: broker3.journal.softwareheritage.org
       kafka4.internal.softwareheritage.org:
         id: 4
         public_hostname: broker4.journal.softwareheritage.org
     superusers:
       - User:swh-admin-olasd
       # Users connecting in the plaintext endpoint are ANONYMOUS
       # TODO: remove when explicit ACLs are given to producers
       - User:ANONYMOUS
     broker::heap_opts: "%{lookup('kafka::cluster::heap_ops')}"
     tls: true
     plaintext_port: 9092
     public_tls_port: 9093
     internal_tls_port: 9094
     public_listener_network: 128.93.166.0/26
     # to label the prometheus exporter metrics
     environment: production

   rocquencourt_staging:
     zookeeper::chroot: '/kafka/softwareheritage'
     zookeeper::servers:
       - journal1.internal.staging.swh.network
     brokers:
       storage1.internal.staging.swh.network:
         id: 2
         public_hostname: broker1.journal.staging.swh.network
     broker::heap_opts: "%{alias('kafka::broker::heap_opts')}"
     superusers:
       - User:swh-admin-olasd
       # Users connecting in the plaintext endpoint are ANONYMOUS
       # TODO: remove when explicit ACLs are given to producers
       - User:ANONYMOUS
     tls: true
     plaintext_port: 9092
     public_tls_port: 9093
     internal_tls_port: 9094
     cluster_config_overrides:
       offsets.topic.replication.factor: 1 # this is mandatory with only one node
     public_listener_network: "%{alias('kafka::cluster::public_network')}"
     # to label the prometheus exporter metrics
     environment: staging
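The two new Hiera keys keep the inter-broker protocol and on-disk message format pinned at "2.6" while the broker binaries themselves are upgraded, which is what Kafka's rolling-upgrade procedure expects. A minimal sketch of how a deployment layer can bump them once every broker in a cluster runs the new release (the path is illustrative; the staging layer in the next hunk does exactly this):

    # e.g. data/deployments/<environment>/common.yaml (illustrative path)
    kafka::version: 3.3.1
    kafka::inter_broker_protocol_version: "3.3"
    kafka::log_message_format_version: "3.3"
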
diff --git a/data/deployments/staging/common.yaml b/data/deployments/staging/common.yaml
index 6cd3995e..2670a66e 100644
--- a/data/deployments/staging/common.yaml
+++ b/data/deployments/staging/common.yaml
@@ -1,352 +1,356 @@
 ---
 swh::deploy::environment: staging

 dns::search_domains:
   - internal.staging.swh.network

 swh::deploy::storage::db::host: db1.internal.staging.swh.network
 swh::deploy::storage::db::user: swh
 swh::deploy::storage::db::dbname: swh

 swh::deploy::indexer::storage::db::host: db1.internal.staging.swh.network
 swh::deploy::indexer::storage::db::user: swh-indexer
 swh::deploy::indexer::storage::db::dbname: swh-indexer

 swh::deploy::scheduler::db::host: db1.internal.staging.swh.network
 swh::deploy::scheduler::db::dbname: swh-scheduler
 swh::deploy::scheduler::db::user: swh-scheduler

 swh::deploy::deposit::db::host: db1.internal.staging.swh.network
 swh::deploy::deposit::db::dbuser: swh-deposit
 swh::deploy::deposit::db::dbname: swh-deposit

 swh::deploy::vault::db::host: db1.internal.staging.swh.network
 swh::deploy::vault::db::user: swh-vault
 swh::deploy::vault::db::dbname: swh-vault

 swh::deploy::worker::lister::db::host: db1.internal.staging.swh.network
 swh::deploy::worker::lister::db::user: swh-lister
 swh::deploy::worker::lister::db::name: swh-lister

 swh::deploy::scrubber::db::host: db1.internal.staging.swh.network

 swh::deploy::webapp::db::host: db1.internal.staging.swh.network
 # swh::deploy::webapp::db::password in private data

 swh::deploy::webapp::inbound_email::domain: "%{lookup('swh::deploy::webapp::inbound_email::staging::domain')}"

 swh::deploy::worker::instances: []
 swh::deploy::lister::queues: []

 #### Rabbitmq instance to use
 # swh::deploy::worker::task_broker::password in private data
 swh::deploy::worker::task_broker: "amqp://swhconsumer:%{hiera('swh::deploy::worker::task_broker::password')}@scheduler0.internal.staging.swh.network:5672/%2f"

 #### Storage/Indexer/Vault/Scheduler services to use in staging area

 swh::remote_service::storage0::url: "http://storage1.internal.staging.swh.network:%{hiera('swh::remote_service::storage::port')}/"
 swh::remote_service::storage::config::storage0:
   cls: remote
   url: "%{alias('swh::remote_service::storage0::url')}"
 swh::remote_service::storage::config: "%{alias('swh::remote_service::storage::config::storage0')}"
 swh::remote_service::storage::config::writable: &swh_remote_service_storage_config_writable "%{alias('swh::remote_service::storage::config::storage0')}"

 swh::remote_service::vault::config::vault0:
   cls: remote
   url: "http://vault.internal.staging.swh.network:%{hiera('swh::remote_service::vault::port')}/"
 swh::remote_service::vault::config: "%{alias('swh::remote_service::vault::config::vault0')}"
 swh::remote_service::vault::config::writable: "%{alias('swh::remote_service::vault::config::vault0')}"

 swh::remote_service::indexer::config::storage0:
   cls: remote
   url: "http://storage1.internal.staging.swh.network:%{hiera('swh::remote_service::indexer::port')}/"
 swh::remote_service::indexer::config: "%{alias('swh::remote_service::indexer::config::storage0')}"
 swh::remote_service::indexer::config::writable: "%{alias('swh::remote_service::indexer::config::storage0')}"

 swh::remote_service::scheduler::config::scheduler0:
   cls: remote
   url: "http://scheduler0.internal.staging.swh.network:%{hiera('swh::remote_service::scheduler::port')}/"
 swh::remote_service::scheduler::config: "%{alias('swh::remote_service::scheduler::config::scheduler0')}"
 swh::remote_service::scheduler::config::writable: "%{alias('swh::remote_service::scheduler::config::scheduler0')}"
"http://counters0.internal.staging.swh.network:%{hiera('swh::remote_service::counters::port')}/" swh::deploy::deposit::url: https://deposit.staging.swh.network swh::deploy::deposit::internal_url: "https://deposit-rp.internal.staging.swh.network" # do not save pack swh::deploy::worker::loader_git::save_data_path: "" swh::deploy::worker::loader_git::concurrency: 1 zookeeper::clusters: rocquencourt_staging: '2': storage1.internal.staging.swh.network kafka::broker::heap_opts: "-Xmx3G -Xms3G" swh::deploy::journal::brokers: - journal1.internal.staging.swh.network swh::deploy::deposit::vhost::letsencrypt_cert: deposit_staging swh::deploy::deposit::reverse_proxy::backend_http_host: deposit.internal.staging.swh.network swh::deploy::webapp::vhost::letsencrypt_cert: archive_staging swh::deploy::webapp::reverse_proxy::backend_http_host: webapp.internal.staging.swh.network swh::deploy::graphql::vhost::letsencrypt_cert: graphql_staging swh::deploy::graphql::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}" swh::deploy::graphql::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}" swh::deploy::graphql::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}" swh::deploy::graphql::vhost::hsts_header: "%{hiera('apache::hsts_header')}" swh::deploy::graphql::vhost::access_log_format: combined_with_duration swh::deploy::graphql::icinga_check_string: 'GraphQL' swh::deploy::graphql::reverse_proxy::backend_http_host: rancher-node-staging-worker1.internal.staging.swh.network swh::deploy::graphql::reverse_proxy::backend_http_port: "80" # swh::deploy::graphql::reverse_proxy::basic_auth::swh-stg in private data swh::deploy::graphql::reverse_proxy::basic_auth::users: - swh-stg swh::remote_service::objstorage::config::rw: cls: remote url: "http://storage1.internal.staging.swh.network:%{hiera('swh::remote_service::objstorage::port')}/" swh::remote_service::objstorage::config::ro: cls: filtered storage_conf: "%{alias('swh::remote_service::objstorage::config::rw')}" filters_conf: - type: readonly swh::deploy::objstorage::vhost::letsencrypt_cert: objstorage_staging swh::deploy::objstorage::reverse_proxy::backend_http_host: objstorage0.internal.staging.swh.network swh::deploy::objstorage::reverse_proxy::basic_auth::users: - swh-stg - enea-stg - snyk-stg-01 swh::deploy::objstorage::backend::public_server_name: objstorage.staging.swh.network objstorage.internal.staging.swh.network swh::remote_service::objstorage::config: "%{alias('swh::remote_service::objstorage::config::ro')}" swh::remote_service::objstorage::config::writable: "%{alias('swh::remote_service::objstorage::config::rw')}" swh::deploy::objstorage::backend::server_names: - "%{alias('swh::deploy::objstorage::backend::public_server_name')}" - "%{::swh_hostname.internal_fqdn}" - "%{::hostname}" - 127.0.0.1 - localhost - "::1" swh::deploy::reverse_proxy::services: - deposit - webapp - objstorage - graphql swh::postgresql::version: '12' swh::postgresql::port: 5433 swh::postgresql::cluster_name: "%{lookup('swh::postgresql::version')}/main" swh::postgresql::datadir_base: "%{lookup('swh::base_directory')}/postgres" swh::postgresql::datadir: "%{lookup('swh::postgresql::datadir_base')}/%{lookup('swh::postgresql::cluster_name')}" swh::postgresql::listen_addresses: - 0.0.0.0 swh::postgresql::network_accesses: - 192.168.100.0/24 # Monitoring - 192.168.130.0/24 # Staging services swh::postgresql::shared_buffers: 32GB postgresql::server::config_entries: shared_buffers: "%{alias('swh::postgresql::shared_buffers')}" cluster_name: 
"%{alias('swh::postgresql::cluster_name')}" swh::dbs: storage: name: swh user: swh scheduler: name: swh-scheduler user: swh-scheduler vault: name: swh-vault user: swh-vault lister: name: swh-lister user: swh-lister deposit: name: swh-deposit user: swh-deposit indexer::storage: name: swh-indexer user: swh-indexer webapp: name: swh-web user: swh-web scrubber: name: swh-scrubber user: swh-scrubber mirror: name: swh-mirror user: swh-mirror password: "%{lookup('swh::deploy::mirror::db::password')}" pgbouncer::auth_hba_file: "/etc/postgresql/%{lookup('swh::postgresql::cluster_name')}/pg_hba.conf" pgbouncer::common::listen_addresses: - 0.0.0.0 pgbouncer::databases: - source_db: swh host: localhost auth_user: postgres port: 5433 alias: staging-swh - source_db: swh-scheduler host: localhost auth_user: postgres port: 5433 alias: staging-swh-scheduler - source_db: swh-vault host: localhost auth_user: postgres port: 5433 alias: staging-swh-vault - source_db: swh-lister host: localhost auth_user: postgres port: 5433 alias: staging-swh-lister - source_db: swh-deposit host: localhost auth_user: postgres port: 5433 alias: staging-swh-deposit - source_db: swh-indexer host: localhost auth_user: postgres port: 5433 alias: staging-swh-indexer - source_db: swh-web host: localhost auth_user: postgres port: 5433 alias: staging-swh-web - source_db: swh-mirror host: localhost auth_user: postgres port: 5433 alias: swh-mirror - source_db: swh-scrubber host: localhost auth_user: postgres port: 5433 alias: staging-swh-scrubber # open objstorage api swh::deploy::objstorage::backend::listen::host: 0.0.0.0 swh::deploy::objstorage::backend::workers: 16 swh::deploy::objstorage::directory: "%{hiera('swh::deploy::storage::directory')}" swh::deploy::objstorage::slicing: 0:1/1:5 # Deploy the storage server as a public resource swh::deploy::storage::backend::listen::host: 0.0.0.0 swh::deploy::storage::backend::workers: 4 swh::deploy::storage::backend::max_requests: 100 swh::deploy::storage::backend::max_requests_jitter: 10 # Deploy the indexer storage server as a public resource swh::deploy::indexer::storage::backend::listen::host: 0.0.0.0 swh::deploy::indexer::storage::backend::workers: 4 nginx::worker_processes: 4 ## Reverse-proxy and frontend hitch::frontend: "[*]:443" hitch::proxy_support: true varnish::http_port: 80 apache::http_port: 9080 # Disable default vhost on port 80 apache::default_vhost: false # Elasticsearch elasticsearch::config::cluster::name: swh-search elasticsearch::config::discovery::seed_hosts: - search-esnode0.internal.staging.swh.network elasticsearch::config::cluster::initial_master_nodes: - search-esnode0 elasticsearch::jvm_options::heap_size: 16g elasticsearch::config::prometheus::indices: true swh::elasticsearch::search_nodes: - host: search-esnode0.internal.staging.swh.network port: 9200 swh::deploy::search::journal_client::service_types: - objects - indexed swh::deploy::search::journal_client::objects::consumer_group: swh.search.journal_client-v0.11 swh::deploy::search::journal_client::indexed::consumer_group: swh.search.journal_client.indexed-v0.11 swh::deploy::webapp::url: "https://webapp.staging.swh.network" swh::deploy::vault::e2e::storage: "%{alias('swh::remote_service::storage0::url')}" swh::config::keycloak::realm_name: SoftwareHeritageStaging # No historical file on staging swh::deploy::counters::cache_static_file: swh::deploy::counters::live_data_start: 1609462861 # 2021-01-01 swh::deploy::webapp::snapshot_e2e: uri: '/browse/snapshot/48dcf76ec1a3bd57ec117b1dace633691fdfd70d/branches/' 
   regexp:
     - 'refs/tags/syslinux-3.20-pre2.*refs/tags/syslinux-3.20-pre3.*'

 swh::deploy::worker::loader_high_priority::queues:
   # bzr
   - save_code_now:swh.loader.bzr.tasks.LoadBazaar
   # cvs
   - save_code_now:swh.loader.cvs.tasks.LoadCvsRepository
   # git
   - save_code_now:swh.loader.git.tasks.UpdateGitRepository
   # mercurial
   - save_code_now:swh.loader.mercurial.tasks.LoadMercurial
   - save_code_now:swh.loader.mercurial.tasks.LoadArchiveMercurial
   # svn
   - save_code_now:swh.loader.svn.tasks.LoadSvnRepository
   - save_code_now:swh.loader.svn.tasks.MountAndLoadSvnRepository
   - save_code_now:swh.loader.svn.tasks.DumpMountAndLoadSvnRepository
   # archives
   - save_code_now:swh.loader.package.archive.tasks.LoadArchive

 swh::deploy::scheduler::swh-scheduler-runner-priority::config::task_types:
   - load-bzr
   - load-cvs
   - load-git
   - load-svn
   - load-archive-files
   - load-hg

 syncoid::public_keys::storage1:
   type: ssh-ed25519
   key: "AAAAC3NzaC1lZDI1NTE5AAAAIB0y7dvB0cBluC+Dy+w51P6JCbB18whd/IekP5148XsS"
 syncoid::public_keys::db1:
   type: ssh-ed25519
   key: "AAAAC3NzaC1lZDI1NTE5AAAAILRVodfvLudSiOdWOPDSoN5MIwZPbyZAyClfr/SQUK4w"

 swh::deploy::maven_index_exporter::url: maven-exporter.internal.staging.swh.network

 swh::deploy::indexer_journal_client::origin_intrinsic_metadata::batch_size: 100

 swh::deploy::indexer_journal_client::content_mimetype::journal_authentication: false
 swh::deploy::indexer_journal_client::content_fossology_license::journal_authentication: false
 swh::deploy::indexer_journal_client::extrinsic_metadata::journal_authentication: false
 swh::deploy::indexer_journal_client::origin_intrinsic_metadata::journal_authentication: false

 cassandra::default_cluster_name: archive_staging
+
+kafka::version: 3.3.1
+kafka::inter_broker_protocol_version: "3.3"
+kafka::log_message_format_version: "3.3"
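With the usual layering (deployment-specific data such as data/deployments/staging/common.yaml taking precedence over data/common/kafka.yaml; this is an assumption about the repo's hiera.yaml hierarchy), the aliased keys resolve per environment: staging brokers advertise the 3.3 protocol and message format, while production stays pinned at 2.6 until its brokers are upgraded. Roughly, the resolved kafka::broker_config fragment looks like:

    # Sketch of the resolved values, assuming the hierarchy described above
    # staging:
    inter.broker.protocol.version: "3.3"
    log.message.format.version: "3.3"
    # production (unchanged until its brokers run 3.3.1):
    # inter.broker.protocol.version: "2.6"
    # log.message.format.version: "2.6"
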
diff --git a/site-modules/profile/manifests/kafka/broker.pp b/site-modules/profile/manifests/kafka/broker.pp
index 05615313..e04e7ad2 100644
--- a/site-modules/profile/manifests/kafka/broker.pp
+++ b/site-modules/profile/manifests/kafka/broker.pp
@@ -1,240 +1,240 @@
 # Kafka broker profile
 class profile::kafka::broker {
   include ::profile::kafka

   $base_kafka_config = lookup('kafka::broker_config', Hash)

   $kafka_clusters = lookup('kafka::clusters', Hash)

   $kafka_cluster = $kafka_clusters.filter |$cluster, $data| {
     member($data['brokers'].keys(), $::swh_hostname['internal_fqdn'])
   }.keys()[0]

   $kafka_cluster_config = $kafka_clusters[$kafka_cluster]

   $zookeeper_chroot = $kafka_cluster_config['zookeeper::chroot']
   $zookeeper_servers = $kafka_cluster_config['zookeeper::servers']
   $zookeeper_port = lookup('zookeeper::client_port', Integer)

   $zookeeper_server_string = join(
     $zookeeper_servers.map |$server| {"${server}:${zookeeper_port}"},
     ','
   )
   $zookeeper_connect_string = "${zookeeper_server_string}${zookeeper_chroot}"

   $broker_config = $kafka_cluster_config['brokers'][$::swh_hostname['internal_fqdn']]
   $broker_id = $broker_config['id']

   $internal_hostname = $swh_hostname['internal_fqdn']
   $public_hostname = pick($broker_config['public_hostname'], $internal_hostname.regsubst('\.internal', ''))

   $internal_listener = $internal_hostname
   $public_listener_network = pick($kafka_cluster_config['public_listener_network'], lookup('internal_network'))
   $public_listener = ip_for_network($public_listener_network)

   $cluster_config_overrides = pick_default($kafka_cluster_config['cluster_config_overrides'], {})
   $broker_config_overrides = pick_default($broker_config['config_overrides'], {})

   $kafka_config = $base_kafka_config + $cluster_config_overrides + $broker_config_overrides + {
     'zookeeper.connect' => $zookeeper_connect_string,
     'broker.id'         => $broker_id,
   }

   $cluster_superusers = join(
     # broker usernames
     $kafka_cluster_config['brokers'].keys.map |$broker| {"User:broker-${broker}"}
     + pick_default($kafka_cluster_config['superusers'], []),
     ';'
   )

   $heap_opts = $kafka_cluster_config['broker::heap_opts']

   $kafka_logdirs = lookup('kafka::logdirs', Array)

   $kafka_logdirs.each |$logdir| {
     exec {"create ${logdir}":
       creates => $logdir,
       command => "mkdir -p ${logdir}",
       path    => ['/bin', '/usr/bin', '/sbin', '/usr/sbin'],
     } -> file {$logdir:
       ensure => directory,
       owner  => 'kafka',
       group  => 'kafka',
       mode   => '0750',
     }
   }

   $do_tls = $kafka_cluster_config['tls']

   if $do_tls {
     include ::profile::letsencrypt::host_cert
     $cert_paths = ::profile::letsencrypt::certificate_paths($trusted['certname'])
     # $cert_paths['cert'], $cert_paths['chain'], $cert_paths['privkey']

     $ks_password = fqdn_rand_string(16, '', lookup('kafka::broker::truststore_seed'))
     $ks_location = '/opt/kafka/config/broker.ks'

     java_ks {'kafka:broker':
       ensure       => latest,
       certificate  => $cert_paths['fullchain'],
       private_key  => $cert_paths['privkey'],
       name         => $trusted['certname'],
       target       => $ks_location,
       password     => $ks_password,
       trustcacerts => true,
       require      => Class['Java'],
     }

     $plaintext_port = $kafka_cluster_config['plaintext_port']
     $internal_tls_port = $kafka_cluster_config['internal_tls_port']
     $public_tls_port = $kafka_cluster_config['public_tls_port']

     $sasl_listeners = ['INTERNAL', 'EXTERNAL']
     $sasl_mechanisms = ['SCRAM-SHA-512', 'SCRAM-SHA-256']

     $broker_username = "broker-${::swh_hostname['internal_fqdn']}"
     $broker_password = lookup("kafka::broker::password")

     $kafka_jaas_config = Hash.new(flatten($sasl_listeners.map |$listener| {
       $sasl_mechanisms.map |$mechanism| {
         [
           "listener.name.${listener.downcase}.${mechanism.downcase}.sasl.jaas.config",
           "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"${broker_username}\" password=\"${broker_password}\";",
         ]
       }
     }))

     $kafka_tls_config = {
       'ssl.keystore.location' => $ks_location,
       'ssl.keystore.password' => $ks_password,
       'listeners' => join([
         "INTERNAL_PLAINTEXT://${internal_listener}:${plaintext_port}",
         "INTERNAL://${internal_listener}:${internal_tls_port}",
         "EXTERNAL://${public_listener}:${public_tls_port}",
       ], ','),
       'advertised.listeners' => join([
         "INTERNAL_PLAINTEXT://${internal_hostname}:${plaintext_port}",
         "INTERNAL://${internal_hostname}:${internal_tls_port}",
         "EXTERNAL://${public_hostname}:${public_tls_port}",
       ], ','),
       'listener.security.protocol.map' => join([
         'INTERNAL_PLAINTEXT:PLAINTEXT',
         'INTERNAL:SASL_SSL',
         'EXTERNAL:SASL_SSL',
       ], ','),
       'inter.broker.listener.name' => 'INTERNAL_PLAINTEXT',
       'sasl.enabled.mechanisms' => join($sasl_mechanisms, ','),
       'super.users' => $cluster_superusers,
-      'authorizer.class.name' => 'kafka.security.auth.SimpleAclAuthorizer',
+      'authorizer.class.name' => 'kafka.security.authorizer.AclAuthorizer',
     } + $kafka_jaas_config

     # Reset the TLS listeners when the keystore gets refreshed
     ['INTERNAL', 'EXTERNAL'].each |$tls_listener_name| {
       Java_ks['kafka:broker'] ~> exec {"kafka-reload-tls:${tls_listener_name}":
         command     => join([
           '/opt/kafka/bin/kafka-configs.sh',
           '--bootstrap-server', "${internal_hostname}:${plaintext_port}",
           '--entity-name', "${broker_id}",
           '--entity-type', 'brokers',
           '--add-config',
           "listener.name.${tls_listener_name}.ssl.keystore.location=${ks_location}",
           '--alter',
         ], ' '),
         refreshonly => true,
         require     => Service['kafka'],
       }
     }
   } else {
     $kafka_tls_config = {
"PLAINTEXT://${internal_hostname}:${kafka_cluster_config['plaintext_port']}", } } include ::profile::prometheus::jmx $exporter = $::profile::prometheus::jmx::jar_path $exporter_network = lookup('prometheus::kafka::listen_network', Optional[String], 'first', undef) $exporter_address = lookup('prometheus::kafka::listen_address', Optional[String], 'first', undef) $actual_exporter_address = pick($exporter_address, ip_for_network($exporter_network)) $exporter_port = lookup('prometheus::kafka::listen_port') $target = "${actual_exporter_address}:${exporter_port}" $exporter_config = "${::profile::prometheus::jmx::base_directory}/kafka.yml" file {$exporter_config: owner => 'root', group => 'root', mode => '0644', source => 'puppet:///modules/profile/kafka/jmx_exporter.yml', } class {'::kafka::broker': config => $kafka_config + $kafka_tls_config, opts => join(["-javaagent:${exporter}=${exporter_port}:${exporter_config}"], ' '), limit_nofile => '524288', heap_opts => $heap_opts, env => { # Deployment options from https://docs.confluent.io/current/kafka/deployment.html 'KAFKA_JVM_PERFORMANCE_OPTS' => join([ '-server', '-Djava.awt.headless=true', '-XX:MetaspaceSize=96m', '-XX:+UseG1GC', '-XX:+ExplicitGCInvokesConcurrent', '-XX:MaxGCPauseMillis=20', '-XX:InitiatingHeapOccupancyPercent=35', '-XX:G1HeapRegionSize=16M', '-XX:MinMetaspaceFreeRatio=50', '-XX:MaxMetaspaceFreeRatio=80', ], ' '), }, require => [ File[$exporter], File[$exporter_config], ], } ::systemd::dropin_file {"kafka/restart.conf": ensure => present, unit => "kafka.service", filename => 'restart.conf', content => "[Service]\nRestart=on-failure\nRestartSec=5\n", } ::systemd::dropin_file {"kafka/stop-timeout.conf": ensure => present, unit => "kafka.service", filename => 'stop-timeout.conf', content => "[Service]\nTimeoutStopSec=infinity\n", } ::systemd::dropin_file {"kafka/exitcode.conf": ensure => present, unit => "kafka.service", filename => 'exitcode.conf', content => "[Service]\nSuccessExitStatus=143\n", } ::profile::prometheus::export_scrape_config {'kafka': target => $target, labels => { cluster => $kafka_cluster, } } ::profile::cron::d {'kafka-purge-logs': command => 'find /var/log/kafka -type f -name *.gz -a -ctime +60 -exec rm {} \+', target => 'kafka', minute => 'fqdn_rand', hour => 2, } ::profile::cron::d {'kafka-zip-logs': command => 'find /var/log/kafka -type f -name *.log.* -a -not -name *.gz -a -not -name *-gc.log* -a -ctime +1 -exec gzip {} \+', target => 'kafka', minute => 'fqdn_rand', hour => 3, } }