diff --git a/manifests/systemd_journal.pp b/manifests/systemd_journal.pp
index 7252db9..5bee37d 100644
--- a/manifests/systemd_journal.pp
+++ b/manifests/systemd_journal.pp
@@ -1,8 +1,7 @@
 # Manage the configuration of the systemd journal
 class profile::systemd_journal {
 
-  $role = hiera('systemd_journal::role')
-
   include profile::systemd_journal::apt_config
   include profile::systemd_journal::base_config
+  include profile::systemd_journal::journalbeat
 }
diff --git a/manifests/systemd_journal/journalbeat.pp b/manifests/systemd_journal/journalbeat.pp
new file mode 100644
index 0000000..6e7cc23
--- /dev/null
+++ b/manifests/systemd_journal/journalbeat.pp
@@ -0,0 +1,78 @@
+# Journalbeat: a systemd journal collection beater for the ELK stack
+class profile::systemd_journal::journalbeat {
+  $package = 'journalbeat'
+  $user = 'journalbeat'
+  $group = 'nogroup'
+  $homedir = '/var/lib/journalbeat'
+  $configdir = '/etc/journalbeat'
+  $configfile = "${configdir}/journalbeat.yml"
+  $service = 'journalbeat'
+  $servicefile = "/etc/systemd/system/${service}.service"
+  $logstash_hosts = hiera('systemd_journal::logstash_hosts')
+
+  include ::systemd
+
+  package {$package:
+    ensure => present
+  }
+
+  user {$user:
+    ensure     => present,
+    gid        => $group,
+    groups     => 'systemd-journal',
+    home       => $homedir,
+    managehome => true,
+    system     => true,
+  }
+
+  # Uses variables
+  # - $user
+  # - $homedir
+  # - $configfile
+  #
+  # NOTE(review): template path must match templates/systemd_journal/journalbeat/,
+  # not templates/systemd/journalbeat/ (the path created by this change).
+  file {$servicefile:
+    ensure  => present,
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    content => template('profile/systemd_journal/journalbeat/journalbeat.service.erb'),
+    require => Package[$package],
+    notify  => [
+      Exec['systemd-daemon-reload'],
+      Service[$service],
+    ],
+  }
+
+  # 0755, not 0644: the directory needs the execute bit so the (non-root)
+  # journalbeat service can traverse it and read its config file.
+  file {$configdir:
+    ensure => directory,
+    owner  => 'root',
+    group  => 'root',
+    mode   => '0755',
+  }
+
+  # Uses variables
+  # - $logstash_hosts
+  #
+  file {$configfile:
+    ensure  => present,
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    content =>
+      template('profile/systemd_journal/journalbeat/journalbeat.yml.erb'),
+    notify  => [
+      Exec['systemd-daemon-reload'],
+      Service[$service],
+    ],
+  }
+
+  service {$service:
+    ensure  => running,
+    enable  => true,
+    require => [
+      File[$servicefile],
+      File[$configfile],
+      Exec['systemd-daemon-reload'],
+    ],
+  }
+}
diff --git a/templates/systemd_journal/journalbeat/journalbeat.service.erb b/templates/systemd_journal/journalbeat/journalbeat.service.erb
new file mode 100644
index 0000000..ad92780
--- /dev/null
+++ b/templates/systemd_journal/journalbeat/journalbeat.service.erb
@@ -0,0 +1,20 @@
+# Managed by puppet (class profile::systemd_journal::journalbeat), changes will be lost
+
+[Unit]
+Description=Send systemd journal messages to logstash
+After=unbound.service
+Requires=unbound.service
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=20s
+ExecStart=/usr/bin/journalbeat -e -c <%= @configfile %>
+User=<%= @user %>
+Group=systemd-journal
+ReadOnlyDirectories=/
+ReadWriteDirectories=-<%= @homedir %>
+WorkingDirectory=<%= @homedir %>
+
+[Install]
+WantedBy=multi-user.target
diff --git a/templates/systemd_journal/journalbeat/journalbeat.yml.erb b/templates/systemd_journal/journalbeat/journalbeat.yml.erb
new file mode 100644
index 0000000..49e4c4f
--- /dev/null
+++ b/templates/systemd_journal/journalbeat/journalbeat.yml.erb
@@ -0,0 +1,305 @@
+#======================== Journalbeat Configuration ============================
+
+journalbeat:
+  # What position in journald to seek to at start up
+  # options: cursor, tail, head (defaults to tail)
+  seek_position: cursor
+
+  # If seek_position is set to cursor and seeking to cursor fails
+  # fall back to this method. If set to none it will exit
+  # options: tail, head, none (defaults to tail)
+  cursor_seek_fallback: head
+
+  # Store the cursor of the successfully published events
+  write_cursor_state: true
+
+  # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state")
+  cursor_state_file: cursor-state
+
+  # How frequently should we save the cursor to disk (defaults to 5s)
+  #cursor_flush_period: 5s
+
+  # Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue")
+  pending_queue.file: pending-queue
+
+  # How frequently should we save the queue to disk (defaults to 1s).
+  # Pending queue represents the WAL of events queued to be published
+  # or being published and waiting for acknowledgement. In case of a
+  # regular restart of journalbeat all the events not yet acknowledged
+  # will be flushed to disk during the shutdown.
+  # In case of disaster most probably journalbeat won't get a chance to shutdown
+  # itself gracefully and this flush period option will serve you as a
+  # backup creation frequency option.
+  pending_queue.flush_period: 1s
+
+  # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message"
+  # (default to false)
+  clean_field_names: true
+
+  # All journal entries are strings by default. You can try to convert them to numbers.
+  # (defaults to false)
+  convert_to_numbers: false
+
+  # Store all the fields of the Systemd Journal entry under this field
+  # Can be almost any string suitable to be a field name of an ElasticSearch document.
+  # Dots can be used to create nested fields.
+  # Two exceptions:
+  # - no repeated dots;
+  # - no trailing dots, e.g. "journal..field_name." will fail
+  # (defaults to "" hence stores on the upper level of the event)
+  #move_metadata_to_field: ""
+
+  # Specific units to monitor.
+  #units: ["httpd.service"]
+
+  # Directory to monitor instead of connecting to the default journal location
+  #directory: /var/log/journal/remote
+
+  #default_type: journal
+
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this options is not defined, the hostname is used.
+#name: journalbeat
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# The internal queue size for bulk events in the processing pipeline.
+# Do not modify this value.
+#bulk_queue_size: 0
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
+#
+#   event -> filter1 -> event1 -> filter2 ->event2 ...
+#
+# The supported processors are drop_fields, drop_event, include_fields, and
+# add_cloud_metadata.
+#
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
+#
+#processors:
+#- include_fields:
+#    fields: ["cpu"]
+#- drop_fields:
+#    fields: ["cpu.user", "cpu.system"]
+#
+# The following example drops the events that have the HTTP response code 200:
+#
+#processors:
+#- drop_event:
+#    when:
+#       equals:
+#           http.code: 200
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
+#
+#processors:
+#- add_cloud_metadata:
+#
+
+#================================ Outputs ======================================
+
+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.
+
+output.elasticsearch:
+  enabled: false
+
+#----------------------------- Logstash output ---------------------------------
+output.logstash:
+  # Boolean flag to enable or disable the output module.
+  enabled: true
+
+  # The Logstash hosts
+  hosts:
+<% @logstash_hosts.each do |host| -%>
+    - <%= host %>
+<% end -%>
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Optional load balance the events between the Logstash hosts
+  #loadbalance: true
+
+  # Number of batches to be send asynchronously to logstash while processing
+  # new batches.
+  #pipelining: 0
+
+  # Optional index name. The default index name is set to name of the beat
+  # in all lowercase.
+  #index: 'beatname'
+
+  # SOCKS5 proxy server URL
+  #proxy_url: socks5://user:password@socks5-server:2233
+
+  # Resolve names locally when using a proxy server. Defaults to false.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+#------------------------------- File output -----------------------------------
+#output.file:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Path to the directory where to save the generated files. The option is
+  # mandatory.
+  #path: "/tmp/beatname"
+
+  # Name of the generated files. The default is `beatname` and it generates
+  # files: `beatname`, `beatname.1`, `beatname.2`, etc.
+  #filename: beatname
+
+  # Maximum size in kilobytes of each file. When this size is reached, and on
+  # every beatname restart, the files are rotated. The default value is 10240
+  # kB.
+  #rotate_every_kb: 10000
+
+  # Maximum number of files under path. When this number of files is reached, the
+  # oldest file is deleted and the rest are shifted from last to first. The
+  # default is 7 files.
+  #number_of_files: 7
+
+
+#----------------------------- Console output ---------------------------------
+#output.console:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Pretty print json event
+  #pretty: true
+
+#================================= Paths ======================================
+
+# The home path for the beatname installation. This is the default base path
+# for all other path settings and for miscellaneous files that come with the
+# distribution (for example, the sample dashboards).
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+path.home: /var/lib/journalbeat
+
+# The configuration path for the beatname installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the beatname installation. This is the default base path
+# for all the files in which beatname needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for a beatname installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#================================ Logging ======================================
+# There are three options for the log output: syslog, file, stderr.
+# Under Windows systems, the log files are per default sent to the file output,
+# under all other system per default to syslog.
+
+# Sets log level. The default log level is info.
+# Available log levels are: critical, error, warning, info, debug
+logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are "beat", "publish", "service"
+# Multiple selectors can be chained.
+logging.selectors: ["*"]
+
+# Send all logging output to syslog. The default is false.
+logging.to_syslog: true
+
+# If enabled, beatname periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is true.
+logging.metrics.enabled: false
+
+# The period after which to log the internal metrics. The default is 30s.
+logging.metrics.period: 30s
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: false
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/beatname
+
+  # The name of the files where the logs are written to.
+  #name: beatname
+
+  # Configure log file size limit. If limit is reached, log file will be
+  # automatically rotated
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  #keepfiles: 7