diff --git a/.fixtures.yml b/.fixtures.yml index ee1a042..2c850bd 100644 --- a/.fixtures.yml +++ b/.fixtures.yml @@ -1,35 +1,35 @@ fixtures: forge_modules: archive: repo: puppet/archive ref: 0.5.1 augeas_core: repo: puppetlabs/augeas_core ref: 1.0.4 stdlib: repo: puppetlabs/stdlib ref: 4.13.1 java: repo: puppetlabs/java - ref: 6.2.0 + ref: 6.5.0 concat: repo: puppetlabs/concat ref: 2.2.1 datacat: repo: richardc/datacat ref: 0.6.2 apt: repo: puppetlabs/apt ref: 7.4.1 zypprepo: repo: puppet/zypprepo ref: 2.2.2 yumrepo_core: repo: puppetlabs/yumrepo_core ref: 1.0.3 java_ks: puppetlabs/java_ks elastic_stack: repo: elastic/elastic_stack ref: 6.1.0 symlinks: elasticsearch: "#{source_dir}" diff --git a/.travis.yml b/.travis.yml index 664d430..cd60211 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,174 +1,136 @@ sudo: required # group: deprecated-2017Q4 services: - docker cache: bundler: true directories: - spec/fixtures/artifacts - spec/fixtures/modules language: ruby script: travis_retry bundle exec rake $TASK jobs: allow_failures: - - env: - - TASK=beaker:ubuntu-server-1404-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:ubuntu-server-1404-x64:snapshot - env: - TASK=beaker:ubuntu-server-1604-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:ubuntu-server-1604-x64:snapshot - - env: - - TASK=beaker:centos-6-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:centos-6-x64:snapshot - env: - TASK=beaker:centos-7-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:centos-7-x64:snapshot - env: - TASK=beaker:debian-8-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:debian-8-x64:snapshot - env: - TASK=beaker:debian-9-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:debian-9-x64:snapshot include: - stage: intake env: - TASK=intake - env: - TASK=intake - PUPPET_VERSION='~> 5.0' - stage: acceptance env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:amazonlinux-1-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:amazonlinux-1-x64:acceptance[7.8.0] + - TASK=beaker:amazonlinux-1-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:amazonlinux-2-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:amazonlinux-2-x64:acceptance[7.8.0] - - env: - - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:centos-6-x64:acceptance - - env: - - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:centos-6-x64:acceptance[7.8.0] + - TASK=beaker:amazonlinux-2-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:centos-7-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:centos-7-x64:acceptance[7.8.0] + - TASK=beaker:centos-7-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:centos-8-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:centos-8-x64:acceptance[7.8.0] - - env: - - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:oracle-6-x64:acceptance - - env: - - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:oracle-6-x64:acceptance[7.8.0] + - TASK=beaker:centos-8-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:oracle-7-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:oracle-7-x64:acceptance[7.8.0] + - TASK=beaker:oracle-7-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:debian-8-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:debian-8-x64:acceptance[7.8.0] + - TASK=beaker:debian-8-x64:acceptance[6.8.12] - env: - 
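The .fixtures.yml pins above drive what `rake spec_prep` installs into spec/fixtures/modules; entries are either a repo/ref hash or a bare "owner/module" shorthand (as java_ks is), in which case the latest release is fetched. A minimal sketch of reading those pins, assuming the layout shown in this hunk:

    require 'yaml'

    # Illustrative helper: print each pinned forge-module ref. Handles both
    # the hash form ('repo'/'ref') and the bare "owner/module" shorthand.
    fixtures = YAML.load_file('.fixtures.yml')
    fixtures['fixtures']['forge_modules'].each do |name, params|
      repo, ref = params.is_a?(Hash) ? [params['repo'], params['ref'] || 'latest'] : [params, 'latest']
      puts format('%-15s %-25s %s', name, repo, ref)
    end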
BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:debian-9-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:debian-9-x64:acceptance[7.8.0] + - TASK=beaker:debian-9-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - TASK=beaker:debian-10-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:debian-10-x64:acceptance[7.8.0] + - TASK=beaker:debian-10-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:ubuntu-server-1404-x64:acceptance + - TASK=beaker:ubuntu-server-1604-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:ubuntu-server-1404-x64:acceptance[7.8.0] + - TASK=beaker:ubuntu-server-1604-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:ubuntu-server-1604-x64:acceptance + - TASK=beaker:ubuntu-server-1804-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:ubuntu-server-1604-x64:acceptance[7.8.0] + - TASK=beaker:ubuntu-server-1804-x64:acceptance[6.8.12] - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:ubuntu-server-1804-x64:acceptance + - TASK=beaker:ubuntu-server-2004-x64:acceptance - env: - BEAKER_PUPPET_COLLECTION=puppet6 - - TASK=beaker:ubuntu-server-1804-x64:acceptance[7.8.0] - # - env: - # - BEAKER_PUPPET_COLLECTION=puppet6 - # - TASK=beaker:ubuntu-server-2004-x64:acceptance - # - env: - # - BEAKER_PUPPET_COLLECTION=puppet6 - # - TASK=beaker:ubuntu-server-2004-x64:acceptance[7.8.0] + - TASK=beaker:ubuntu-server-2004-x64:acceptance[6.8.12] - stage: snapshots - env: - - TASK=beaker:ubuntu-server-1404-x64:snapshot - env: - - OSS_PACKAGE=true - - TASK=beaker:ubuntu-server-1404-x64:snapshot - env: - TASK=beaker:ubuntu-server-1604-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:ubuntu-server-1604-x64:snapshot - - env: - - TASK=beaker:centos-6-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:centos-6-x64:snapshot - env: - TASK=beaker:centos-7-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:centos-7-x64:snapshot - env: - TASK=beaker:debian-8-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:debian-8-x64:snapshot - env: - TASK=beaker:debian-9-x64:snapshot - env: - OSS_PACKAGE=true - TASK=beaker:debian-9-x64:snapshot notifications: slack: secure: T1FO+ttrJNH+bXmNR6349qcttG68Qr1xmMqVVRnUr7+129GQO5174Z8MFC8ck0qOCZGHO7GCNO5seNFflrjF/5EKbdkmVnqhf9gVa9kN7I4psMzxJX9bp29xJA6m3wA4VqCosDKVFSfilDZujAblWT+KDHZLjP8sEWEnHPvCjf69S2XDQEWUoxZan5V9IJQas4XR+hMdIZTA3ChVrEyqRfeehAZImbAr/LH8zChZaTdHZQY7p2rN3+qVNi3+GISV9fNPpOCynnX/ACbdUaRt3+1etxGGaQMPzGmTejN3VlMw4OZRXImb6HQ2rXE+fNCASXiKiwylxTbriQsS0dFv4skxH03YlYM8pqaBpeIOwzf4n45tTzdAQZJMC5cOb+RvwS7qkAwuaVlVxiiA+MWRG/UcFpWS+iNn4KEKxbpBjYP8X1JIP9DlHLME7DNMM2pePv9X6ZjY6eDhVM1gbKi77dXOo5y2Sp0ru8QkLpIKFVXS01O+x7oDHHv2Osvih0jNMgM66Byso3KJYJ6EJ0D2/3Q9ZNpVM4CMuIY5pBQfXf691zqkBHI6JUnU6VMw97cH0k6Gq0ypZoW5trXdnRC5aEg4jKKid84zKmAeTpj/iMuagyb/a5msJstIVboynRtfDHR0J8WWhfSU2wzqKAb6L66iyRe62Fe8OGzLhk2+KNU= env: global: - secure: WFFcjwBIRBG2zyk4c8Ugq0tgI1YaH/+s5eV9h3i2kR1ggobT+nrNqn3hCOkmPtwGYPBNjVj6yp+7qy//MRe8AS2eo1XuMD/P4MYcDGmZiMnqPhz1UsLltGTYlh3y6jl9DJvNujFBQMnAu/ey2g/iWrcHdtl2qninvN3wOrXi2Bs= - secure: bvBaKoV5wBj2eQb4Zx8E2NaBDsMOyuHczRByVLNX5YqeuRWL9kcsUYzAUshFpd2GFa4tzfnSLKCp0+h3T4Uei5e8CjV5dx0VFmijXoZif0OJplRaJ+S3dJSluTV04NoE4u6l5Pg6kkFTMnAaApKVB4je2nSlgvrm/tuavhd9i0M= - secure: akshyW92CqV3Wt+rzQ3ScxIG55ILEaiwQ011rNF1kCXTds5HrHOGy++4VEidaTpems8OQH2+hCLK5r/7FXXgRQEV/TRYRGhp/y9mwqdioyDQ1D0yA3f42NWGNDGg2yOTTbhqQFJg394LDMiLmnevoiajEVIH+Ksr5bV/cIJc4Tc= diff --git a/Rakefile b/Rakefile 
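Each matrix entry above hands a TASK string to `bundle exec rake $TASK`; the bracketed suffix is a Rake task argument, here the Elasticsearch version under test (now pinned to 6.8.12). An illustrative split of such a string, assuming the bracket syntax used throughout the matrix:

    # Illustrative only: decompose a matrix TASK string into the Rake task
    # name and its bracketed version argument.
    task_string = 'beaker:debian-10-x64:acceptance[6.8.12]'
    name, version = task_string.match(/\A([^\[]+)(?:\[(.+)\])?\z/).captures
    puts "task:    #{name}"                   # beaker:debian-10-x64:acceptance
    puts "version: #{version || '(default)'}" # 6.8.12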
index cc9b964..715ae18 100644 --- a/Rakefile +++ b/Rakefile @@ -1,217 +1,219 @@ require 'digest/sha1' require 'rubygems' require 'puppetlabs_spec_helper/rake_tasks' require 'puppet_blacksmith/rake_tasks' require 'net/http' require 'uri' require 'fileutils' require 'rspec/core/rake_task' require 'puppet-strings' require 'puppet-strings/tasks' require 'yaml' require 'json' require_relative 'spec/spec_utilities' ENV['VAULT_APPROLE_ROLE_ID'] ||= '48adc137-3270-fc4a-ae65-1306919d4bb0' oss_package = ENV['OSS_PACKAGE'] and ENV['OSS_PACKAGE'] == 'true' +elasticsearch_default_version = '7.10.1' + # Workaround for certain rspec/beaker versions module TempFixForRakeLastComment def last_comment last_description end end Rake::Application.send :include, TempFixForRakeLastComment exclude_paths = [ 'pkg/**/*', 'vendor/**/*', 'spec/**/*' ] require 'puppet-lint/tasks/puppet-lint' require 'puppet-syntax/tasks/puppet-syntax' PuppetSyntax.exclude_paths = exclude_paths PuppetSyntax.future_parser = true if ENV['FUTURE_PARSER'] == 'true' %w[ 80chars class_inherits_from_params_class class_parameter_defaults single_quote_string_with_variable ].each do |check| PuppetLint.configuration.send("disable_#{check}") end PuppetLint.configuration.ignore_paths = exclude_paths PuppetLint.configuration.log_format = \ '%{path}:%{line}:%{check}:%{KIND}:%{message}' # Append custom cleanup tasks to :clean task :clean => [ :'artifact:clean', :spec_clean ] desc 'remove outdated module fixtures' task :spec_prune do mods = 'spec/fixtures/modules' fixtures = YAML.load_file '.fixtures.yml' fixtures['fixtures']['forge_modules'].each do |mod, params| next unless params.is_a? Hash \ and params.key? 'ref' \ and File.exist? "#{mods}/#{mod}" metadata = JSON.parse(File.read("#{mods}/#{mod}/metadata.json")) FileUtils.rm_rf "#{mods}/#{mod}" unless metadata['version'] == params['ref'] end end task :spec_prep => [:spec_prune] RSpec::Core::RakeTask.new(:spec_verbose) do |t| t.pattern = 'spec/{classes,defines,unit,functions,templates}/**/*_spec.rb' t.rspec_opts = [ '--format documentation', '--require "ci/reporter/rspec"', '--format CI::Reporter::RSpecFormatter', '--color' ] end task :spec_verbose => :spec_prep RSpec::Core::RakeTask.new(:spec_puppet) do |t| t.pattern = 'spec/{classes,defines,functions,templates,unit/facter}/**/*_spec.rb' t.rspec_opts = ['--color'] end task :spec_puppet => :spec_prep RSpec::Core::RakeTask.new(:spec_unit) do |t| t.pattern = 'spec/unit/{type,provider}/**/*_spec.rb' t.rspec_opts = ['--color'] end task :spec_unit => :spec_prep task :beaker => [:spec_prep] desc 'Run all linting/unit tests.' task :intake => [ :syntax, :rubocop, :lint, :validate, :spec_unit, :spec_puppet ] # Plumbing for snapshot tests desc 'Run the snapshot tests' RSpec::Core::RakeTask.new('beaker:snapshot', [:filter]) do |task, args| task.rspec_opts = ['--color'] task.pattern = 'spec/acceptance/tests/acceptance_spec.rb' task.rspec_opts = [] task.rspec_opts << '--format documentation' if ENV['CI'].nil? task.rspec_opts << "--example '#{args[:filter]}'" if args[:filter] ENV['SNAPSHOT_TEST'] = 'true' if Rake::Task.task_defined? 'artifact:snapshot:not_found' puts 'No snapshot artifacts found, skipping snapshot tests.' 
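The spec_prune task above keeps cached fixtures honest: a previously fetched module whose installed version no longer matches the pinned ref is removed, so the spec_prep that follows re-fetches the right one. A standalone sketch of that check, assuming the .fixtures.yml layout from this PR:

    require 'yaml'
    require 'json'
    require 'fileutils'

    mods     = 'spec/fixtures/modules'
    fixtures = YAML.load_file('.fixtures.yml')

    # Drop any fixture whose metadata.json version differs from the pinned ref.
    fixtures['fixtures']['forge_modules'].each do |mod, params|
      next unless params.is_a?(Hash) && params.key?('ref') && File.exist?("#{mods}/#{mod}")
      installed = JSON.parse(File.read("#{mods}/#{mod}/metadata.json"))['version']
      FileUtils.rm_rf "#{mods}/#{mod}" unless installed == params['ref']
    end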
exit(0) end end beaker_node_sets.each do |node| desc "Run the snapshot tests against the #{node} nodeset" task "beaker:#{node}:snapshot", [:filter] => %w[ spec_prep artifact:snapshot:deb artifact:snapshot:rpm ] do |_task, args| ENV['BEAKER_set'] = node Rake::Task['beaker:snapshot'].reenable Rake::Task['beaker:snapshot'].invoke args[:filter] end desc "Run acceptance tests against #{node}" RSpec::Core::RakeTask.new( "beaker:#{node}:acceptance", [:version, :filter] => [:spec_prep] ) do |task, args| ENV['BEAKER_set'] = node - args.with_defaults(:version => '6.8.6', :filter => nil) + args.with_defaults(:version => elasticsearch_default_version, :filter => nil) task.pattern = 'spec/acceptance/tests/acceptance_spec.rb' task.rspec_opts = [] task.rspec_opts << '--format documentation' task.rspec_opts << "--example '#{args[:filter]}'" if args[:filter] ENV['ELASTICSEARCH_VERSION'] ||= args[:version] Rake::Task['artifact:fetch'].invoke(ENV['ELASTICSEARCH_VERSION']) end end namespace :artifact do desc 'Fetch specific installation artifacts' task :fetch, [:version] do |_t, args| fetch_archives( derive_artifact_urls_for(args[:version]) ) end namespace :snapshot do snapshot_version = JSON.parse(http_retry('https://artifacts-api.elastic.co/v1/versions'))['versions'].reject do |version| version.include? 'alpha' end.last ENV['snapshot_version'] = snapshot_version downloads = JSON.parse(http_retry("https://artifacts-api.elastic.co/v1/search/#{snapshot_version}/elasticsearch"))['packages'].select do |pkg, _| pkg =~ /(?:deb|rpm)/ and (oss_package ? pkg =~ /oss/ : pkg !~ /oss/) end.map do |package, urls| [package.split('.').last, urls] end.to_h # We end up with something like: # { # 'rpm' => {'url' => 'https://...', 'sha_url' => 'https://...'}, # 'deb' => {'url' => 'https://...', 'sha_url' => 'https://...'} # } # Note that checksums are currently broken on the Elastic unified release # side; once they start working we can verify them. if downloads.empty? puts 'No snapshot release available; skipping snapshot download' %w[deb rpm].each { |ext| task ext } task 'not_found' else # Download snapshot files downloads.each_pair do |extension, urls| filename = artifact urls['url'] checksum = artifact urls['sha_url'] link = artifact "elasticsearch-snapshot.#{extension}" FileUtils.rm link if File.exist? link task extension => link file link => filename do unless File.exist?(link) and File.symlink?(link) \ and File.readlink(link) == filename File.delete link if File.exist? link File.symlink File.basename(filename), link end end # file filename => checksum do file filename do get urls['url'], filename end task checksum do File.delete checksum if File.exist? checksum get urls['sha_url'], checksum end end end end desc 'Purge fetched artifacts' task :clean do FileUtils.rm_rf(Dir.glob('spec/fixtures/artifacts/*')) end end diff --git a/lib/facter/es_facts.rb b/lib/facter/es_facts.rb index 0114751..eb587f4 100644 --- a/lib/facter/es_facts.rb +++ b/lib/facter/es_facts.rb @@ -1,147 +1,137 @@ require 'net/http' require 'json' require 'yaml' # Helper module to encapsulate custom fact injection -# rubocop:disable Metrics/ModuleLength module EsFacts # Add a fact to the catalog of host facts def self.add_fact(prefix, key, value) key = "#{prefix}_#{key}".to_sym ::Facter.add(key) do setcode { value } end end def self.ssl?(config) tls_keys = [ - 'xpack.security.http.ssl.enabled', - 'shield.http.ssl', - 'searchguard.ssl.http.enabled' + 'xpack.security.http.ssl.enabled' ] tls_keys.any? { |key| (config.key? 
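The snapshot namespace above picks the newest non-alpha version from the Elastic artifacts API and keeps only the deb/rpm packages matching the OSS_PACKAGE flag. A runnable sketch of that selection, assuming the API keeps returning the same JSON shape ('versions' list, then 'packages' keyed by filename with 'url'/'sha_url'):

    require 'json'
    require 'net/http'

    def fetch_json(url)
      JSON.parse(Net::HTTP.get(URI(url)))
    end

    oss = ENV['OSS_PACKAGE'] == 'true'

    # Latest snapshot version that is not an alpha build.
    snapshot = fetch_json('https://artifacts-api.elastic.co/v1/versions')['versions']
               .reject { |v| v.include?('alpha') }
               .last

    # Keep only deb/rpm packages, filtered by OSS vs default distribution.
    packages = fetch_json("https://artifacts-api.elastic.co/v1/search/#{snapshot}/elasticsearch")['packages']
               .select { |pkg, _| pkg =~ /(?:deb|rpm)/ && (oss ? pkg =~ /oss/ : pkg !~ /oss/) }
               .map { |pkg, urls| [pkg.split('.').last, urls['url']] }
               .to_h

    puts "snapshot: #{snapshot}"
    packages.each { |ext, url| puts "#{ext}: #{url}" }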
key) && (config[key] == true) } end # Helper to determine the instance http.port number def self.get_httpport(config) enabled = 'http.enabled' httpport = 'http.port' - if !config[enabled].nil? && config[enabled] == 'false' - false - elsif !config[httpport].nil? - { config[httpport] => ssl?(config) } - else - { '9200' => ssl?(config) } - end + return false, false if !config[enabled].nil? && config[enabled] == 'false' + return config[httpport], ssl?(config) unless config[httpport].nil? + ['9200', ssl?(config)] end # Entrypoint for custom fact populator # # This is a super old function but works; disable a bunch of checks. # rubocop:disable Lint/HandleExceptions # rubocop:disable Metrics/CyclomaticComplexity # rubocop:disable Metrics/PerceivedComplexity def self.run dir_prefix = '/etc/elasticsearch' # httpports is a hash of port_number => ssl? - httpports = {} transportports = [] http_bound_addresses = [] transport_bound_addresses = [] transport_publish_addresses = [] nodes = {} # only when the directory exists we need to process the stuff return unless File.directory?(dir_prefix) - Dir.foreach(dir_prefix) do |dir| - next if dir == '.' - - if File.readable?("#{dir_prefix}/#{dir}/elasticsearch.yml") - config_data = YAML.load_file("#{dir_prefix}/#{dir}/elasticsearch.yml") - httpport = get_httpport(config_data) - httpports.merge! httpport if httpport - end + if File.readable?("#{dir_prefix}/elasticsearch.yml") + config_data = YAML.load_file("#{dir_prefix}/elasticsearch.yml") + httpport, ssl = get_httpport(config_data) end begin - if httpports.keys.count > 0 - - add_fact('elasticsearch', 'ports', httpports.keys.join(',')) - - httpports.each_pair do |httpport, ssl| - next if ssl + add_fact('elasticsearch', 'port', httpport) - key_prefix = "elasticsearch_#{httpport}" + unless ssl + key_prefix = 'elasticsearch' + # key_prefix = "elasticsearch_#{httpport}" - uri = URI("http://localhost:#{httpport}") - http = Net::HTTP.new(uri.host, uri.port) - http.read_timeout = 10 - http.open_timeout = 2 - response = http.get('/') - json_data = JSON.parse(response.body) - next if json_data['status'] && json_data['status'] != 200 + uri = URI("http://localhost:#{httpport}") + http = Net::HTTP.new(uri.host, uri.port) + http.read_timeout = 10 + http.open_timeout = 2 + response = http.get('/') + json_data = JSON.parse(response.body) + if json_data['status'] && json_data['status'] == 200 add_fact(key_prefix, 'name', json_data['name']) add_fact(key_prefix, 'version', json_data['version']['number']) uri2 = URI("http://localhost:#{httpport}/_nodes/#{json_data['name']}") http2 = Net::HTTP.new(uri2.host, uri2.port) http2.read_timeout = 10 http2.open_timeout = 2 response2 = http2.get(uri2.path) json_data_node = JSON.parse(response2.body) add_fact(key_prefix, 'cluster_name', json_data_node['cluster_name']) node_data = json_data_node['nodes'].first add_fact(key_prefix, 'node_id', node_data[0]) nodes_data = json_data_node['nodes'][node_data[0]] process = nodes_data['process'] add_fact(key_prefix, 'mlockall', process['mlockall']) plugins = nodes_data['plugins'] plugin_names = [] plugins.each do |plugin| plugin_names << plugin['name'] plugin.each do |key, value| prefix = "#{key_prefix}_plugin_#{plugin['name']}" add_fact(prefix, key, value) unless key == 'name' end end add_fact(key_prefix, 'plugins', plugin_names.join(',')) nodes_data['http']['bound_address'].each { |i| http_bound_addresses << i } nodes_data['transport']['bound_address'].each { |i| transport_bound_addresses << i } transport_publish_addresses << 
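With the per-instance directories gone, the fact code above reads a single /etc/elasticsearch/elasticsearch.yml and now returns one (port, ssl) pair instead of a hash of ports; only plain-HTTP instances are probed. A minimal sketch mirroring that refactor:

    require 'yaml'

    # Mirrors the new get_httpport semantics: disabled HTTP short-circuits,
    # otherwise return the configured (or default 9200) port plus an SSL flag.
    def http_port(config)
      return [false, false] if config['http.enabled'] == 'false'
      ssl = config['xpack.security.http.ssl.enabled'] == true
      [config['http.port'] || '9200', ssl]
    end

    path   = '/etc/elasticsearch/elasticsearch.yml'
    config = (File.readable?(path) && YAML.load_file(path)) || {}
    port, ssl = http_port(config)
    puts "port=#{port.inspect} ssl=#{ssl.inspect}"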
nodes_data['transport']['publish_address'] unless nodes_data['transport']['publish_address'].nil? transportports << nodes_data['settings']['transport']['tcp']['port'] unless nodes_data['settings']['transport']['tcp'].nil? or nodes_data['settings']['transport']['tcp']['port'].nil? - node = { 'http_ports' => httpports.keys, - 'transport_ports' => transportports, - 'http_bound_addresses' => http_bound_addresses, - 'transport_bound_addresses' => transport_bound_addresses, - 'transport_publish_addresses' => transport_publish_addresses, - json_data['name'] => { 'settings' => nodes_data['settings'], 'http' => nodes_data['http'], 'transport' => nodes_data['transport'] } } + node = { + 'http_ports' => httpports.keys, + 'transport_ports' => transportports, + 'http_bound_addresses' => http_bound_addresses, + 'transport_bound_addresses' => transport_bound_addresses, + 'transport_publish_addresses' => transport_publish_addresses, + json_data['name'] => { + 'settings' => nodes_data['settings'], + 'http' => nodes_data['http'], + 'transport' => nodes_data['transport'] + } + } nodes.merge! node end end rescue end Facter.add(:elasticsearch) do setcode do nodes end nodes unless nodes.empty? end end # rubocop:enable Metrics/CyclomaticComplexity # rubocop:enable Metrics/PerceivedComplexity end EsFacts.run diff --git a/lib/puppet/provider/elastic_plugin.rb b/lib/puppet/provider/elastic_plugin.rb index 07fa44e..97e4d6c 100644 --- a/lib/puppet/provider/elastic_plugin.rb +++ b/lib/puppet/provider/elastic_plugin.rb @@ -1,208 +1,161 @@ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', '..')) require 'uri' require 'puppet_x/elastic/es_versioning' require 'puppet_x/elastic/plugin_parsing' # Generalized parent class for providers that behave like Elasticsearch's plugin # command line tool. # rubocop:disable Metrics/ClassLength class Puppet::Provider::ElasticPlugin < Puppet::Provider # Elasticsearch's home directory. # # @return String def homedir case Facter.value('osfamily') when 'OpenBSD' '/usr/local/elasticsearch' else '/usr/share/elasticsearch' end end def exists? # First, attempt to list whether the named plugin exists by finding a # plugin descriptor file, which each plugin should have. We must wildcard # the name to match meta plugins, see upstream issue for this change: # https://github.com/elastic/elasticsearch/pull/28022 properties_files = Dir[File.join(@resource[:plugin_dir], plugin_path, '**', '*plugin-descriptor.properties')] return false if properties_files.empty? begin # Use the basic name format that the plugin tool supports in order to # determine the version from the resource name. plugin_version = Puppet_X::Elastic.plugin_version(@resource[:name]) # Naively parse the Java .properties file to check version equality. # Because we don't have the luxury of installing arbitrary gems, perform # simple parse with a degree of safety checking in the call chain # # Note that x-pack installs "meta" plugins which bundle multiple plugins # in one. Therefore, we need to find the first "sub" plugin that # indicates which version of x-pack this is. properties = properties_files.sort.map do |prop_file| IO.readlines(prop_file).map(&:strip).reject do |line| line.start_with?('#') or line.empty? end.map do |property| property.split('=') end.reject do |pairs| pairs.length != 2 end.to_h end.find { |prop| prop.key? 
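The plugin provider's exists? above decides whether to reinstall by comparing the version embedded in the resource name against the plugin-descriptor.properties shipped with the installed plugin. A sketch of that naive Java-properties parse (the path below is only illustrative):

    # Read key=value pairs, skipping comments, blank lines and malformed lines.
    prop_file = '/usr/share/elasticsearch/plugins/analysis-icu/plugin-descriptor.properties'

    properties = IO.readlines(prop_file)
                   .map(&:strip)
                   .reject { |line| line.start_with?('#') || line.empty? }
                   .map { |line| line.split('=') }
                   .reject { |pair| pair.length != 2 }
                   .to_h

    # The provider reinstalls when this differs from the requested version.
    puts properties['version']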
'version' } if properties and properties['version'] != plugin_version debug "Elasticsearch plugin #{@resource[:name]} not version #{plugin_version}, reinstalling" destroy return false end rescue ElasticPluginParseFailure debug "Failed to parse plugin version for #{@resource[:name]}" end # If there is no version string, we do not check version equality debug "No version found in #{@resource[:name]}, not enforcing any version" true end def plugin_path @resource[:plugin_path] || Puppet_X::Elastic.plugin_name(@resource[:name]) end - # Intelligently returns the correct installation arguments for version 1 - # version of Elasticsearch. + # Intelligently returns the correct installation arguments for Elasticsearch. # # @return [Array] # arguments to pass to the plugin installation utility - def install1x - if !@resource[:url].nil? - [ - Puppet_X::Elastic.plugin_name(@resource[:name]), - '--url', - @resource[:url] - ] - elsif !@resource[:source].nil? - [ - Puppet_X::Elastic.plugin_name(@resource[:name]), - '--url', - "file://#{@resource[:source]}" - ] - else - [@resource[:name]] - end - end - - # Intelligently returns the correct installation arguments for version 2 - # version of Elasticsearch. - # - # @return [Array] - # arguments to pass to the plugin installation utility - def install2x + def install_args if !@resource[:url].nil? [@resource[:url]] elsif !@resource[:source].nil? ["file://#{@resource[:source]}"] else [@resource[:name]] end end # Format proxy arguments for consumption by the elasticsearch plugin # management tool (i.e., Java properties). # # @return Array # of flags for command-line tools def proxy_args(url) parsed = URI(url) %w[http https].map do |schema| [:host, :port, :user, :password].map do |param| option = parsed.send(param) "-D#{schema}.proxy#{param.to_s.capitalize}=#{option}" unless option.nil? end end.flatten.compact end # Install this plugin on the host. - # rubocop:disable Metrics/CyclomaticComplexity def create commands = [] - commands += proxy_args(@resource[:proxy]) if is2x? and @resource[:proxy] + commands += proxy_args(@resource[:proxy]) if @resource[:proxy] commands << 'install' - commands << '--batch' if batch_capable? - commands += is1x? ? install1x : install2x + commands << '--batch' + commands += install_args debug("Commands: #{commands.inspect}") retry_count = 3 retry_times = 0 begin with_environment do plugin(commands) end rescue Puppet::ExecutionFailure => e retry_times += 1 debug("Failed to install plugin. Retrying... #{retry_times} of #{retry_count}") sleep 2 retry if retry_times < retry_count raise "Failed to install plugin. Received error: #{e.inspect}" end end - # rubocop:enable Metrics/CyclomaticComplexity # Remove this plugin from the host. def destroy with_environment do plugin(['remove', Puppet_X::Elastic.plugin_name(@resource[:name])]) end end - # Determine the installed version of Elasticsearch on this host. - def es_version - Puppet_X::Elastic::EsVersioning.version( - resource[:elasticsearch_package_name], resource.catalog - ) - end - - def is1x? - Puppet::Util::Package.versioncmp(es_version, '2.0.0') < 0 - end - - def is2x? - (Puppet::Util::Package.versioncmp(es_version, '2.0.0') >= 0) && \ - (Puppet::Util::Package.versioncmp(es_version, '3.0.0') < 0) - end - - def batch_capable? 
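proxy_args above expands a single proxy URL into the Java system properties the plugin CLI understands, once for http and once for https. A self-contained sketch of the same expansion with a made-up proxy URL:

    require 'uri'

    def proxy_args(url)
      parsed = URI(url)
      %w[http https].map do |schema|
        [:host, :port, :user, :password].map do |param|
          option = parsed.send(param)
          "-D#{schema}.proxy#{param.to_s.capitalize}=#{option}" unless option.nil?
        end
      end.flatten.compact
    end

    puts proxy_args('http://user:secret@proxy.example.com:8080')
    # Prints -Dhttp.proxyHost/-Dhttp.proxyPort/-Dhttp.proxyUser/-Dhttp.proxyPassword
    # followed by the https.* equivalents, one per line.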
- Puppet::Util::Package.versioncmp(es_version, '2.2.0') >= 0 - end - # Run a command wrapped in necessary env vars def with_environment(&block) env_vars = { 'ES_JAVA_OPTS' => @resource[:java_opts], 'ES_PATH_CONF' => @resource[:configdir] } saved_vars = {} - unless @resource[:java_home].nil? or @resource[:java_home] == '' - env_vars['JAVA_HOME'] = @resource[:java_home] - end - - if !is2x? and @resource[:proxy] - env_vars['ES_JAVA_OPTS'] += proxy_args(@resource[:proxy]) - end + # Use 'java_home' param if supplied, otherwise default to Elasticsearch shipped JDK + env_vars['JAVA_HOME'] = if @resource[:java_home].nil? or @resource[:java_home] == '' + "#{homedir}/jdk" + else + @resource[:java_home] + end env_vars['ES_JAVA_OPTS'] = env_vars['ES_JAVA_OPTS'].join(' ') env_vars.each do |env_var, value| saved_vars[env_var] = ENV[env_var] ENV[env_var] = value end ret = block.yield saved_vars.each do |env_var, value| ENV[env_var] = value end ret end end diff --git a/lib/puppet/provider/elastic_user_command.rb b/lib/puppet/provider/elastic_user_command.rb index 35ef51d..b6ca8e7 100644 --- a/lib/puppet/provider/elastic_user_command.rb +++ b/lib/puppet/provider/elastic_user_command.rb @@ -1,123 +1,125 @@ # Parent provider for Elasticsearch Shield/X-Pack file-based user management # tools. class Puppet::Provider::ElasticUserCommand < Puppet::Provider attr_accessor :homedir # Elasticsearch's home directory. # # @return String def self.homedir @homedir ||= case Facter.value('osfamily') when 'OpenBSD' '/usr/local/elasticsearch' else '/usr/share/elasticsearch' end end # Run the user management command with specified tool arguments. def self.command_with_path(args, configdir = nil) options = { + :combine => true, :custom_environment => { 'ES_PATH_CONF' => configdir || '/etc/elasticsearch' - } + }, + :failonfail => true } execute( [command(:users_cli)] + (args.is_a?(Array) ? args : [args]), options ) end # Gather local file-based users into an array of Hash objects. def self.fetch_users begin output = command_with_path('list') rescue Puppet::ExecutionFailure => e debug("#fetch_users had an error: #{e.inspect}") return nil end debug("Raw command output: #{output}") output.split("\n").select { |u| # Keep only expected "user : role1,role2" formatted lines u[/^[^:]+:\s+\S+$/] }.map { |u| # Break into ["user ", " role1,role2"] u.split(':').first.strip }.map do |user| { :name => user, :ensure => :present, :provider => name } end end # Fetch an array of provider objects from the the list of local users. def self.instances fetch_users.map do |user| new user end end # Generic prefetch boilerplate. def self.prefetch(resources) instances.each do |prov| if (resource = resources[prov.name]) resource.provider = prov end end end def initialize(value = {}) super(value) @property_flush = {} end # Enforce the desired state for this user on-disk. def flush arguments = [] case @property_flush[:ensure] when :absent arguments << 'userdel' arguments << resource[:name] else arguments << 'useradd' arguments << resource[:name] arguments << '-p' << resource[:password] end self.class.command_with_path(arguments, resource[:configdir]) @property_hash = self.class.fetch_users.detect do |u| u[:name] == resource[:name] end end # Set this provider's `:ensure` property to `:present`. def create @property_flush[:ensure] = :present end def exists? @property_hash[:ensure] == :present end # Set this provider's `:ensure` property to `:absent`. def destroy @property_flush[:ensure] = :absent end # Manually set this user's password. 
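fetch_users above parses the users CLI's list output, keeping only well-formed "user : role1,role2" lines and discarding the roles. A sketch of that parse against sample output assumed to match that format:

    # Sample output is an assumption; only the line format matters here.
    output = <<~LIST
      elastic    : superuser
      kibana     : kibana_system
      badline without separator
    LIST

    users = output.split("\n")
                  .select { |u| u[/^[^:]+:\s+\S+$/] }
                  .map    { |u| u.split(':').first.strip }
                  .map    { |u| { :name => u, :ensure => :present } }

    p users  # => [{:name=>"elastic", ...}, {:name=>"kibana", ...}]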
def passwd self.class.command_with_path( [ 'passwd', resource[:name], '-p', resource[:password] ], resource[:configdir] ) end end diff --git a/lib/puppet/provider/elasticsearch_keystore/ruby.rb b/lib/puppet/provider/elasticsearch_keystore/ruby.rb index b21e78e..6233564 100644 --- a/lib/puppet/provider/elasticsearch_keystore/ruby.rb +++ b/lib/puppet/provider/elasticsearch_keystore/ruby.rb @@ -1,166 +1,167 @@ Puppet::Type.type(:elasticsearch_keystore).provide( :elasticsearch_keystore ) do desc 'Provider for `elasticsearch-keystore` based secret management.' def self.defaults_dir @defaults_dir ||= case Facter.value('osfamily') when 'RedHat' '/etc/sysconfig' else '/etc/default' end end def self.home_dir @home_dir ||= case Facter.value('osfamily') when 'OpenBSD' '/usr/local/elasticsearch' else '/usr/share/elasticsearch' end end attr_accessor :defaults_dir, :home_dir commands :keystore => "#{home_dir}/bin/elasticsearch-keystore" def self.run_keystore(args, instance, configdir = '/etc/elasticsearch', stdin = nil) options = { :custom_environment => { - 'ES_INCLUDE' => File.join(defaults_dir, "elasticsearch-#{instance}"), + 'ES_INCLUDE' => File.join(defaults_dir, "elasticsearch-#{instance}"), 'ES_PATH_CONF' => "#{configdir}/#{instance}" }, - :uid => 'elasticsearch', - :gid => 'elasticsearch' + :uid => 'elasticsearch', + :gid => 'elasticsearch', + :failonfail => true } unless stdin.nil? stdinfile = Tempfile.new('elasticsearch-keystore') stdinfile << stdin stdinfile.flush options[:stdinfile] = stdinfile.path end begin stdout = execute([command(:keystore)] + args, options) ensure unless stdin.nil? stdinfile.close stdinfile.unlink end end stdout.exitstatus.zero? ? stdout : raise(Puppet::Error, stdout) end def self.present_keystores Dir[File.join(%w[/ etc elasticsearch *])].select do |directory| File.exist? File.join(directory, 'elasticsearch.keystore') end.map do |instance| settings = run_keystore(['list'], File.basename(instance)).split("\n") { :name => File.basename(instance), :ensure => :present, :provider => name, :settings => settings } end end def self.instances present_keystores.map do |keystore| new keystore end end def self.prefetch(resources) instances.each do |prov| if (resource = resources[prov.name]) resource.provider = prov end end end def initialize(value = {}) super(value) @property_flush = {} end # rubocop:disable Metrics/CyclomaticComplexity # rubocop:disable Metrics/PerceivedComplexity def flush case @property_flush[:ensure] when :present debug(self.class.run_keystore(['create'], resource[:name], resource[:configdir])) @property_flush[:settings] = resource[:settings] when :absent File.delete(File.join([ '/', 'etc', 'elasticsearch', resource[:instance], 'elasticsearch.keystore' ])) end # Note that since the property is :array_matching => :all, we have to # expect that the hash is wrapped in an array. if @property_flush[:settings] and not @property_flush[:settings].first.empty? # Flush properties that _should_ be present @property_flush[:settings].first.each_pair do |setting, value| next unless @property_hash[:settings].nil? \ or not @property_hash[:settings].include? setting debug(self.class.run_keystore( ['add', '--force', '--stdin', setting], resource[:name], resource[:configdir], value )) end # Remove properties that are no longer present if resource[:purge] and not (@property_hash.nil? or @property_hash[:settings].nil?) 
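present_keystores above treats any /etc/elasticsearch/<instance> directory containing an elasticsearch.keystore as a managed keystore, then reads its settings via `elasticsearch-keystore list` with ES_PATH_CONF pointed at that directory. A sketch of the discovery half (the shell-out to the keystore tool is elided):

    # Enumerate instance directories that carry a keystore file.
    keystores = Dir[File.join('/', 'etc', 'elasticsearch', '*')].select do |dir|
      File.exist?(File.join(dir, 'elasticsearch.keystore'))
    end.map do |dir|
      { :name => File.basename(dir), :ensure => :present }
    end

    p keystores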
(@property_hash[:settings] - @property_flush[:settings].first.keys).each do |setting| debug(self.class.run_keystore( ['remove', setting], resource[:name], resource[:configdir] )) end end end @property_hash = self.class.present_keystores.detect do |u| u[:name] == resource[:name] end end # rubocop:enable Metrics/CyclomaticComplexity # rubocop:enable Metrics/PerceivedComplexity # settings property setter # # @return [Hash] settings def settings=(new_settings) @property_flush[:settings] = new_settings end # settings property getter # # @return [Hash] settings def settings @property_hash[:settings] end # Sets the ensure property in the @property_flush hash. # # @return [Symbol] :present def create @property_flush[:ensure] = :present end # Determine whether this resource is present on the system. # # @return [Boolean] def exists? @property_hash[:ensure] == :present end # Set flushed ensure property to absent. # # @return [Symbol] :absent def destroy @property_flush[:ensure] = :absent end end diff --git a/manifests/config.pp b/manifests/config.pp index dffda9f..62d540a 100644 --- a/manifests/config.pp +++ b/manifests/config.pp @@ -1,225 +1,225 @@ # This class exists to coordinate all configuration related actions, # functionality and logical units in a central place. # # It is not intended to be used directly by external resources like node # definitions or other modules. # # @example importing this class into other classes to use its functionality: # class { 'elasticsearch::config': } # # @author Richard Pijnenburg # @author Tyler Langlois # @author Gavin Williams # class elasticsearch::config { #### Configuration Exec { path => [ '/bin', '/usr/bin', '/usr/local/bin' ], cwd => '/', } $init_defaults = merge( { 'MAX_OPEN_FILES' => '65535', }, $elasticsearch::init_defaults ) if ( $elasticsearch::ensure == 'present' ) { file { $elasticsearch::homedir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, owner => $elasticsearch::elasticsearch_user; $elasticsearch::configdir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, - owner => 'root', + owner => $elasticsearch::elasticsearch_user, mode => '2750'; $elasticsearch::datadir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, owner => $elasticsearch::elasticsearch_user, mode => '2750'; $elasticsearch::logdir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, owner => $elasticsearch::elasticsearch_user, mode => '2750'; $elasticsearch::real_plugindir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, owner => $elasticsearch::elasticsearch_user, mode => 'o+Xr'; "${elasticsearch::homedir}/lib": ensure => 'directory', group => '0', owner => 'root', recurse => true; } # Defaults file, either from file source or from hash to augeas commands if ($elasticsearch::init_defaults_file != undef) { file { "${elasticsearch::defaults_location}/elasticsearch": ensure => $elasticsearch::ensure, source => $elasticsearch::init_defaults_file, owner => 'root', group => $elasticsearch::elasticsearch_group, mode => '0660', before => Service['elasticsearch'], notify => $elasticsearch::_notify_service, } } else { augeas { "${elasticsearch::defaults_location}/elasticsearch": incl => "${elasticsearch::defaults_location}/elasticsearch", lens => 'Shellvars.lns', changes => template("${module_name}/etc/sysconfig/defaults.erb"), before => Service['elasticsearch'], notify => $elasticsearch::_notify_service, } } # Generate config file $_config = deep_implode($elasticsearch::config) # Generate 
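The purge branch of flush above removes keystore settings that exist on disk but are absent from the desired hash, while the add branch only touches settings not already present. The bookkeeping is plain set difference; a sketch with made-up setting names:

    # Example values are invented; only the add/remove computation is real.
    current = %w[bootstrap.password xpack.notification.slack.account.ops.secure_url]
    desired = { 'bootstrap.password' => 's3cret' }

    to_add    = desired.keys - current
    to_remove = current - desired.keys

    puts "add:    #{to_add.inspect}"     # => []
    puts "remove: #{to_remove.inspect}"  # => the stale slack URL setting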
SSL config if $elasticsearch::ssl { if ($elasticsearch::keystore_password == undef) { fail('keystore_password required') } if ($elasticsearch::keystore_path == undef) { $_keystore_path = "${elasticsearch::configdir}/elasticsearch.ks" } else { $_keystore_path = $elasticsearch::keystore_path } # Set the correct xpack. settings based on ES version if (versioncmp($elasticsearch::version, '7') >= 0) { $_tls_config = { 'xpack.security.http.ssl.enabled' => true, 'xpack.security.http.ssl.keystore.path' => $_keystore_path, 'xpack.security.http.ssl.keystore.password' => $elasticsearch::keystore_password, 'xpack.security.transport.ssl.enabled' => true, 'xpack.security.transport.ssl.keystore.path' => $_keystore_path, 'xpack.security.transport.ssl.keystore.password' => $elasticsearch::keystore_password, } } else { $_tls_config = { 'xpack.security.transport.ssl.enabled' => true, 'xpack.security.http.ssl.enabled' => true, 'xpack.ssl.keystore.path' => $_keystore_path, 'xpack.ssl.keystore.password' => $elasticsearch::keystore_password, } } # Trust CA Certificate java_ks { 'elasticsearch_ca': ensure => 'latest', certificate => $elasticsearch::ca_certificate, target => $_keystore_path, password => $elasticsearch::keystore_password, trustcacerts => true, } # Load node certificate and private key java_ks { 'elasticsearch_node': ensure => 'latest', certificate => $elasticsearch::certificate, private_key => $elasticsearch::private_key, target => $_keystore_path, password => $elasticsearch::keystore_password, } } else { $_tls_config = {} } # # Logging file or hash # if ($elasticsearch::logging_file != undef) { # $_log4j_content = undef # } else { # if ($elasticsearch::logging_template != undef ) { # $_log4j_content = template($elasticsearch::logging_template) # } else { # $_log4j_content = template("${module_name}/etc/elasticsearch/log4j2.properties.erb") # } # $_logging_source = undef # } # file { # "${elasticsearch::configdir}/log4j2.properties": # ensure => file, # content => $_log4j_content, # source => $_logging_source, # mode => '0644', # notify => $elasticsearch::_notify_service, # require => Class['elasticsearch::package'], # before => Class['elasticsearch::service'], # } # Generate Elasticsearch config $_es_config = merge( $elasticsearch::config, { 'path.data' => $elasticsearch::datadir }, { 'path.logs' => $elasticsearch::logdir }, $_tls_config ) datacat_fragment { 'main_config': target => "${elasticsearch::configdir}/elasticsearch.yml", data => $_es_config, } datacat { "${elasticsearch::configdir}/elasticsearch.yml": template => "${module_name}/etc/elasticsearch/elasticsearch.yml.erb", notify => $elasticsearch::_notify_service, require => Class['elasticsearch::package'], owner => $elasticsearch::elasticsearch_user, group => $elasticsearch::elasticsearch_group, mode => '0440', } # Add any additional JVM options $elasticsearch::jvm_options.each |String $jvm_option| { file_line { "jvm_option_${jvm_option}": ensure => present, path => "${elasticsearch::configdir}/jvm.options", line => $jvm_option, notify => $elasticsearch::_notify_service, } } if $elasticsearch::system_key != undef { file { "${elasticsearch::configdir}/system_key": ensure => 'file', source => $elasticsearch::system_key, mode => '0400', } } # Add secrets to keystore if $elasticsearch::secrets != undef { elasticsearch_keystore { 'elasticsearch_secrets': configdir => $elasticsearch::configdir, purge => $elasticsearch::purge_secrets, settings => $elasticsearch::secrets, notify => $::elaticsearch::_notify_service, } } } elsif ( 
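The SSL branch above selects different xpack settings depending on the Elasticsearch version: 7.x and later use the split http/transport keystore keys, while older 6.x releases still use the shared xpack.ssl.* keys. A plain-Ruby sketch of that branch, with Gem::Version standing in for Puppet's versioncmp:

    def tls_config(version, keystore_path, keystore_password)
      if Gem::Version.new(version) >= Gem::Version.new('7')
        {
          'xpack.security.http.ssl.enabled'                => true,
          'xpack.security.http.ssl.keystore.path'          => keystore_path,
          'xpack.security.http.ssl.keystore.password'      => keystore_password,
          'xpack.security.transport.ssl.enabled'           => true,
          'xpack.security.transport.ssl.keystore.path'     => keystore_path,
          'xpack.security.transport.ssl.keystore.password' => keystore_password
        }
      else
        {
          'xpack.security.transport.ssl.enabled' => true,
          'xpack.security.http.ssl.enabled'      => true,
          'xpack.ssl.keystore.path'              => keystore_path,
          'xpack.ssl.keystore.password'          => keystore_password
        }
      end
    end

    p tls_config('7.10.1', '/etc/elasticsearch/elasticsearch.ks', 'changeme').keys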
$elasticsearch::ensure == 'absent' ) { file { $elasticsearch::real_plugindir: ensure => 'absent', force => true, backup => false, } file { "${elasticsearch::defaults_location}/elasticsearch": ensure => 'absent', subscribe => Service['elasticsearch'], } } } diff --git a/manifests/service.pp b/manifests/service.pp index 45a7465..6e46270 100644 --- a/manifests/service.pp +++ b/manifests/service.pp @@ -1,85 +1,53 @@ # This class exists to coordinate all service management related actions, # functionality and logical units in a central place. # # *Note*: "service" is the Puppet term and type for background processes # in general and is used in a platform-independent way. E.g. "service" means # "daemon" in relation to Unix-like systems. # -# @param ensure -# Controls if the managed resources shall be `present` or `absent`. -# If set to `absent`, the managed software packages will be uninstalled, and -# any traces of the packages will be purged as well as possible, possibly -# including existing configuration files. -# System modifications (if any) will be reverted as well as possible (e.g. -# removal of created users, services, changed log settings, and so on). -# This is a destructive parameter and should be used with care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param service_flags -# Flags to pass to the service. -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# # @author Richard Pijnenburg # @author Tyler Langlois # @author Gavin Williams # class elasticsearch::service { #### Service management if $elasticsearch::ensure == 'present' { case $elasticsearch::status { # make sure service is currently running, start it on boot 'enabled': { $_service_ensure = 'running' $_service_enable = true } # make sure service is currently stopped, do not start it on boot 'disabled': { $_service_ensure = 'stopped' $_service_enable = false } # make sure service is currently running, do not start it on boot 'running': { $_service_ensure = 'running' $_service_enable = false } # do not start service on boot, do not care whether currently running # or not 'unmanaged': { $_service_ensure = undef $_service_enable = false } default: { } } } else { # make sure the service is stopped and disabled (the removal itself will be # done by package.pp) $_service_ensure = 'stopped' $_service_enable = false } service { $elasticsearch::service_name: ensure => $_service_ensure, enable => $_service_enable, } } diff --git a/manifests/service/init.pp b/manifests/service/init.pp deleted file mode 100644 index 1a03280..0000000 --- a/manifests/service/init.pp +++ /dev/null @@ -1,162 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. 
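With the per-instance service classes removed, elasticsearch::service above is the single place that maps the status parameter onto service state. The mapping is a straight lookup; a sketch of it as a table, mirroring the case statement:

    # enabled   -> running now, started at boot
    # disabled  -> stopped now, not started at boot
    # running   -> running now, not started at boot
    # unmanaged -> state left alone, not started at boot
    STATUS_MAP = {
      'enabled'   => { :ensure => 'running', :enable => true  },
      'disabled'  => { :ensure => 'stopped', :enable => false },
      'running'   => { :ensure => 'running', :enable => false },
      'unmanaged' => { :ensure => nil,       :enable => false }
    }.freeze

    p STATUS_MAP['enabled']  # => {:ensure=>"running", :enable=>true}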
-# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# -# @author Richard Pijnenburg -# @author Tyler Langlois -# @author Gavin Williams -# -class elasticsearch::service::init ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - - } - - if(has_key($init_defaults, 'ES_USER') and $init_defaults['ES_USER'] != $elasticsearch::elasticsearch_user) { - fail('Found ES_USER setting for init_defaults but is not same as elasticsearch_user setting. Please use elasticsearch_user setting.') - } - - $new_init_defaults = merge( - { - 'ES_USER' => $elasticsearch::elasticsearch_user, - 'ES_GROUP' => $elasticsearch::elasticsearch_group, - 'MAX_OPEN_FILES' => '65536', - }, - $init_defaults - ) - - $notify_service = $elasticsearch::restart_config_change ? 
{ - true => Service["elasticsearch-instance-${name}"], - false => undef, - } - - if ($ensure == 'present') { - - # Defaults file, either from file source or from hash to augeas commands - if ($init_defaults_file != undef) { - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => $ensure, - source => $init_defaults_file, - owner => 'root', - group => '0', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } else { - augeas { "defaults_${name}": - incl => "${elasticsearch::defaults_location}/elasticsearch-${name}", - lens => 'Shellvars.lns', - changes => template("${module_name}/etc/sysconfig/defaults.erb"), - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - } else { # absent - - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => 'absent', - subscribe => Service["elasticsearch-${$name}"], - } - - } - - # Note that service files are persisted even in the case of absent instances. - # This is to ensure that manifest can remain idempotent and have the service - # file available in order to permit Puppet to introspect system state. - # init file from template - if ($init_template != undef) { - elasticsearch_service_file { "/etc/init.d/elasticsearch-${name}": - ensure => 'present', - content => file($init_template), - instance => $name, - notify => $notify_service, - package_name => $elasticsearch::package_name, - } - -> file { "/etc/init.d/elasticsearch-${name}": - ensure => 'file', - owner => 'root', - group => '0', - mode => '0755', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch-${name}", - } -} diff --git a/manifests/service/openbsd.pp b/manifests/service/openbsd.pp deleted file mode 100644 index 120953e..0000000 --- a/manifests/service/openbsd.pp +++ /dev/null @@ -1,122 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_template -# Service file as a template -# -# @param pid_dir -# Directory where to store the serice pid file. -# -# @param service_flags -# Flags to pass to the service. -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. 
For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# -# @author Richard Pijnenburg -# @author Tyler Langlois -# @author Gavin Williams -# -class elasticsearch::service::openbsd ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Optional[String] $init_template = $elasticsearch::init_template, - Optional[String] $pid_dir = $elasticsearch::pid_dir, - Optional[String] $service_flags = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - - } - - $notify_service = $elasticsearch::restart_config_change ? { - true => Service["elasticsearch-instance-${name}"], - false => undef, - } - - if ($status != 'unmanaged') { - # Note that service files are persisted even in the case of absent instances. - # This is to ensure that manifest can remain idempotent and have the service - # file available in order to permit Puppet to introspect system state. - # init file from template - if ($init_template != undef) { - elasticsearch_service_file { "/etc/rc.d/elasticsearch_${name}": - ensure => 'present', - content => file($init_template), - instance => $name, - pid_dir => $pid_dir, - notify => $notify_service, - package_name => 'elasticsearch', - } - -> file { "/etc/rc.d/elasticsearch_${name}": - ensure => 'file', - owner => 'root', - group => '0', - mode => '0555', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - # action - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch_${name}", - flags => $service_flags, - } - } -} diff --git a/manifests/service/openrc.pp b/manifests/service/openrc.pp deleted file mode 100644 index 0c1761c..0000000 --- a/manifests/service/openrc.pp +++ /dev/null @@ -1,167 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. 
-# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# -# @author Richard Pijnenburg -# @author Tyler Langlois -# @author Gavin Williams -# -class elasticsearch::service::openrc ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - - } - - if(has_key($init_defaults, 'ES_USER') and $init_defaults['ES_USER'] != $elasticsearch::elasticsearch_user) { - fail('Found ES_USER setting for init_defaults but is not same as elasticsearch_user setting. Please use elasticsearch_user setting.') - } - - $new_init_defaults = merge( - { - 'ES_USER' => $elasticsearch::elasticsearch_user, - 'ES_GROUP' => $elasticsearch::elasticsearch_group, - 'MAX_OPEN_FILES' => '65536', - }, - $init_defaults - ) - - $notify_service = $elasticsearch::restart_config_change ? { - true => Service["elasticsearch-instance-${name}"], - false => undef, - } - - - if ( $status != 'unmanaged' and $ensure == 'present' ) { - - # defaults file content. 
Either from a hash or file - if ($init_defaults_file != undef) { - file { "${elasticsearch::defaults_location}/elasticsearch.${name}": - ensure => $ensure, - source => $init_defaults_file, - owner => 'root', - group => '0', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } else { - augeas { "defaults_${name}": - incl => "${elasticsearch::defaults_location}/elasticsearch.${name}", - lens => 'Shellvars.lns', - changes => template("${module_name}/etc/sysconfig/defaults.erb"), - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - } elsif ($status != 'unmanaged') { - - file { "${elasticsearch::defaults_location}/elasticsearch.${name}": - ensure => 'absent', - subscribe => Service["elasticsearch.${$name}"], - } - - } - - - if ($status != 'unmanaged') { - # Note that service files are persisted even in the case of absent instances. - # This is to ensure that manifest can remain idempotent and have the service - # file available in order to permit Puppet to introspect system state. - # init file from template - if ($init_template != undef) { - elasticsearch_service_file { "/etc/init.d/elasticsearch.${name}": - ensure => 'present', - content => file($init_template), - instance => $name, - notify => $notify_service, - package_name => 'elasticsearch', - } - -> file { "/etc/init.d/elasticsearch.${name}": - ensure => 'file', - owner => 'root', - group => '0', - mode => '0755', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - # action - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch.${name}", - } - } -} diff --git a/manifests/service/systemd.pp b/manifests/service/systemd.pp deleted file mode 100644 index d7fdfb5..0000000 --- a/manifests/service/systemd.pp +++ /dev/null @@ -1,195 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. 
-# -# @author Richard Pijnenburg -# @author Tyler Langlois -# @author Gavin Williams -# -class elasticsearch::service::systemd ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $_service_ensure = 'running' - $_service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $_service_ensure = 'stopped' - $_service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $_service_ensure = 'running' - $_service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $_service_ensure = undef - $_service_enable = false - } - default: { } - } - } else { - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $_service_ensure = 'stopped' - $_service_enable = false - } - - if(has_key($init_defaults, 'ES_USER') and $init_defaults['ES_USER'] != $elasticsearch::elasticsearch_user) { - fail('Found ES_USER setting for init_defaults but is not same as elasticsearch_user setting. Please use elasticsearch_user setting.') - } - - $new_init_defaults = merge( - { - 'ES_USER' => $elasticsearch::elasticsearch_user, - 'ES_GROUP' => $elasticsearch::elasticsearch_group, - 'MAX_OPEN_FILES' => '65536', - 'MAX_THREADS' => '4096', - }, - $init_defaults - ) - - $_notify_service = $elasticsearch::restart_config_change ? { - true => [ Exec["systemd_reload_${name}"], Service["elasticsearch-instance-${name}"] ], - false => Exec["systemd_reload_${name}"] - } - - if ($ensure == 'present') { - - # Defaults file, either from file source or from hash to augeas commands - if ($init_defaults_file != undef) { - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => $ensure, - source => $init_defaults_file, - owner => 'root', - group => '0', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $_notify_service, - } - } else { - augeas { "defaults_${name}": - incl => "${elasticsearch::defaults_location}/elasticsearch-${name}", - lens => 'Shellvars.lns', - changes => template("${module_name}/etc/sysconfig/defaults.erb"), - before => Service["elasticsearch-instance-${name}"], - notify => $_notify_service, - } - } - - $_service_require = Exec["systemd_reload_${name}"] - - } else { # absent - - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => 'absent', - subscribe => Service["elasticsearch-instance-${name}"], - notify => Exec["systemd_reload_${name}"], - } - - $_service_require = undef - } - - exec { "systemd_reload_${name}": - command => '/bin/systemctl daemon-reload', - refreshonly => true, - } - - # init file from template - if ($init_template != undef) { - # Check for values in init defaults we may want to set in the init template - if (has_key($new_init_defaults, 'MAX_OPEN_FILES')) { - $nofile = $new_init_defaults['MAX_OPEN_FILES'] - } else { - $nofile = '65536' - } - - if (has_key($new_init_defaults, 'MAX_LOCKED_MEMORY')) { - $memlock = $new_init_defaults['MAX_LOCKED_MEMORY'] - } else { - $memlock = undef - } - - if (has_key($new_init_defaults, 'MAX_THREADS')) { - $nproc = 
$new_init_defaults['MAX_THREADS'] - } else { - $nproc = '4096' - } - - # elasticsearch_service_file { "${elasticsearch::systemd_service_path}/elasticsearch-${name}.service": - # ensure => 'present', - # content => file($init_template), - # defaults_location => $elasticsearch::defaults_location, - # group => $elasticsearch::elasticsearch_group, - # homedir => $elasticsearch::homedir, - # instance => $name, - # memlock => $memlock, - # nofile => $nofile, - # nproc => $nproc, - # package_name => 'elasticsearch', - # pid_dir => $elasticsearch::pid_dir, - # user => $elasticsearch::elasticsearch_user, - # notify => $_notify_service, - # } - # -> file { "${elasticsearch::systemd_service_path}/elasticsearch-${name}.service": - # ensure => 'file', - # owner => 'root', - # group => 'root', - # mode => '0644', - # before => Service["elasticsearch-instance-${name}"], - # notify => $_notify_service, - # } - } - - service { $elasticsearch::service_name: - ensure => $_service_ensure, - enable => $_service_enable, - provider => 'systemd', - require => $_service_require, - } -} diff --git a/metadata.json b/metadata.json index fcba0f7..f8a7e63 100644 --- a/metadata.json +++ b/metadata.json @@ -1,86 +1,83 @@ { "name": "elastic-elasticsearch", "version": "6.4.0", "source": "https://github.com/elastic/puppet-elasticsearch", "author": "elastic", "license": "Apache-2.0", "summary": "Module for managing and configuring Elasticsearch nodes", - "description": "Module for managing and configuring Elasticsearch nodes", "project_page": "https://github.com/elastic/puppet-elasticsearch", "issues_url": "https://github.com/elastic/puppet-elasticsearch/issues", "dependencies": [ { "name": "elastic/elastic_stack", - "version_requirement": ">= 6.1.0 < 7.0.0" + "version_requirement": ">= 6.1.0 < 8.0.0" }, { "name": "richardc/datacat", "version_requirement": ">= 0.6.2 < 1.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">= 4.13.0 < 7.0.0" } ], "operatingsystem_support": [ { "operatingsystem": "RedHat", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "CentOS", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "OracleLinux", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "Scientific", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "Debian", "operatingsystemrelease": [ - "7", - "8" + "8", + "9", + "10" ] }, { "operatingsystem": "Ubuntu", "operatingsystemrelease": [ - "14.04", - "16.04" + "16.04", + "18.04", + "20.04" ] }, { "operatingsystem": "SLES", "operatingsystemrelease": [ "12.1", "12.2" ] } ], "requirements": [ { "name": "puppet", "version_requirement": ">= 4.10.0 < 7.0.0" } ] } diff --git a/spec/classes/000_elasticsearch_init_spec.rb b/spec/classes/000_elasticsearch_init_spec.rb index c37fe5e..1bd73cc 100644 --- a/spec/classes/000_elasticsearch_init_spec.rb +++ b/spec/classes/000_elasticsearch_init_spec.rb @@ -1,438 +1,440 @@ require 'spec_helper' describe 'elasticsearch', :type => 'class' do default_params = { :config => { 'node.name' => 'foo' } } on_supported_os.each do |os, facts| context "on #{os}" do case facts[:os]['family'] when 'Debian' let(:defaults_path) { '/etc/default' } let(:system_service_folder) { '/lib/systemd/system' } let(:pkg_ext) { 'deb' } let(:pkg_prov) { 'dpkg' } let(:version_add) { '' } if (facts[:os]['name'] == 'Debian' and \ facts[:os]['release']['major'].to_i >= 8) or \ (facts[:os]['name'] == 'Ubuntu' and \ 
facts[:os]['release']['major'].to_i >= 15) let(:systemd_service_path) { '/lib/systemd/system' } test_pid = true else test_pid = false end when 'RedHat' let(:defaults_path) { '/etc/sysconfig' } let(:system_service_folder) { '/lib/systemd/system' } let(:pkg_ext) { 'rpm' } let(:pkg_prov) { 'rpm' } let(:version_add) { '-1' } if facts[:os]['release']['major'].to_i >= 7 let(:systemd_service_path) { '/lib/systemd/system' } test_pid = true else test_pid = false end when 'Suse' let(:defaults_path) { '/etc/sysconfig' } let(:pkg_ext) { 'rpm' } let(:pkg_prov) { 'rpm' } let(:version_add) { '-1' } if facts[:os]['name'] == 'OpenSuSE' and facts[:os]['release']['major'].to_i <= 12 let(:systemd_service_path) { '/lib/systemd/system' } else let(:systemd_service_path) { '/usr/lib/systemd/system' } end end let(:facts) do - facts.merge('scenario' => '', 'common' => '') + facts.merge('scenario' => '', 'common' => '', 'elasticsearch' => {}) end let(:params) do default_params.merge({}) end + it { should compile.with_all_deps } + # Varies depending on distro it { should contain_augeas("#{defaults_path}/elasticsearch") } # Systemd-specific files if test_pid == true it { should contain_service('elasticsearch').with( :ensure => 'running', :enable => true ) } end context 'java installation' do let(:pre_condition) do <<-MANIFEST include ::java MANIFEST end it { should contain_class('elasticsearch::config') .that_requires('Class[java]') } end context 'package installation' do context 'via repository' do context 'with specified version' do let(:params) do default_params.merge( :version => '1.0' ) end it { should contain_package('elasticsearch') .with(:ensure => "1.0#{version_add}") } end if facts[:os]['family'] == 'RedHat' context 'Handle special CentOS/RHEL package versioning' do let(:params) do default_params.merge( :version => '1.1-2' ) end it { should contain_package('elasticsearch') .with(:ensure => '1.1-2') } end end end context 'when setting package version and package_url' do let(:params) do default_params.merge( :version => '0.90.10', :package_url => "puppet:///path/to/some/es-0.90.10.#{pkg_ext}" ) end it { expect { should raise_error(Puppet::Error) } } end context 'via package_url setting' do ['file:/', 'ftp://', 'http://', 'https://', 'puppet:///'].each do |schema| context "using #{schema} schema" do let(:params) do default_params.merge( :package_url => "#{schema}domain-or-path/pkg.#{pkg_ext}" ) end unless schema.start_with? 'puppet' it { should contain_exec('create_package_dir_elasticsearch') .with(:command => 'mkdir -p /opt/elasticsearch/swdl') } it { should contain_file('/opt/elasticsearch/swdl') .with( :purge => false, :force => false, :require => 'Exec[create_package_dir_elasticsearch]' ) } end case schema when 'file:/' it { should contain_file( "/opt/elasticsearch/swdl/pkg.#{pkg_ext}" ).with( :source => "/domain-or-path/pkg.#{pkg_ext}", :backup => false ) } when 'puppet:///' it { should contain_file( "/opt/elasticsearch/swdl/pkg.#{pkg_ext}" ).with( :source => "#{schema}domain-or-path/pkg.#{pkg_ext}", :backup => false ) } else [true, false].each do |verify_certificates| context "with download_tool_verify_certificates '#{verify_certificates}'" do let(:params) do default_params.merge( :package_url => "#{schema}domain-or-path/pkg.#{pkg_ext}", :download_tool_verify_certificates => verify_certificates ) end flag = (not verify_certificates) ? 
' --no-check-certificate' : '' it { should contain_exec('download_package_elasticsearch') .with( :command => "wget#{flag} -O /opt/elasticsearch/swdl/pkg.#{pkg_ext} #{schema}domain-or-path/pkg.#{pkg_ext} 2> /dev/null", :require => 'File[/opt/elasticsearch/swdl]' ) } end end end it { should contain_package('elasticsearch') .with( :ensure => 'present', :source => "/opt/elasticsearch/swdl/pkg.#{pkg_ext}", :provider => pkg_prov ) } end end context 'using http:// schema with proxy_url' do let(:params) do default_params.merge( :package_url => "http://www.domain.com/package.#{pkg_ext}", :proxy_url => 'http://proxy.example.com:12345/' ) end it { should contain_exec('download_package_elasticsearch') .with( :environment => [ 'use_proxy=yes', 'http_proxy=http://proxy.example.com:12345/', 'https_proxy=http://proxy.example.com:12345/' ] ) } end end end # package context 'when setting the module to absent' do let(:params) do default_params.merge( :ensure => 'absent' ) end case facts[:os]['family'] when 'Suse' it { should contain_package('elasticsearch') .with(:ensure => 'absent') } else it { should contain_package('elasticsearch') .with(:ensure => 'purged') } end it { should contain_service('elasticsearch') .with( :ensure => 'stopped', :enable => 'false' ) } it { should contain_file('/usr/share/elasticsearch/plugins') .with(:ensure => 'absent') } it { should contain_file("#{defaults_path}/elasticsearch") .with(:ensure => 'absent') } end context 'When managing the repository' do let(:params) do default_params.merge( :manage_repo => true ) end it { should contain_class('elastic_stack::repo') } end context 'When not managing the repository' do let(:params) do default_params.merge( :manage_repo => false ) end it { should compile.with_all_deps } end end end on_supported_os( :hardwaremodels => ['x86_64'], :supported_os => [ { 'operatingsystem' => 'CentOS', 'operatingsystemrelease' => ['7'] } ] ).each do |os, facts| context "on #{os}" do let(:facts) { facts.merge( :scenario => '', :common => '' ) } describe 'main class tests' do # init.pp it { should compile.with_all_deps } it { should contain_class('elasticsearch') } it { should contain_class('elasticsearch::package') } it { should contain_class('elasticsearch::config') .that_requires('Class[elasticsearch::package]') } it { should contain_class('elasticsearch::service') .that_requires('Class[elasticsearch::config]') } # Base directories it { should contain_file('/etc/elasticsearch') } it { should contain_file('/usr/share/elasticsearch') } it { should contain_file('/usr/share/elasticsearch/lib') } it { should contain_file('/var/lib/elasticsearch') } it { should contain_exec('remove_plugin_dir') } end context 'package installation' do describe 'with default package' do it { should contain_package('elasticsearch') .with(:ensure => 'present') } it { should_not contain_package('my-elasticsearch') .with(:ensure => 'present') } end describe 'with specified package name' do let(:params) do default_params.merge( :package_name => 'my-elasticsearch' ) end it { should contain_package('elasticsearch') .with(:ensure => 'present', :name => 'my-elasticsearch') } it { should_not contain_package('elasticsearch') .with(:ensure => 'present', :name => 'elasticsearch') } end describe 'with auto upgrade enabled' do let(:params) do default_params.merge( :autoupgrade => true ) end it { should contain_package('elasticsearch') .with(:ensure => 'latest') } end end describe 'running a a different user' do let(:params) do default_params.merge( :elasticsearch_user => 'myesuser', 
:elasticsearch_group => 'myesgroup' ) end it { should contain_file('/etc/elasticsearch') - .with(:owner => 'root', :group => 'myesgroup') } + .with(:owner => 'myesuser', :group => 'myesgroup') } it { should contain_file('/var/log/elasticsearch') .with(:owner => 'myesuser') } it { should contain_file('/usr/share/elasticsearch') .with(:owner => 'myesuser', :group => 'myesgroup') } it { should contain_file('/var/lib/elasticsearch') .with(:owner => 'myesuser', :group => 'myesgroup') } end describe 'setting jvm_options' do jvm_options = [ '-Xms16g', '-Xmx16g' ] let(:params) do default_params.merge( :jvm_options => jvm_options ) end jvm_options.each do |jvm_option| it { should contain_file_line("jvm_option_#{jvm_option}") .with( :ensure => 'present', :path => '/etc/elasticsearch/jvm.options', :line => jvm_option )} end end context 'with restart_on_change => true' do let(:params) do default_params.merge( :restart_on_change => true ) end describe 'should restart elasticsearch' do it { should contain_file('/etc/elasticsearch/elasticsearch.yml') .that_notifies('Service[elasticsearch]')} end describe 'setting jvm_options triggers restart' do let(:params) do super().merge( :jvm_options => ['-Xmx16g'] ) end it { should contain_file_line('jvm_option_-Xmx16g') .that_notifies('Service[elasticsearch]')} end end # This check helps catch dependency cycles. context 'create_resource' do # Helper for these tests def singular(s) case s when 'indices' 'index' when 'snapshot_repositories' 'snapshot_repository' else s[0..-2] end end { 'indices' => { 'test-index' => {} }, # 'instances' => { 'es-instance' => {} }, 'pipelines' => { 'testpipeline' => { 'content' => {} } }, 'plugins' => { 'head' => {} }, 'roles' => { 'elastic_role' => {} }, 'scripts' => { 'foo' => { 'source' => 'puppet:///path/to/foo.groovy' } }, 'snapshot_repositories' => { 'backup' => { 'location' => '/backups' } }, 'templates' => { 'foo' => { 'content' => {} } }, 'users' => { 'elastic' => { 'password' => 'foobar' } } }.each_pair do |deftype, params| describe deftype do let(:params) do default_params.merge( deftype => params ) end it { should compile } it { should send( "contain_elasticsearch__#{singular(deftype)}", params.keys.first ) } end end end describe 'oss' do let(:params) do default_params.merge(:oss => true) end it do should contain_package('elasticsearch').with( :name => 'elasticsearch-oss' ) end end end end end diff --git a/spec/fixtures/facts/v5-nodes.json b/spec/fixtures/facts/v5-nodes.json deleted file mode 100644 index 7e6dc16..0000000 --- a/spec/fixtures/facts/v5-nodes.json +++ /dev/null @@ -1,371 +0,0 @@ -{ - "_nodes" : { - "total" : 1, - "successful" : 1, - "failed" : 0 - }, - "cluster_name" : "elasticsearch", - "nodes" : { - "9lRSXfREQnqIgBWP0FBi0Q" : { - "name" : "v5", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.5.0", - "build_hash" : "260387d", - "total_indexing_buffer" : 211261849, - "roles" : [ - "master", - "data", - "ingest" - ], - "settings" : { - "client" : { - "type" : "node" - }, - "cluster" : { - "name" : "elasticsearch" - }, - "http" : { - "type" : { - "default" : "netty4" - }, - "port" : "9202" - }, - "node" : { - "name" : "v5" - }, - "path" : { - "logs" : "/Users/tylerjl/Work/elasticsearch-5.5.0/logs", - "home" : "/Users/tylerjl/Work/elasticsearch-5.5.0" - }, - "transport" : { - "type" : { - "default" : "netty4" - } - } - }, - "os" : { - "refresh_interval_in_millis" : 1000, - "name" : "Mac OS X", - "arch" : "x86_64", - "version" : "10.12.5", - 
"available_processors" : 4, - "allocated_processors" : 4 - }, - "process" : { - "refresh_interval_in_millis" : 1000, - "id" : 16828, - "mlockall" : false - }, - "jvm" : { - "pid" : 16828, - "version" : "1.8.0_121", - "vm_name" : "Java HotSpot(TM) 64-Bit Server VM", - "vm_version" : "25.121-b13", - "vm_vendor" : "Oracle Corporation", - "start_time_in_millis" : 1502814708699, - "mem" : { - "heap_init_in_bytes" : 2147483648, - "heap_max_in_bytes" : 2112618496, - "non_heap_init_in_bytes" : 2555904, - "non_heap_max_in_bytes" : 0, - "direct_max_in_bytes" : 2112618496 - }, - "gc_collectors" : [ - "ParNew", - "ConcurrentMarkSweep" - ], - "memory_pools" : [ - "Code Cache", - "Metaspace", - "Compressed Class Space", - "Par Eden Space", - "Par Survivor Space", - "CMS Old Gen" - ], - "using_compressed_ordinary_object_pointers" : "true", - "input_arguments" : [ - "-Xms2g", - "-Xmx2g", - "-XX:+UseConcMarkSweepGC", - "-XX:CMSInitiatingOccupancyFraction=75", - "-XX:+UseCMSInitiatingOccupancyOnly", - "-XX:+AlwaysPreTouch", - "-Xss1m", - "-Djava.awt.headless=true", - "-Dfile.encoding=UTF-8", - "-Djna.nosys=true", - "-Djdk.io.permissionsUseCanonicalPath=true", - "-Dio.netty.noUnsafe=true", - "-Dio.netty.noKeySetOptimization=true", - "-Dio.netty.recycler.maxCapacityPerThread=0", - "-Dlog4j.shutdownHookEnabled=false", - "-Dlog4j2.disable.jmx=true", - "-Dlog4j.skipJansi=true", - "-XX:+HeapDumpOnOutOfMemoryError", - "-Des.path.home=/Users/tylerjl/Work/elasticsearch-5.5.0" - ] - }, - "thread_pool" : { - "force_merge" : { - "type" : "fixed", - "min" : 1, - "max" : 1, - "queue_size" : -1 - }, - "fetch_shard_started" : { - "type" : "scaling", - "min" : 1, - "max" : 8, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "listener" : { - "type" : "fixed", - "min" : 2, - "max" : 2, - "queue_size" : -1 - }, - "index" : { - "type" : "fixed", - "min" : 4, - "max" : 4, - "queue_size" : 200 - }, - "refresh" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "generic" : { - "type" : "scaling", - "min" : 4, - "max" : 128, - "keep_alive" : "30s", - "queue_size" : -1 - }, - "warmer" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "search" : { - "type" : "fixed", - "min" : 7, - "max" : 7, - "queue_size" : 1000 - }, - "flush" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "fetch_shard_store" : { - "type" : "scaling", - "min" : 1, - "max" : 8, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "management" : { - "type" : "scaling", - "min" : 1, - "max" : 5, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "get" : { - "type" : "fixed", - "min" : 4, - "max" : 4, - "queue_size" : 1000 - }, - "bulk" : { - "type" : "fixed", - "min" : 4, - "max" : 4, - "queue_size" : 200 - }, - "snapshot" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - } - }, - "transport" : { - "bound_address" : [ - "[fe80::1]:9300", - "[::1]:9300", - "127.0.0.1:9300" - ], - "publish_address" : "127.0.0.1:9300", - "profiles" : { } - }, - "http" : { - "bound_address" : [ - "[fe80::1]:9202", - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 - }, - "plugins" : [ ], - "modules" : [ - { - "name" : "aggs-matrix-stats", - "version" : "5.5.0", - "description" : "Adds aggregations whose input are a list of numeric fields and output includes a matrix.", - "classname" : 
"org.elasticsearch.search.aggregations.matrix.MatrixAggregationPlugin", - "has_native_controller" : false - }, - { - "name" : "ingest-common", - "version" : "5.5.0", - "description" : "Module for ingest processors that do not require additional security permissions or have large dependencies and resources", - "classname" : "org.elasticsearch.ingest.common.IngestCommonPlugin", - "has_native_controller" : false - }, - { - "name" : "lang-expression", - "version" : "5.5.0", - "description" : "Lucene expressions integration for Elasticsearch", - "classname" : "org.elasticsearch.script.expression.ExpressionPlugin", - "has_native_controller" : false - }, - { - "name" : "lang-groovy", - "version" : "5.5.0", - "description" : "Groovy scripting integration for Elasticsearch", - "classname" : "org.elasticsearch.script.groovy.GroovyPlugin", - "has_native_controller" : false - }, - { - "name" : "lang-mustache", - "version" : "5.5.0", - "description" : "Mustache scripting integration for Elasticsearch", - "classname" : "org.elasticsearch.script.mustache.MustachePlugin", - "has_native_controller" : false - }, - { - "name" : "lang-painless", - "version" : "5.5.0", - "description" : "An easy, safe and fast scripting language for Elasticsearch", - "classname" : "org.elasticsearch.painless.PainlessPlugin", - "has_native_controller" : false - }, - { - "name" : "parent-join", - "version" : "5.5.0", - "description" : "This module adds the support parent-child queries and aggregations", - "classname" : "org.elasticsearch.join.ParentJoinPlugin", - "has_native_controller" : false - }, - { - "name" : "percolator", - "version" : "5.5.0", - "description" : "Percolator module adds capability to index queries and query these queries by specifying documents", - "classname" : "org.elasticsearch.percolator.PercolatorPlugin", - "has_native_controller" : false - }, - { - "name" : "reindex", - "version" : "5.5.0", - "description" : "The Reindex module adds APIs to reindex from one index to another or update documents in place.", - "classname" : "org.elasticsearch.index.reindex.ReindexPlugin", - "has_native_controller" : false - }, - { - "name" : "transport-netty3", - "version" : "5.5.0", - "description" : "Netty 3 based transport implementation", - "classname" : "org.elasticsearch.transport.Netty3Plugin", - "has_native_controller" : false - }, - { - "name" : "transport-netty4", - "version" : "5.5.0", - "description" : "Netty 4 based transport implementation", - "classname" : "org.elasticsearch.transport.Netty4Plugin", - "has_native_controller" : false - } - ], - "ingest" : { - "processors" : [ - { - "type" : "append" - }, - { - "type" : "convert" - }, - { - "type" : "date" - }, - { - "type" : "date_index_name" - }, - { - "type" : "dot_expander" - }, - { - "type" : "fail" - }, - { - "type" : "foreach" - }, - { - "type" : "grok" - }, - { - "type" : "gsub" - }, - { - "type" : "join" - }, - { - "type" : "json" - }, - { - "type" : "kv" - }, - { - "type" : "lowercase" - }, - { - "type" : "remove" - }, - { - "type" : "rename" - }, - { - "type" : "script" - }, - { - "type" : "set" - }, - { - "type" : "sort" - }, - { - "type" : "split" - }, - { - "type" : "trim" - }, - { - "type" : "uppercase" - } - ] - } - } - } -} diff --git a/spec/fixtures/facts/v5-root.json b/spec/fixtures/facts/v5-root.json deleted file mode 100644 index 62ac5ee..0000000 --- a/spec/fixtures/facts/v5-root.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name" : "v5", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "1ENUnFlUQcmF6qko-PjpHw", - "version" : { - 
"number" : "5.5.0", - "build_hash" : "260387d", - "build_date" : "2017-06-30T23:16:05.735Z", - "build_snapshot" : false, - "lucene_version" : "6.6.0" - }, - "tagline" : "You Know, for Search" -} diff --git a/spec/spec_helper_acceptance.rb b/spec/spec_helper_acceptance.rb index 3f1c49b..8431fcc 100644 --- a/spec/spec_helper_acceptance.rb +++ b/spec/spec_helper_acceptance.rb @@ -1,259 +1,260 @@ require 'beaker-rspec' require 'beaker/puppet_install_helper' require 'securerandom' require 'thread' require 'infrataster/rspec' require 'rspec/retry' require 'vault' require 'simp/beaker_helpers' include Simp::BeakerHelpers require_relative 'spec_helper_tls' require_relative 'spec_utilities' require_relative '../lib/puppet_x/elastic/deep_to_i' require_relative '../lib/puppet_x/elastic/deep_to_s' # def f # RSpec.configuration.fact # end run_puppet_install_helper('agent') unless ENV['BEAKER_provision'] == 'no' RSpec.configure do |c| # General-purpose spec-global variables c.add_setting :v, :default => {} # Puppet debug logging v[:puppet_debug] = ENV['BEAKER_debug'] ? true : false unless ENV['snapshot_version'].nil? v[:snapshot_version] = ENV['snapshot_version'] v[:is_snapshot] = ENV['SNAPSHOT_TEST'] == 'true' end unless ENV['ELASTICSEARCH_VERSION'].nil? and v[:snapshot_version].nil? v[:elasticsearch_full_version] = ENV['ELASTICSEARCH_VERSION'] || v[:snapshot_version] v[:elasticsearch_major_version] = v[:elasticsearch_full_version].split('.').first.to_i v[:elasticsearch_package] = {} v[:template] = if v[:elasticsearch_major_version] == 6 JSON.load(File.new('spec/fixtures/templates/6.x.json')) elsif v[:elasticsearch_major_version] >= 8 JSON.load(File.new('spec/fixtures/templates/post_8.0.json')) else JSON.load(File.new('spec/fixtures/templates/7.x.json')) end v[:template] = Puppet_X::Elastic.deep_to_i(Puppet_X::Elastic.deep_to_s(v[:template])) v[:pipeline] = JSON.load(File.new('spec/fixtures/pipelines/example.json')) v[:elasticsearch_plugins] = Dir[ artifact("*#{v[:elasticsearch_full_version]}.zip", ['plugins']) ].map do |plugin| plugin_filename = File.basename(plugin) plugin_name = plugin_filename.match(/^(?.+)-#{v[:elasticsearch_full_version]}.zip/)[:name] [ plugin_name, { :path => plugin, :url => derive_plugin_urls_for(v[:elasticsearch_full_version], [plugin_name]).keys.first } ] end.to_h end v[:oss] = (not ENV['OSS_PACKAGE'].nil?) 
and ENV['OSS_PACKAGE'] == 'true' v[:cluster_name] = SecureRandom.hex(10) # rspec-retry c.display_try_failure_messages = true c.default_sleep_interval = 10 # General-case retry keyword for unstable tests c.around :each, :with_retries do |example| example.run_with_retry retry: 10 end # Helper hook for module cleanup c.after :context, :with_cleanup do apply_manifest <<-EOS class { 'elasticsearch': ensure => 'absent', manage_repo => true, oss => #{v[:oss]}, } file { '/usr/share/elasticsearch/plugin': ensure => 'absent', force => true, recurse => true, require => Class['elasticsearch'], } EOS end c.before :context, :with_certificates do @keystore_password = SecureRandom.hex @role = [*('a'..'z')].sample(8).join # Setup TLS cert placement @tls = gen_certs(2, '/tmp') create_remote_file hosts, @tls[:ca][:cert][:path], @tls[:ca][:cert][:pem] @tls[:clients].each do |node| node.each do |_type, params| create_remote_file hosts, params[:path], params[:pem] end end end c.before :context, :with_license do Vault.address = ENV['VAULT_ADDR'] if ENV['CI'] Vault.auth.approle(ENV['VAULT_APPROLE_ROLE_ID'], ENV['VAULT_APPROLE_SECRET_ID']) else Vault.auth.token(ENV['VAULT_TOKEN']) end licenses = Vault.with_retries(Vault::HTTPConnectionError) do Vault.logical.read(ENV['VAULT_PATH']) end.data raise 'No license found!' unless licenses - license = case v[:elasticsearch_major_version] - when 6 - licenses[:v5] - else - licenses[:v7] - end + # license = case v[:elasticsearch_major_version] + # when 6 + # licenses[:v5] + # else + # licenses[:v7] + # end + license = licenses[:v7] create_remote_file hosts, '/tmp/license.json', license v[:elasticsearch_license_path] = '/tmp/license.json' end c.after :context, :then_purge do shell 'rm -rf {/usr/share,/etc,/var/lib}/elasticsearch*' end c.before :context, :first_purge do shell 'rm -rf {/usr/share,/etc,/var/lib}/elasticsearch*' end # Provide a hook filter to spit out some ES logs if the example fails. c.after(:example, :logs_on_failure) do |example| if example.exception hosts.each do |host| on host, "find / -name '#{v[:cluster_name]}.log' | xargs cat || true" do |result| puts result.formatted_output end end end end end files_dir = ENV['files_dir'] || './spec/fixtures/artifacts' # General bootstrapping steps for each host hosts.each do |host| # # Set the host to 'aio' in order to adopt the puppet-agent style of # # installation, and configure paths/etc. 
# host[:type] = 'aio' # configure_defaults_on host, 'aio' if fact('os.family') == 'Suse' install_package host, '--force-resolution augeas-devel libxml2-devel ruby-devel' on host, 'gem install ruby-augeas --no-ri --no-rdoc' end v[:ext] = case fact('os.family') when 'Debian' 'deb' else 'rpm' end if v[:elasticsearch_package] v[:elasticsearch_package].merge!( derive_full_package_url( v[:elasticsearch_full_version], [v[:ext]] ).flat_map do |url, filename| [[:url, url], [:filename, filename], [:path, artifact(filename)]] end.to_h ) end Infrataster::Server.define(:docker) do |server| server.address = host[:ip] server.ssh = host[:ssh].tap { |s| s.delete :forward_agent } end Infrataster::Server.define(:container) do |server| server.address = host[:vm_ip] # this gets ignored anyway server.from = :docker end end RSpec.configure do |c| if v[:is_snapshot] c.before :suite do scp_to default, "#{files_dir}/elasticsearch-snapshot.#{v[:ext]}", "/tmp/elasticsearch-snapshot.#{v[:ext]}" v[:snapshot_package] = "file:/tmp/elasticsearch-snapshot.#{v[:ext]}" end end c.before :suite do # Install module and dependencies install_dev_puppet_module :ignore_list => [ 'junit' ] + Beaker::DSL::InstallUtils::ModuleUtils::PUPPET_MODULE_INSTALL_IGNORE hosts.each do |host| modules = %w[archive augeas_core datacat java java_ks stdlib elastic_stack] dist_module = { 'Debian' => ['apt'], 'Suse' => ['zypprepo'], 'RedHat' => %w[concat yumrepo_core] }[fact('os.family')] modules += dist_module unless dist_module.nil? modules.each do |mod| copy_module_to( host, :module_name => mod, :source => "spec/fixtures/modules/#{mod}" ) end on(host, 'mkdir -p etc/puppet/modules/another/files/') # Apt doesn't update package caches sometimes, ensure we're caught up. shell 'apt-get update' if fact('os.family') == 'Debian' end # Use the Java class once before the suite of tests unless shell('command -v java', :accept_all_exit_codes => true).exit_code.zero? java = case fact('os.name') when 'OpenSuSE' 'package => "java-1_8_0-openjdk-headless",' else '' end apply_manifest <<-MANIFEST class { "java" : - distribution => "jre", + distribution => "jdk", #{java} } MANIFEST end end end # # Java 8 is only easy to manage on recent distros # def v5x_capable? 
# (fact('os.family') == 'RedHat' and \ # not (fact('os.name') == 'OracleLinux' and \ # f['os']['release']['major'] == '6')) or \ # f.dig 'os', 'distro', 'codename' == 'xenial' # end diff --git a/spec/unit/facter/es_facts_spec.rb b/spec/unit/facter/es_facts_spec.rb index 4aae002..d6bfe9e 100644 --- a/spec/unit/facter/es_facts_spec.rb +++ b/spec/unit/facter/es_facts_spec.rb @@ -1,137 +1,107 @@ require 'spec_helper' require 'webmock/rspec' describe 'elasticsearch facts' do before(:each) do - Dir[File.join(RSpec.configuration.fixture_path, 'facts', '*.json')].map do |json| - File.basename(json).split('.').first.split('-').first - end.uniq.sort.each_with_index do |instance, n| - stub_request(:get, "http://localhost:920#{n}/") - .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) - .to_return( - :status => 200, - :body => File.read( - File.join( - fixture_path, - "facts/#{instance}-root.json" - ) + stub_request(:get, 'http://localhost:9200/') + .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) + .to_return( + :status => 200, + :body => File.read( + File.join( + fixture_path, + 'facts/Warlock-root.json' ) ) + ) - stub_request(:get, "http://localhost:920#{n}/_nodes/#{instance}") - .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) - .to_return( - :status => 200, - :body => File.read( - File.join( - fixture_path, - "facts/#{instance}-nodes.json" - ) + stub_request(:get, 'http://localhost:9200/_nodes/Warlock') + .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) + .to_return( + :status => 200, + :body => File.read( + File.join( + fixture_path, + 'facts/Warlock-nodes.json' ) ) - end + ) allow(File) .to receive(:directory?) .and_return(true) - allow(Dir) - .to receive(:foreach) - .and_yield('es01').and_yield('es02').and_yield('es03').and_yield('es-ssl') - - %w[es01 es02 es03 es-ssl].each do |instance| - allow(File) - .to receive(:readable?) - .with("/etc/elasticsearch/#{instance}/elasticsearch.yml") - .and_return(true) - end + allow(File) + .to receive(:readable?) 
+ .and_return(true) allow(YAML) .to receive(:load_file) - .with('/etc/elasticsearch/es01/elasticsearch.yml', any_args) + .with('/etc/elasticsearch/elasticsearch.yml', any_args) .and_return({}) - allow(YAML) - .to receive(:load_file) - .with('/etc/elasticsearch/es02/elasticsearch.yml', any_args) - .and_return('http.port' => '9201') - - allow(YAML) - .to receive(:load_file) - .with('/etc/elasticsearch/es03/elasticsearch.yml', any_args) - .and_return('http.port' => '9202') - - allow(YAML) - .to receive(:load_file) - .with('/etc/elasticsearch/es-ssl/elasticsearch.yml', any_args) - .and_return( - 'xpack.security.http.ssl.enabled' => true, - 'shield.http.ssl' => true, - 'http.port' => '9443' - ) - require 'lib/facter/es_facts' end - describe 'elasticsearch_ports' do - it 'finds listening ports' do - expect(Facter.fact(:elasticsearch_ports).value.split(',')) - .to contain_exactly('9200', '9201', '9202', '9443') + describe 'elasticsearch_port' do + it 'finds listening port' do + expect(Facter.fact(:elasticsearch_port).value) + .to eq('9200') end end describe 'instance' do it 'returns the node name' do - expect(Facter.fact(:elasticsearch_9200_name).value).to eq('Warlock') + expect(Facter.fact(:elasticsearch_name).value).to eq('Warlock') end it 'returns the node version' do - expect(Facter.fact(:elasticsearch_9200_version).value).to eq('1.4.2') + expect(Facter.fact(:elasticsearch_version).value).to eq('1.4.2') end it 'returns the cluster name' do - expect(Facter.fact(:elasticsearch_9200_cluster_name).value) + expect(Facter.fact(:elasticsearch_cluster_name).value) .to eq('elasticsearch') end it 'returns the node ID' do - expect(Facter.fact(:elasticsearch_9200_node_id).value) + expect(Facter.fact(:elasticsearch_node_id).value) .to eq('yQAWBO3FS8CupZnSvAVziQ') end it 'returns the mlockall boolean' do - expect(Facter.fact(:elasticsearch_9200_mlockall).value).to be_falsy + expect(Facter.fact(:elasticsearch_mlockall).value).to be_falsy end it 'returns installed plugins' do - expect(Facter.fact(:elasticsearch_9200_plugins).value).to eq('kopf') + expect(Facter.fact(:elasticsearch_plugins).value).to eq('kopf') end describe 'kopf plugin' do it 'returns the correct version' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_version).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_version).value) .to eq('1.4.3') end it 'returns the correct description' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_description).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_description).value) .to eq('kopf - simple web administration tool for ElasticSearch') end it 'returns the plugin URL' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_url).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_url).value) .to eq('/_plugin/kopf/') end it 'returns the plugin JVM boolean' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_jvm).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_jvm).value) .to be_falsy end it 'returns the plugin _site boolean' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_site).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_site).value) .to be_truthy end end # of describe plugin end # of describe instance end # of describe elasticsearch facts diff --git a/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb b/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb index 341d79e..a679b50 100644 --- a/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb +++ 
b/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb @@ -1,157 +1,161 @@ require 'spec_helper_rspec' shared_examples 'keystore instance' do |instance| describe "instance #{instance}" do subject { described_class.instances.find { |x| x.name == instance } } it { expect(subject.exists?).to be_truthy } it { expect(subject.name).to eq(instance) } it { expect(subject.settings) .to eq(['node.name', 'cloud.aws.access_key']) } end end describe Puppet::Type.type(:elasticsearch_keystore).provider(:elasticsearch_keystore) do let(:executable) { '/usr/share/elasticsearch/bin/elasticsearch-keystore' } let(:instances) { [] } before do Facter.clear Facter.add('osfamily') { setcode { 'Debian' } } allow(described_class) .to receive(:command) .with(:keystore) .and_return(executable) allow(File).to receive(:exist?) .with('/etc/elasticsearch/scripts/elasticsearch.keystore') .and_return(false) end describe 'instances' do before do allow(Dir).to receive(:[]) .with('/etc/elasticsearch/*') .and_return((['scripts'] + instances).map do |directory| "/etc/elasticsearch/#{directory}" end) instances.each do |instance| instance_dir = "/etc/elasticsearch/#{instance}" defaults_file = "/etc/default/elasticsearch-#{instance}" allow(File).to receive(:exist?) .with("#{instance_dir}/elasticsearch.keystore") .and_return(true) expect(described_class) .to receive(:execute) .with( [executable, 'list'], :custom_environment => { 'ES_INCLUDE' => defaults_file, 'ES_PATH_CONF' => "/etc/elasticsearch/#{instance}" }, - :uid => 'elasticsearch', :gid => 'elasticsearch' + :uid => 'elasticsearch', + :gid => 'elasticsearch', + :failonfail => true ) .and_return( Puppet::Util::Execution::ProcessOutput.new( "node.name\ncloud.aws.access_key\n", 0 ) ) end end it 'should have an instance method' do expect(described_class).to respond_to(:instances) end context 'without any keystores' do it 'should return no resources' do expect(described_class.instances.size).to eq(0) end end context 'with one instance' do let(:instances) { ['es-01'] } it { expect(described_class.instances.length).to eq(instances.length) } include_examples 'keystore instance', 'es-01' end context 'with multiple instances' do let(:instances) { ['es-01', 'es-02'] } it { expect(described_class.instances.length).to eq(instances.length) } include_examples 'keystore instance', 'es-01' include_examples 'keystore instance', 'es-02' end end # of describe instances describe 'prefetch' do it 'should have a prefetch method' do expect(described_class).to respond_to :prefetch end end describe 'flush' do let(:provider) { described_class.new(:name => 'es-03') } let(:resource) do Puppet::Type.type(:elasticsearch_keystore).new( :name => 'es-03', :provider => provider ) end it 'creates the keystore' do expect(described_class).to( receive(:execute) .with( [executable, 'create'], :custom_environment => { 'ES_INCLUDE' => '/etc/default/elasticsearch-es-03', 'ES_PATH_CONF' => '/etc/elasticsearch/es-03' }, - :uid => 'elasticsearch', :gid => 'elasticsearch' + :uid => 'elasticsearch', + :gid => 'elasticsearch', + :failonfail => true ) .and_return(Puppet::Util::Execution::ProcessOutput.new('', 0)) ) resource[:ensure] = :present provider.create provider.flush end it 'deletes the keystore' do expect(File).to( receive(:delete) .with(File.join(%w[/ etc elasticsearch es-03 elasticsearch.keystore])) ) resource[:ensure] = :absent provider.destroy provider.flush end it 'updates settings' do settings = { 'cloud.aws.access_key' => 'AKIAFOOBARFOOBAR', 'cloud.aws.secret_key' => 'AKIAFOOBARFOOBAR' } 
settings.each do |setting, value| expect(provider.class).to( receive(:run_keystore) .with(['add', '--force', '--stdin', setting], 'es-03', '/etc/elasticsearch', value) .and_return(Puppet::Util::Execution::ProcessOutput.new('', 0)) ) end # Note that the settings hash is passed in wrapped in an array to mimic # the behavior in real-world puppet runs. resource[:ensure] = :present resource[:settings] = [settings] provider.settings = [settings] provider.flush end end # of describe flush end # of describe Puppet::Type elasticsearch_keystore diff --git a/spec/unit/provider/elasticsearch_plugin/ruby_spec.rb b/spec/unit/provider/elasticsearch_plugin/ruby_spec.rb index 090b709..d3e3796 100644 --- a/spec/unit/provider/elasticsearch_plugin/ruby_spec.rb +++ b/spec/unit/provider/elasticsearch_plugin/ruby_spec.rb @@ -1,23 +1,23 @@ require_relative 'shared_examples' provider_class = Puppet::Type.type(:elasticsearch_plugin).provider(:elasticsearch_plugin) describe provider_class do let(:resource_name) { 'lmenezes/elasticsearch-kopf' } let(:resource) do Puppet::Type.type(:elasticsearch_plugin).new( :name => resource_name, :ensure => :present, :provider => 'elasticsearch_plugin' ) end let(:provider) do provider = provider_class.new provider.resource = resource provider end let(:shortname) { provider.plugin_name(resource_name) } let(:klass) { provider_class } - include_examples 'plugin provider', '5.0.1' + include_examples 'plugin provider', '7.0.0' end diff --git a/spec/unit/provider/elasticsearch_plugin/shared_examples.rb b/spec/unit/provider/elasticsearch_plugin/shared_examples.rb index 250ba0f..094390a 100644 --- a/spec/unit/provider/elasticsearch_plugin/shared_examples.rb +++ b/spec/unit/provider/elasticsearch_plugin/shared_examples.rb @@ -1,171 +1,147 @@ require 'spec_helper_rspec' shared_examples 'plugin provider' do |version| describe "elasticsearch #{version}" do before(:each) do allow(File).to receive(:open) allow(provider).to receive(:es_version).and_return version end describe 'setup' do it 'installs with default parameters' do expect(provider).to receive(:plugin).with( ['install', resource_name].tap do |args| if Puppet::Util::Package.versioncmp(version, '2.2.0') >= 0 args.insert 1, '--batch' end end ) provider.create end it 'installs via URLs' do resource[:url] = 'http://url/to/my/plugin.zip' expect(provider).to receive(:plugin).with( ['install'] + ['http://url/to/my/plugin.zip'].tap do |args| args.unshift('kopf', '--url') if version.start_with? '1' if Puppet::Util::Package.versioncmp(version, '2.2.0') >= 0 args.unshift '--batch' end args end ) provider.create end it 'installs with a local file' do resource[:source] = '/tmp/plugin.zip' expect(provider).to receive(:plugin).with( ['install'] + ['file:///tmp/plugin.zip'].tap do |args| args.unshift('kopf', '--url') if version.start_with? '1' if Puppet::Util::Package.versioncmp(version, '2.2.0') >= 0 args.unshift '--batch' end args end ) provider.create end describe 'proxying' do it 'installs behind a proxy' do resource[:proxy] = 'http://localhost:3128' - if version.start_with? 
'2' - expect(provider) - .to receive(:plugin) - .with([ - '-Dhttp.proxyHost=localhost', - '-Dhttp.proxyPort=3128', - '-Dhttps.proxyHost=localhost', - '-Dhttps.proxyPort=3128', - 'install', - resource_name - ]) - provider.create - else - expect(provider.with_environment do - ENV['ES_JAVA_OPTS'] - end).to eq([ + expect(provider) + .to receive(:plugin) + .with([ '-Dhttp.proxyHost=localhost', '-Dhttp.proxyPort=3128', '-Dhttps.proxyHost=localhost', - '-Dhttps.proxyPort=3128' - ].join(' ')) - end + '-Dhttps.proxyPort=3128', + 'install', + '--batch', + resource_name + ]) + provider.create end it 'uses authentication credentials' do resource[:proxy] = 'http://elastic:password@es.local:8080' - if version.start_with? '2' - expect(provider) - .to receive(:plugin) - .with([ - '-Dhttp.proxyHost=es.local', - '-Dhttp.proxyPort=8080', - '-Dhttp.proxyUser=elastic', - '-Dhttp.proxyPassword=password', - '-Dhttps.proxyHost=es.local', - '-Dhttps.proxyPort=8080', - '-Dhttps.proxyUser=elastic', - '-Dhttps.proxyPassword=password', - 'install', - resource_name - ]) - provider.create - else - expect(provider.with_environment do - ENV['ES_JAVA_OPTS'] - end).to eq([ + expect(provider) + .to receive(:plugin) + .with([ '-Dhttp.proxyHost=es.local', '-Dhttp.proxyPort=8080', '-Dhttp.proxyUser=elastic', '-Dhttp.proxyPassword=password', '-Dhttps.proxyHost=es.local', '-Dhttps.proxyPort=8080', '-Dhttps.proxyUser=elastic', - '-Dhttps.proxyPassword=password' - ].join(' ')) - end + '-Dhttps.proxyPassword=password', + 'install', + '--batch', + resource_name + ]) + provider.create end end describe 'configdir' do it 'sets the ES_PATH_CONF env var' do resource[:configdir] = '/etc/elasticsearch' expect(provider.with_environment do ENV['ES_PATH_CONF'] end).to eq('/etc/elasticsearch') end end end # of setup describe 'java_opts' do it 'uses authentication credentials' do resource[:java_opts] = ['-Des.plugins.staging=4a2ffaf5'] expect(provider.with_environment do ENV['ES_JAVA_OPTS'] end).to eq('-Des.plugins.staging=4a2ffaf5') end end describe 'java_home' do it 'sets the JAVA_HOME env var' do resource[:java_home] = '/opt/foo' expect(provider.with_environment do ENV['JAVA_HOME'] end).to eq('/opt/foo') end end describe 'java_home unset' do - existing_java_home = ENV['JAVA_HOME'] - it 'does not change JAVA_HOME env var' do + elasticsearch_java_home = '/usr/share/elasticsearch/jdk' + it 'defaults to the elasticsearch bundled JDK' do resource[:java_home] = '' expect(provider.with_environment do ENV['JAVA_HOME'] - end).to eq(existing_java_home) + end).to eq(elasticsearch_java_home) end end describe 'plugin_name' do let(:resource_name) { 'appbaseio/dejaVu' } it 'maintains mixed-case names' do expect(provider.plugin_path).to include('dejaVu') end end describe 'removal' do it 'uninstalls the plugin' do expect(provider).to receive(:plugin).with( ['remove', resource_name.split('-').last] ) provider.destroy end end end end
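A minimal declaration matching the proxy behaviour exercised in the examples above, reusing the spec's own fixture values (the plugin name and proxy URL are the spec's, not a recommendation):

    elasticsearch_plugin { 'lmenezes/elasticsearch-kopf':
      ensure => 'present',
      proxy  => 'http://localhost:3128',
    }

On Elasticsearch 2.2.0 and later the provider is expected to turn this into a `plugin install --batch` invocation carrying the corresponding -Dhttp.proxy*/-Dhttps.proxy* Java system properties, which is what the updated expectations assert.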