diff --git a/.fixtures.yml b/.fixtures.yml index c6ddc2f..2c850bd 100644 --- a/.fixtures.yml +++ b/.fixtures.yml @@ -1,35 +1,35 @@ fixtures: forge_modules: archive: repo: puppet/archive ref: 0.5.1 augeas_core: repo: puppetlabs/augeas_core ref: 1.0.4 stdlib: repo: puppetlabs/stdlib ref: 4.13.1 java: repo: puppetlabs/java - ref: 2.0.0 + ref: 6.5.0 concat: repo: puppetlabs/concat ref: 2.2.1 datacat: repo: richardc/datacat ref: 0.6.2 apt: repo: puppetlabs/apt - ref: 2.2.2 + ref: 7.4.1 zypprepo: repo: puppet/zypprepo ref: 2.2.2 yumrepo_core: repo: puppetlabs/yumrepo_core ref: 1.0.3 java_ks: puppetlabs/java_ks elastic_stack: repo: elastic/elastic_stack ref: 6.1.0 symlinks: elasticsearch: "#{source_dir}" diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 8e03da6..0000000 --- a/.travis.yml +++ /dev/null @@ -1,150 +0,0 @@ -sudo: required -group: deprecated-2017Q4 -services: -- docker -cache: - bundler: true - directories: - - spec/fixtures/artifacts - - spec/fixtures/modules -language: ruby -script: travis_retry bundle exec rake $TASK -jobs: - allow_failures: - - env: - - TASK=beaker:ubuntu-server-1404-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:ubuntu-server-1404-x64:snapshot - - env: - - TASK=beaker:ubuntu-server-1604-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:ubuntu-server-1604-x64:snapshot - - env: - - TASK=beaker:centos-6-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:centos-6-x64:snapshot - - env: - - TASK=beaker:centos-7-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:centos-7-x64:snapshot - - env: - - TASK=beaker:debian-8-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:debian-8-x64:snapshot - - env: - - TASK=beaker:debian-9-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:debian-9-x64:snapshot - include: - - stage: intake - env: - - TASK=intake - - PUPPET_VERSION='~> 4.10.0' - - env: - - TASK=intake - - env: - - TASK=intake - - PUPPET_VERSION='~> 5.0' - - env: - - TASK=intake - - PUPPET_VERSION='~> 6.0' - - stage: acceptance - env: - - TASK=beaker:centos-6-x64:acceptance - - env: - - TASK=beaker:centos-6-x64:acceptance[5.6.9] - - env: - - TASK=beaker:centos-6-x64:acceptance[2.4.6] - - env: - - TASK=beaker:centos-7-x64:acceptance - - env: - - TASK=beaker:centos-7-x64:acceptance[5.6.9] - - env: - - TASK=beaker:centos-7-x64:acceptance[2.4.6] - - env: - - TASK=beaker:amazonlinux-1-x64:acceptance - - env: - - TASK=beaker:amazonlinux-1-x64:acceptance[5.6.9] - - env: - - TASK=beaker:amazonlinux-1-x64:acceptance[2.4.6] - - env: - - TASK=beaker:oracle-6-x64:acceptance - - env: - - TASK=beaker:oracle-6-x64:acceptance[5.6.9] - - env: - - TASK=beaker:oracle-6-x64:acceptance[2.4.6] - - env: - - TASK=beaker:oracle-7-x64:acceptance - - env: - - TASK=beaker:oracle-7-x64:acceptance[5.6.9] - - env: - - TASK=beaker:oracle-7-x64:acceptance[2.4.6] - - env: - - TASK=beaker:debian-8-x64:acceptance - - env: - - TASK=beaker:debian-8-x64:acceptance[5.6.9] - - env: - - TASK=beaker:debian-8-x64:acceptance[2.4.6] - - env: - - TASK=beaker:debian-9-x64:acceptance - - env: - - TASK=beaker:debian-9-x64:acceptance[5.6.9] - - env: - - TASK=beaker:debian-9-x64:acceptance[2.4.6] - - env: - - TASK=beaker:ubuntu-server-1404-x64:acceptance - - env: - - TASK=beaker:ubuntu-server-1404-x64:acceptance[5.6.9] - - env: - - TASK=beaker:ubuntu-server-1404-x64:acceptance[2.4.6] - - env: - - TASK=beaker:ubuntu-server-1604-x64:acceptance - - env: - - TASK=beaker:ubuntu-server-1604-x64:acceptance[5.6.9] - - env: - - 
TASK=beaker:ubuntu-server-1604-x64:acceptance[2.4.6] - - stage: snapshots - env: - - TASK=beaker:ubuntu-server-1404-x64:snapshot - env: - - OSS_PACKAGE=true - - TASK=beaker:ubuntu-server-1404-x64:snapshot - - env: - - TASK=beaker:ubuntu-server-1604-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:ubuntu-server-1604-x64:snapshot - - env: - - TASK=beaker:centos-6-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:centos-6-x64:snapshot - - env: - - TASK=beaker:centos-7-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:centos-7-x64:snapshot - - env: - - TASK=beaker:debian-8-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:debian-8-x64:snapshot - - env: - - TASK=beaker:debian-9-x64:snapshot - - env: - - OSS_PACKAGE=true - - TASK=beaker:debian-9-x64:snapshot -notifications: - slack: - secure: T1FO+ttrJNH+bXmNR6349qcttG68Qr1xmMqVVRnUr7+129GQO5174Z8MFC8ck0qOCZGHO7GCNO5seNFflrjF/5EKbdkmVnqhf9gVa9kN7I4psMzxJX9bp29xJA6m3wA4VqCosDKVFSfilDZujAblWT+KDHZLjP8sEWEnHPvCjf69S2XDQEWUoxZan5V9IJQas4XR+hMdIZTA3ChVrEyqRfeehAZImbAr/LH8zChZaTdHZQY7p2rN3+qVNi3+GISV9fNPpOCynnX/ACbdUaRt3+1etxGGaQMPzGmTejN3VlMw4OZRXImb6HQ2rXE+fNCASXiKiwylxTbriQsS0dFv4skxH03YlYM8pqaBpeIOwzf4n45tTzdAQZJMC5cOb+RvwS7qkAwuaVlVxiiA+MWRG/UcFpWS+iNn4KEKxbpBjYP8X1JIP9DlHLME7DNMM2pePv9X6ZjY6eDhVM1gbKi77dXOo5y2Sp0ru8QkLpIKFVXS01O+x7oDHHv2Osvih0jNMgM66Byso3KJYJ6EJ0D2/3Q9ZNpVM4CMuIY5pBQfXf691zqkBHI6JUnU6VMw97cH0k6Gq0ypZoW5trXdnRC5aEg4jKKid84zKmAeTpj/iMuagyb/a5msJstIVboynRtfDHR0J8WWhfSU2wzqKAb6L66iyRe62Fe8OGzLhk2+KNU= -env: - global: - - secure: WFFcjwBIRBG2zyk4c8Ugq0tgI1YaH/+s5eV9h3i2kR1ggobT+nrNqn3hCOkmPtwGYPBNjVj6yp+7qy//MRe8AS2eo1XuMD/P4MYcDGmZiMnqPhz1UsLltGTYlh3y6jl9DJvNujFBQMnAu/ey2g/iWrcHdtl2qninvN3wOrXi2Bs= - - secure: bvBaKoV5wBj2eQb4Zx8E2NaBDsMOyuHczRByVLNX5YqeuRWL9kcsUYzAUshFpd2GFa4tzfnSLKCp0+h3T4Uei5e8CjV5dx0VFmijXoZif0OJplRaJ+S3dJSluTV04NoE4u6l5Pg6kkFTMnAaApKVB4je2nSlgvrm/tuavhd9i0M= - - secure: akshyW92CqV3Wt+rzQ3ScxIG55ILEaiwQ011rNF1kCXTds5HrHOGy++4VEidaTpems8OQH2+hCLK5r/7FXXgRQEV/TRYRGhp/y9mwqdioyDQ1D0yA3f42NWGNDGg2yOTTbhqQFJg394LDMiLmnevoiajEVIH+Ksr5bV/cIJc4Tc= diff --git a/CHANGELOG.md b/CHANGELOG.md index 512d989..1cec859 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,1004 +1,1026 @@ ## x.x.x (Month Day, Year) #### Features #### Fixes +## 7.0.0 (December 23, 2020) + +This is a new major version release of the Elasticsearch module that contains a number of breaking changes and adds support for deploying +and managing Elasticsearch 7.x. + +### Breaking changes +* Removed support for running multiple instances of Elasticsearch on a single host +* Removed support for Elasticsearch older than version 6.x + +### Features +* Support for deploying Elasticsearch 7.x +* Support and Testing for a number of newer OS' + * Amazon Linux 2 + * CentOS 8 + * Oracle Linux 8 + * Debian 10 + * Ubuntu 18.04 + * Ubuntu 20.04 + +### Bug fixes +Various little bug fixes as a result of cleaning up and refactoring a lot of the code. + ## 6.4.0 (August 28, 2019) #### Features * Support for Puppet 6 #### Fixes * Fix "Could not autoload" errors in some custom types/providers. * Resolved an issue arising from the use of `hiera()` in Hiera yaml data files. ## 6.3.4 (August 7, 2019) #### Puppet < 4.10 No Longer Supported Puppet 4.10.0 is the new minimum required version of Puppet. #### Fixes * The Elasticsearch log directory is no longer recursively managed to avoid stomping on the user/mode settings that Elasticsearch prefers. 
* Package management on apt-based systems no longer encounters dependency errors when `manage_repo => false`. * Plugin configuration files are now more well-supported by respecting subdirectory copy recursion. See `elasticsearch::configdir_recurselimit` for more information. * An error related to elasticsearch_roles and `yield` errors has been fixed * Correctly permit instances to be set to `absent` without errors. ## 6.3.3 (September 7, 2018) Note that this release includes changes to a default value that may affect users that rely on `curl` or `wget` to retrieve Elasticsearch package files, as now all certificates are validated by default. To preserve existing behavior, set `$elasticsearch::download_tool_verify_certificates` to `false`. #### Fixes * Update puppetlabs-stdlib dependency to < 6.0.0 * By default, package files downloaded with tools like wget or curl (i.e., the `$elasticsearch::download_tool` parameter) now verify certificates by default and a new boolean parameter has been added to indicate whether to ignore certificates (`$elasticsearch::download_tool_verify_certificates`). ## 6.3.2 (August 28, 2018) #### Fixes * Fix an issue with string coercion for certain fields in pipelines. ## 6.3.1 (August 6, 2018) Primarily a bugfix release. #### Fixes * REST-based resources are now coerced to string values to uniformly compare user defined values and Elasticsearch API responses. * Resolve deprecation warnings related to use of the deprecated is_array() function. * Fixed an erroneous inclusion of '<' in logging.yml * Resolve deprecation warnings related to use of the deprecated is_string() function. ## 6.3.0 (June 18, 2018) ### Migration Guide Elasticsearch 6.3 includes several big changes that are reflected in this module. When upgrading from module versions prior to 6.3, there are a number of upgrade considerations to take into account: * This module defaults to the upstream package repositories, which now include X-Pack bundled by default. To preserve previous behavior which does _not_ include X-Pack, follow the `README` instructions to configure `oss`-only repositories/packages. * Note that if your system was previously using the `elasticsearch` package and you instead choose to move to the `oss` distribution, the `elasticsearch` and `elasticsearch-oss` packages may conflict. If that occurs, consider ensuring that the `elasticsearch` package is absent before the `::elasticsearch` class runs. This module does not explicitly remove the conflicting package to avoid unexpected package removal. * Use of the `elastic_stack::repo` class for managing package repositories brings a couple changes: * All repository-level parameters and settings have been removed from the `::elasticsearch` class. These parameters can now be set on the `elastic_stack::repo` class. * This may mean that leftover yum/apt/etc. repositories named `elasticsearch` may persist after upgrade. * Some changes have been made to align this module's file-level permissions with upstream defaults on some configuration, data, and logging directories. Though these have been tested, operators should be aware that some permissions may change on-disk after upgrading to version >= 6.3.x of this module. #### Features * Added support for managing Elasticsearch licenses. * This module now uses the elastic-stack module to manage package repositories. * Supports OSS packaging distribution. * X-Pack bundled with 6.3 support. #### Fixes * Ensure that the stock Elasticsearch service is not running. 
* Service files for removed instances were previously set to ensure => absent on removal. Because this limits Puppet's ability to verify that the named service is running or not, these service files are always present now whether an instance is set to present or absent. * The service defaults file now enforces user/group ownership inline with the service user runtime. * The `scripts` configuration directory is now recursively copied instead of symlinked to avoid Elasticsearch security manager permission errors. * X-Pack and other meta-plugins are now properly detected as installed by the native plugin provider. ## 6.2.2 (March 13, 2018) #### Fixes * Fixed language compatibility errors that could arise when using JRuby 1.7 on Puppet Servers. ## 6.2.1 (February 14, 2018) This is primarily a bugfix release to address an issue when installing Elasticsearch 6.2.x plugins such as X-Pack that use the new meta-plugin architecture. While the change has been tested with several plugins and versions of Elasticsearch, if any unexpected behavior arises, help is available on the [Elastic forums](https://discuss.elastic.co/) or via [an issue in the puppet-elasticsearch Github repository](https://github.com/elastic/puppet-elasticsearch/issues). #### Fixes * Rewrote the `exists?` logic for the `elasticsearch_plugin` provider. This fundamentally changes how the module detects the presence of plugins but should be backwards compatible. ## 6.2.0 (February 9, 2018) #### Features * Add support for Amazon Linux 2 * Add support for managing Elasticsearch Snapshot Repository resources #### Fixes * Fixed an issue when setting `file_rolling_type => file` in Elasticsearch 6. * Removed ExecStartPre=- from systemd template ## 6.1.0 (December 18, 2017) #### Features * Removed `tea` module dependency for pre-existing types in `stdlib` module. * Support `file` as a `file_rolling_type`. * Added `java_opts` parameter to `elasticsearch::plugin` resource. * Brought some options in `jvm.options` up-to-date with upstream. * Plugins can now have their `JAVA_HOME` set through the `java_home` parameter. #### Fixes * Fixed issue with `ES_PATH_CONF` being unset in SysV init files. ## 6.0.0 (November 14, 2017) Major version upgrade with several important deprecations: * Puppet version 3 is no longer supported. * Package pinning is no longer supported. * Java installation is no longer supported. * The python and ruby defined types have been removed. * Repo management through `manage_repo` is now set to `true` by default. * All `*_hiera_merge` parameters have been removed. Minor: * elasticsearch::plugin only accepts `present` or `absent` * Some REST-resource based providers (such as templates and pipelines) now validate parameters (such as numeric port numbers) more rigorously. The following migration guide is intended to help aid in upgrading this module. ### Migration Guide #### Puppet 3.x No Longer Supported Puppet 4.5.0 is the new minimum required version of Puppet, which offers better safety, module metadata, and Ruby features. Migrating from Puppet 3 to Puppet 4 is beyond the scope of this guide, but the [official upgrade documentation](https://docs.puppet.com/upgrade/upgrade_steps.html) can help. As with any version or module upgrade, remember to restart any agents and master servers as needed. #### Package Pinning No Longer Supported Package pinning caused lots of unexpected behavior and usually caused more problems than solutions. 
If you still require package pinning, consider using the [`apt::pin` resource](https://forge.puppet.com/puppetlabs/apt#pin-a-specific-release) on Debian-based systems or a [`yum::versionlock` resource from the yum module](https://forge.puppet.com/puppet/yum#lock-a-package-with-the-versionlock-plugin) for Red Hat-based systems. #### Java Installation No Longer Supported Java installation was a very simple operation in this module which simply declared an instance of the `java` class but created conflicts for users who managed Java separately. If you still wish to configure Java alongside this module, consider using the [puppetlabs/java](https://forge.puppet.com/puppetlabs/java) module and installing Java with the following configuration: ```puppet class { "java" : distribution => "jre" } ``` This will install a version of Java suitable for Elasticsearch in most situations. Note that in some older distributions, you may need to take extra steps to install a more recent version of Java that supports Elasticsearch. #### Removal of Python and Ruby Resources These resource types were simple wrappers around `package` resources with their providers set to `pip` and `gem`, respectively. Simply defining your own resources similarly to: ```puppet package { 'elasticsearch' : provider => 'pip' } ``` Is sufficient. #### Automatic Package Repository Management This parameter is now set to `true` by default to automatically manage the Elastic repository. If you do not wish to configure the repository to automatically retrieve package updates, set this parameter to `false`: ```puppet class { 'elasticsearch': manage_repo => false } ``` #### Removal of `hiera_merge` Parameters Updates to Hiera in later versions of Puppet mean that you can set merging behavior in end-user configuration. Read [the upstream Hiera documentation regarding `lookup_options`](https://puppet.com/docs/puppet/4.10/hiera_merging.html#configuring-merge-behavior-in-hiera-data) to learn how to configure Hiera appropriately for your needs. ## 5.5.0 (November 13, 2017) #### Features * Updated puppetlabs/java dependency to `< 5.0.0` #### Fixes * Properly support plugin installation on 6.x series with explicit `ES_PATH_CONF` * set file ownership of systemd service file to root user/group * Fix propagating the pid_dir into OpenBSD rcscript ## 5.4.3 (September 1, 2017) #### Features * Bumped puppet/java dependency to < 3.0.0 #### Fixes * Append `--quiet` flag to >= 5.x versions of Elasticsearch systemd service units * Disable es_facts collection on SearchGuard nodes with TLS enabled ## 5.4.2 (August 18, 2017) #### Features * Bumped puppet/yum dependency to < 3.0.0 #### Fixes * Custom facts no longer attempt to connect to SSL/TLS secured ports. ## 5.4.1 (August 7, 2017) Fixed an issue where `logging_yml_ensure` and `log4j2_ensure` would not propagate to `elasticsearch::instance` resources. ## 5.4.0 (August 3, 2017) #### Features * The `api_timeout` parameter is now passed to the `es_instance_conn_validator` resource for index, pipeline, and template defined types. * Updated puppetlabs/apt dependency to < 5.0.0. * Both the `logging.yml` and `log4j2.properties` files can be selectively enabled/disabled with the `logging_yml_ensure` and `log4j2_ensure` parameters on the `elasticsearch` class and `elasticsearch::instance` defined type. * `jvm_options` are now controllable on a per-instance basis. #### Fixes * Fixed an edge case with `es_instance_validator` in which ruby connection errors were not caught. 
* Plugins with colon-delimited names (such as maven plugins) are properly handled now. * Fixed a bug that would cause dependency cycles when using parameters to create defined types. ## 5.3.1 (June 14, 2017) ### Summary Minor release to fix bugs related to the `elasticsearch_keystore` type and generated docs. #### Features * Moved documentation to Yard for doc auto-generation for all classes/types/etc. #### Fixes * Fixed dependency order bug with the `elasticsearch_keystore` type and augeas defaults resource. ## 5.3.0 (June 5, 2017) ### Summary Minor bugfix release with added support for managing Elasticsearch keystores, custom repository URLs, and more. #### Features * Failures are no longer raised when no instances are defined for a plugin and service restarts are not requested. * The `datadir` for instances can now be shared among multiple instances by using the `datadir_instance_directories` parameter. * `repo_baseurl` is now exposed as a top-level parameter for users who wish to control custom repositories. * `elasticsearch-keystore` values can now be managed via native Puppet resources. #### Fixes * log4j template now properly respects deprecation logging settings. ## 5.2.0 (May 5, 2017) ### Summary Release supporting several new features and bugfixes for 5.4.0 users and users who need the ability to update plugins. #### Features * Support for Shield/X-Pack logging configuration file added. * The `elasticsearch::script` type now supports recursively managing directories of scripts. * All module defined types can now be managed as top-level hash parameters to the `elasticsearch` class (primarily for hiera and PE) #### Fixes * Fixed a bug that prevented plugins from being updated properly. * Fixed deprecated `default.path` options introduced in Elasticsearch 5.4.0. ## 5.1.1 (April 13, 2017) ### Summary #### Features * Instance configs now have highest precedence when constructing the final yaml config file. #### Fixes This is a hotfix release to support users affected by [an upstream Elasticsearch issue](https://github.com/elastic/elasticsearch/issues/6887). See the [associated issue](https://github.com/elastic/puppet-elasticsearch/issues/802#issuecomment-293295930) for details regarding the workaround. The change implemented in this release is to place the `elasticsearch::instance` `config` parameter at the highest precedence when merging the final config yaml which permits users manually override `path.data` values. ## 5.1.0 (February 28, 2017) ### Summary Ingest pipeline and index settings support. Minor bugfixes. #### Features * Ingestion pipelines supported via custom resources. * Index settings support. #### Fixes * Custom facts no longer fail when trying to read unreadable elasticsearch config files. * `Accept` and `Content-Type` headers properly set for providers (to support ES 6.x) ## 5.0.0 (February 9, 2017) Going forward, This module will follow Elasticsearch's upstream major version to indicate compatability. That is, version 5.x of this module supports version 5 of Elasticsearch, and version 6.x of this module will be released once Elasticsearch 6 support is added. ### Summary Note that this is a **major version release**! Please read the release notes carefully before upgrading to avoid downtime/unexpected behavior. Remember to restart any puppetmaster servers to clear provider caches and pull in updated code. 
### Backwards-Incompatible Changes * The `elasticsearch::shield::user` and `elasticsearch::shield::role` resources have been renamed to `elasticsearch::user` and `elasticsearch::role` since the resource now handles both Shield and X-Pack. * Both Shield and X-Pack configuration files are kept in `/etc/elasticsearch/shield` and `/etc/elasticsearch/x-pack`, respectively. If you previously managed Shield resources with version 0.x of this module, you may need to migrate files from `/usr/share/elasticsearch/shield`. * The default data directory has been changed to `/var/lib/elasticsearch`. If you used the previous default (the Elasticsearch home directory, `/usr/share/elasticsearch/data`), you may need to migrate your data. * The first changes that may be Elasticsearch 1.x-incompatible have been introduced (see the [elasticsearch support lifecycle](https://www.elastic.co/support/eol)). This only impacts version 1.x running on systemd-based distributions. * sysctl management has been removed (and the module removed as a dependency for this module), and puppet/yum is used in lieu of ceritsc/yum. #### Features * Support management of the global jvm.options configuration file. * X-Pack support added. * Restricted permissions to the elasticsearch.yml file. * Deprecation log configuration support added. * Synced systemd service file with upstream. #### Bugfixes * Fixed case in which index template could prepend an additional 'index.' to index settings. * Fixed a case in which dependency cycles could arise when pinning packages on CentOS. * No longer recursively change the Elasticsearch home directory's lib/ to the elasticsearch user. * Unused defaults values now purged from instance init defaults files. #### Changes * Changed default data directory to /var/lib * sysctl settings are no longer managed by the thias/sysctl module. * Calls to `elasticsearch -version` in elasticsearch::plugin code replaced with native Puppet code to resolve Elasticsearch package version. Should improve resiliency when managing plugins. * Shield and X-Pack configuration files are stored in /etc/elasticsearch instead of /usr/share/elasticsearch. * Removed deprecated ceritsc/yum module in favor of puppet/yum. #### Testing changes ## 0.15.1 (December 1, 2016) ### Summary Primarily a bugfix release for Elasticsearch 5.x support-related issues. Note updated minimum required puppet versions as well. #### Features #### Bugfixes * Removed ES_HEAP_SIZE check in init scripts for Elasticsearch 5.x * Changed sysctl value to a string to avoid type errors for some versions * Fixed a $LOAD_PATH error that appeared in some cases for puppet_x/elastic/es_versioning #### Changes * Updated minimium required version for Puppet and PE to reflect tested versions and versions supported by Puppet Labs #### Testing changes ## 0.15.0 (November 17, 2016) ### Summary * Support for Ubuntu Xenial (16.04) formally declared. * Initial support for running Elasticsearch 5.x series. #### Features * Support management of 5.x-style Elastic yum/apt package repositories. * Support service scripts for 5.x series of Elasticsearch #### Bugfixes * Update the apt::source call to not cause deprecation warnings * Updated module metadata to correctly require puppet-stdlib with validate_integer() #### Changes #### Testing changes * Ubuntu Xenial (16.04) added to the test matrix. ## 0.14.0 (October 12, 2016) ### Summary Primarily a bugfix release for issues related to plugin proxy functionality, various system service fixes, and directory permissions. 
This release also adds the ability to define logging rolling file settings and a CA file/path for template API access. #### Features * Added 'file_rolling_type' parameter to allow selecting file logging rotation type between "dailyRollingFile" or "rollingFile". Also added 'daily_rolling_date_pattern', 'rolling_file_max_backup_index' and 'rolling_file_max_file_size' for file rolling customization. #### Bugfixes * Permissions on the Elasticsearch plugin directory have been fixed to permit world read rights. * The service systemd unit now `Wants=` a network target to fix bootup parallelization problems. * Recursively create the logdir for elasticsearch when creating multiple instances * Files and directories with root ownership now specify UID/GID 0 instead to improve compatability with *BSDs. * Elasticsearch Debian init file changed to avoid throwing errors when DATA_DIR, WORK_DIR and/or LOG_DIR were an empty variable. * Fixed a broken File dependency when a plugin was set to absent and ::elasticsearch set to present. * Fixed issue when using the `proxy` parameter on plugins in Elasticsearch 2.x. #### Changes * The `api_ca_file` and `api_ca_path` parameters have been added to support custom CA bundles for API access. * Numerics in elasticsearch.yml will always be properly unquoted. * puppetlabs/java is now listed as a dependency in metadata.json to avoid unexpected installation problems. #### Testing changes ## 0.13.2 (August 29, 2016) ### Summary Primarily a bugfix release to resolve HTTPS use in elasticsearch::template resources, 5.x plugin operations, and plugin file permission enforcement. #### Features * Plugin installation for the 5.x series of Elasticsearch is now properly supported. #### Bugfixes * Recursively enforce correct plugin directory mode to avoid Elasticsearch startup permissions errors. * Fixed an edge case where dependency cycles could arise when managing absent resources. * Elasticsearch templates now properly use HTTPS when instructed to do so. #### Changes * Updated the elasticsearch_template type to return more helpful error output. * Updated the es_instance_conn_validator type to silence deprecation warnings in Puppet >= 4. #### Testing changes ## 0.13.1 (August 8, 2016) ### Summary Lingering bugfixes from elasticsearch::template changes. More robust systemd mask handling. Updated some upstream module parameters for deprecation warnings. Support for the Shield `system_key` file. #### Features * Added `system_key` parameter to the `elasticsearch` class and `elasticsearch::instance` type for placing Shield system keys. #### Bugfixes * Fixed systemd elasticsearch.service unit masking to use systemctl rather than raw symlinking to avoid puppet file backup errors. * Fixed a couple of cases that broke compatability with older versions of puppet (elasticsearch_template types on puppet versions prior to 3.6 and yumrepo parameters on puppet versions prior to 3.5.1) * Fixed issues that caused templates to be incorrectly detected as out-of-sync and thus always changed on each puppet run. * Resources are now explicitly ordered to ensure behavior such as plugins being installed before instance start, users managed before templates changed, etc. #### Changes * Updated repository gpg fingerprint key to long form to silence module warnings. #### Testing changes ## 0.13.0 (August 1, 2016) ### Summary Rewritten elasticsearch::template using native type and provider. Fixed and added additional proxy parameters to elasticsearch::plugin instances. 
Exposed repo priority parameters for apt and yum repos. #### Features * In addition to better consistency, the `elasticsearch::template` type now also accepts various `api_*` parameters to control how access to the Elasticsearch API is configured (there are top-level parameters that are inherited and can be overwritten in `elasticsearch::api_*`). * The `elasticsearch::config` parameter now supports deep hiera merging. * Added the `elasticsearch::repo_priority` parameter to support apt and yum repository priority configuration. * Added `proxy_username` and `proxy_password` parameters to `elasticsearch::plugin`. #### Bugfixes * Content of templates should now properly trigger new API PUT requests when the index template stored in Elasticsearch differs from the template defined in puppet. * Installing plugins with proxy parameters now works correctly due to changed Java property flags. * The `elasticsearch::plugin::module_dir` parameter has been re-implemented to aid in working around plugins with non-standard plugin directories. #### Changes * The `file` parameter on the `elasticsearch::template` defined type has been deprecated to be consistent with usage of the `source` parameter for other types. #### Testing changes ## 0.12.0 (July 20, 2016) IMPORTANT! A bug was fixed that mistakenly added /var/lib to the list of DATA_DIR paths on Debian-based systems. This release removes that environment variable, which could potentially change path.data directories for instances of Elasticsearch. Take proper precautions when upgrading to avoid unexpected downtime or data loss (test module upgrades, et cetera). ### Summary Rewritten yaml generator, code cleanup, and various bugfixes. Configuration file yaml no longer nested. Service no longer restarts by default, and exposes more granular restart options. #### Features * The additional parameters restart_config_change, restart_package_change, and restart_plugin_change have been added for more granular control over service restarts. #### Bugfixes * Special yaml cases such as arrays of hashes and strings like "::" are properly supported. * Previous Debian SysV init scripts mistakenly set the `DATA_DIR` environment variable to a non-default value. * Some plugins failed installation due to capitalization munging, the elasticsearch_plugin provider no longer forces downcasing. #### Changes * The `install_options` parameter on the `elasticsearch::plugin` type has been removed. This was an undocumented parameter that often caused problems for users. * The `elasticsearch.service` systemd unit is no longer removed but masked by default, effectively hiding it from systemd but retaining the upstream vendor unit on disk for package management consistency. * `restart_on_change` now defaults to false to reduce unexpected cluster downtime (can be set to true if desired). * Package pinning is now contained within a separate class, so users can opt to manage package repositories manually and still use this module's pinning feature. * All configuration hashes are now flattened into dot-notated yaml in the elasticsearch configuration file. This should be fairly transparent in terms of behavior, though the config file formatting will change. #### Testing changes * The acceptance test suite has been dramatically slimmed to cut down on testing time and reduce false positives. ## 0.11.0 ( May 23, 2016 ) ### Summary Shield support, SLES support, and overhauled testing setup. 
#### Features * Support for shield * TLS Certificate management * Users (role and password management for file-based realms) * Roles (file-based with mapping support) * Support (repository proxies)[https://github.com/elastic/puppet-elasticsearch/pull/615] * Support for (SSL auth on API calls)[https://github.com/elastic/puppet-elasticsearch/pull/577] #### Bugfixes * (Fix Facter calls)[https://github.com/elastic/puppet-elasticsearch/pull/590] in custom providers #### Changes #### Testing changes * Overhaul testing methodology, see CONTRIBUTING for updates * Add SLES 12, Oracle 6, and PE 2016.1.1 to testing matrix * Enforce strict variable checking #### Known bugs * This is the first release with Shield support, some untested edge cases may exist ##0.10.3 ( Feb 08, 2016 ) ###Summary Adding support for OpenBSD and minor fixes ####Features * Add required changes to work with ES 2.2.x plugins * Support for custom log directory * Support for OpenBSD ####Bugfixes * Add correct relation to file resource and plugin installation * Notify service when upgrading the package ####Changes * Remove plugin dir when upgrading Elasticsearch ####Testing changes ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.10.2 ( Jan 19, 2016 ) ###Summary Bugfix release and adding Gentoo support ####Features * Added Gentoo support ####Bugfixes * Create init script when set to unmanaged * init_template variable was not passed on correctly to other classes / defines * Fix issue with plugin type that caused run to stall * Export ES_GC_LOG_FILE in init scripts ####Changes * Improve documentation about init_defaults * Update common files * Removed recurse option on data directory management * Add retry functionality to plugin type ####Testing changes ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.10.1 ( Dec 17, 2015 ) ###Summary Bugfix release for proxy functionality in plugin installation ####Features ####Bugfixes * Proxy settings were not passed on correctly ####Changes * Cleanup .pmtignore to exclude more files ####Testing changes ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.10.0 ( Dec 14, 2015 ) ###Summary Module now works with ES 2.x completely ####Features * Work with ES 2.x new plugin system and remain to work with 1.x * Implemented datacat module from Richard Clamp so other modules can hook into it for adding configuration options * Fixed init and systemd files to work with 1.x and 2.x * Made the module work with newer pl-apt module versions * Export es_include so it is passed on to ES * Ability to supply long gpg key for apt repo ####Bugfixes * Documentation and typographical fixes * Do not force puppet:/// schema resource * Use package resource defaults rather than setting provider and source ####Changes ####Testing changes * Improve unit testing and shorten the runtime ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.9 ( Sep 01, 2015 ) ###Summary Bugfix release and extra features ####Features * Work with ES 2.x * Add Java 8 detection in debian init script * Improve offline plugin installation ####Bugfixes * Fix a bug with new ruby versions but older puppet versions causing type error * Fix config tempate to use correct ruby scoping * Fix regex retrieving proxy port while downloading plugin * Fix systemd template for better variable handling * Template define was using wrong pathing for removal 
####Changes ####Testing changes ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.8 ( Jul 07, 2015 ) ###Summary ####Features * Work with ES 2.x ####Bugfixes * Fix plugin to maintain backwards compatibility ####Changes ####Testing changes * ensure testing works with Puppet 4.x ( Rspec and Acceptance ) ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.7 ( Jun 24, 2015 ) ###Summary This releases adds several important features and fixes an important plugin installation issue with ES 1.6 and higher. ####Features * Automate plugin dir extraction * use init service provider for Amazon Linux * Add Puppetlabs/apt and ceritsc/yum as required modules * Added Timeout to fetching facts in case ES does not respond * Add proxy settings for package download ####Bugfixes * Fixed systemd template to fix issue with LimitMEMLOCK setting * Improve package version handling when specifying a version * Add tmpfiles.d file to manage sub dir in /var/run path * Fix plugin installations for ES 1.6 and higher ####Changes * Removed Modulefile, only maintaining metadata.json file ####Testing changes * Added unit testing for package pinning feature * Added integration testing with Elasticsearch to find issues earlier * Fix openSUSE 13 testing ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.6 ( May 28, 2015 ) ###Summary Bugfix release 0.9.6 ####Features * Implemented package version pinning to avoid accidental upgrading * Added support for Debian 8 * Added support for upgrading plugins * Managing LimitNOFILE and LimitMEMLOCK settings in systemd ####Bugfixes ####Changes * Dropped official support for PE 3.1.x and 3.2.x ####Testing changes * Several testing changes implemented to increase coverage ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.5( Apr 16, 2015 ) ###Summary Bugfix release 0.9.5 We reverted the change that implemented the full 40 character for the apt repo key. This caused issues with some older versions of the puppetlabs-apt module ####Features ####Bugfixes * Revert using the full 40 character for the apt repo key. 
####Changes ####Testing changes ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.4( Apr 14, 2015 ) ###Summary Bugfix release 0.9.4 ####Features * Add the ability to create and populate scripts ####Bugfixes * add support for init_defaults_file to elasticsearch::instance * Update apt key to full 40characters ####Changes * Fix readme regarding module_dir with plugins ####Testing changes * Adding staged removal test * Convert git urls to https * Add centos7 node config ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.3( Mar 24, 2015 ) ###Summary Bugfix release 0.9.3 ####Features ####Bugfixes * Not setting repo_version did not give the correct error * Systemd file did not contain User/Group values ####Changes * Brand rename from Elasticsearch to Elastic ####Testing changes * Moved from multiple Gemfiles to single Gemfile ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.2( Mar 06, 2015 ) ###Summary Bugfix release 0.9.2 ####Features * Introducing es_instance_conn_validator resource to verify instance availability ####Bugfixes * Fix missing data path when using the path config setting but not setting the data path ####Changes None ####Testing changes None ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.1 ( Feb 23, 2015 ) ###Summary This is the first bug fix release for 0.9 version. A bug was reported with the recursive file management. ####Features None ####Bugfixes * Fix recursive file management * Set undefined variables to work with strict_variables ####Changes None ####Testing changes None ####Known bugs * Possible package conflicts when using ruby/python defines with main package name ##0.9.0 ( Feb 02, 2015 ) ###Summary This release is the first one towards 1.0 release. Our planning is to provide LTS releases with the puppet module ####Features * Support for using hiera to define instances and plugins. * Support for openSUSE 13.x * Custom facts about the installed Elasticsearch instance(s) * Proxy host/port support for the plugin installation * Ability to supply a custom logging.yml template ####Bugfixes * Ensure file owners are correct accross all related files * Fix of possible service name conflict * Empty main config would fail with instances * Removal of standard files from packages we dont use * Ensuring correct sequence of plugin and template defines * Added ES_CLASSPATH export to init scripts ####Changes * Java installation to use puppetlabs-java module * Added Support and testing for Puppet 3.7 and PE 3.7 * Improve metadata.json based on scoring from Forge ####Testing changes * Added testing against Puppet 3.7 and PE 3.7 * Using rspec3 * Using rspec-puppet-facts gem simplifies rspec testing ####Known Bugs * Possible package conflicts when using ruby/python defines with main package name ##0.4.0 ( Jun 18, 2014 ) - Backwards compatible breaking release ###Summary This release introduces instances to facilitate the option to have more then a single instance running on the host system. 
####Features * Rewrite module to incorperate multi instance support * New readme layout ####Bugfixes * None ####Changes * Adding ec2-linux osfamily for repo management * Retry behaviour for plugin installation ####Testing changes * Adding Puppet 3.6.x testing * Ubuntu 14.04 testing * Using new docker images * Pin rspec to 2.14.x ####Known Bugs * No known bugs ##0.3.2 ( May 15, 2014 ) * Add support for SLC/Scientific Linux CERN ( PR #121 ) * Add support for custom package names ( PR #122 ) * Fix python and ruby client defines to avoid name clashes. * Add ability to use stage instead of anchor for repo class * Minor fixes to system tests ##0.3.1 ( April 22, 2014 ) * Ensure we create the plugin directory before installing plugins * Added Puppet 3.5.x to rspec and system tests ##0.3.0 ( April 2, 2014 ) * Fix minor issue with yumrepo in repo class ( PR #92 ) * Implement openSUSE support * Implement Junit reporting for tests * Adding more system tests and convert to Docker images * Use Augeas for managing the defaults file * Add retry to package download exec * Add management to manage the logging.yml file * Improve inline documentation * Improve support for Debian 6 * Improve augeas for values with spaces * Run plugin install as ES user ( PR #108 ) * Fix rights for the plugin directory * Pin Rake for Ruby 1.8.7 * Adding new metadata for Forge. * Increase time for retry to insert the template ##0.2.4 ( Feb 21, 2014 ) * Set puppetlabs-stdlib dependency version from 3.0.0 to 3.2.0 to be inline with other modules * Let puppet run fail when template insert fails * Documentation improvements ( PR #77, #78, #83 ) * Added beaker system tests * Fixed template define after failing system tests * Some fixes so variables are more inline with intended structure ##0.2.3 ( Feb 06, 2014 ) * Add repository management feature * Improve testing coverage and implement basic resource coverage reporting * Add puppet 3.4.x testing * Fix dependency in template define ( PR #72 ) * For apt repo change from key server to key file ##0.2.2 ( Jan 23, 2014 ) * Ensure exec names are unique. This caused issues when using our logstash module * Add spec tests for plugin define ##0.2.1 ( Jan 22, 2014 ) * Simplify the management of the defaults file ( PR #64 ) * Doc improvements for the plugin define ( PR #66 ) * Allow creation of data directory ( PR #68 ) * Fail early when package version and package_url are defined ##0.2.0 ( Nov 19, 2013 ) * Large rewrite of the entire module described below * Make the core more dynamic for different service providers and multi instance capable * Add better testing and devided into different files * Fix template function. 
Replace of template is now only done when the file is changed * Add different ways to install the package except from the repository ( puppet/http/https/ftp/file ) * Update java class to install openjdk 1.7 * Add tests for python function * Update config file template to fix scoping issue ( from PR #57 ) * Add validation of templates * Small changes for preperation for system tests * Update readme for new functionality * Added more test scenario's * Added puppet parser validate task for added checking * Ensure we don't add stuff when removing the module * Update python client define * Add ruby client define * Add tests for ruby clients and update python client tests ##0.1.3 ( Sep 06, 2013 ) * Exec path settings has been updated to fix warnings ( PR #37, #47 ) * Adding define to install python bindings ( PR #43 ) * Scope deprecation fixes ( PR #41 ) * feature to install plugins ( PR #40 ) ##0.1.2 ( Jun 21, 2013 ) * Update rake file to ignore the param inherit * Added missing documentation to the template define * Fix for template define to allow multiple templates ( PR #36 by Bruce Morrison ) ##0.1.1 ( Jun 14, 2013 ) * Add Oracle Linux to the OS list ( PR #25 by Stas Alekseev ) * Respect the restart_on_change on the defaults ( PR #29 by Simon Effenberg ) * Make sure the config can be empty as advertised in the readme * Remove dependency cycle when the defaults file is updated ( PR #31 by Bruce Morrison ) * Enable retry on the template insert in case ES isn't started yet ( PR #32 by Bruce Morrison ) * Update templates to avoid deprecation notice with Puppet 3.2.x * Update template define to avoid auto insert issue with ES * Update spec tests to reflect changes to template define ##0.1.0 ( May 09, 2013 ) * Populate .gitignore ( PR #19 by Igor Galić ) * Add ability to install initfile ( PR #20 by Justin Lambert ) * Add ability to manage default file service parameters ( PR #21 by Mathieu Bornoz ) * Providing complete containment of the module ( PR #24 by Brian Lalor ) * Add ability to specify package version ( PR #25 by Justin Lambert ) * Adding license file ##0.0.7 ( Mar 23, 2013 ) * Ensure config directory is created and managed ( PR #13 by Martin Seener ) * Dont backup package if it changes * Create explicit dependency on template directory ( PR #16 by Igor Galić ) * Make the config directory variable ( PR #17 by Igor Galić and PR #18 by Vincent Janelle ) * Fixing template define ##0.0.6 ( Mar 05, 2013 ) * Fixing issue with configuration not printing out arrays * New feature to write the config hash shorter * Updated readme to reflect the new feature * Adding spec tests for config file generation ##0.0.5 ( Mar 03, 2013 ) * Option to disable restart on config file change ( PR #10 by Chris Boulton ) ##0.0.4 ( Mar 02, 2013 ) * Fixed a major issue with the config template ( Issue #9 ) ##0.0.3 ( Mar 02, 2013 ) * Adding spec tests * Fixed init issue on Ubuntu ( Issue #6 by Marcus Furlong ) * Fixed config template problem ( Issue #8 by surfchris ) * New feature to manage templates ##0.0.2 ( Feb 16, 2013 ) * Feature to supply a package instead of being dependent on the repository * Feature to install java in case one doesn't manage it externally * Adding RedHat and Amazon as Operating systems * fixed a typo - its a shard not a shared :) ( PR #5 by Martin Seener ) ##0.0.1 ( Jan 13, 2013 ) * Initial release of the module diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 8ce533a..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,69 +0,0 @@ -# Contributing - -If you have a 
bugfix or new feature that you would like to contribute to this puppet module, please find or open an issue about it first. -Talk about what you would like to do - it may be that somebody is already working on it, or that there are particular issues that you should know about before implementing the change. - -**Note**: If you have support-oriented questions that aren't a bugfix or feature request, please post your questions on the [discussion forums](https://discuss.elastic.co/c/elasticsearch). - -We enjoy working with contributors to get their code accepted. -There are many approaches to fixing a problem and it is important to find the best approach before writing too much code. - -The process for contributing to any of the Elastic repositories is similar. - -## The Contributor License Agreement - -Please make sure you have signed the [Contributor License Agreement](http://www.elastic.co/contributor-agreement/). -We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. -We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. -You only need to sign the CLA once. - -## Development Setup - -There are a few testing prerequisites to meet: - -* Ruby. - As long as you have a recent version with `bundler` available, `bundler` will install development dependencies. - -You can then install the necessary gems with: - - bundle install - -This will install the requisite rubygems for testin. - -* Docker. - Note that Docker is used to run tests that require a Linux container/VM - if you only need to run simple rspec/doc tests, this shouldn't be necessary. - If you are developing on a Linux machine with a working Docker instance, this should be sufficient. - On OS X, just use the official [Docker installation method](https://docs.docker.com/engine/installation/mac/) to get a working `docker` setup. - Confirm that you can communicate with the Docker hypervisor with `docker version`. - -## Testing - -Running through the tests on your own machine can get ahead of any problems others (or Jenkins) may run into. - -First, run the intake tests and ensure it completes without errors with your changes. -These are lightweight tests that verify syntax, style, and all other tests that do not require a container to run. - - bundle exec rake intake - -Next, run the more thorough acceptance tests. -For example, to run the acceptance tests against CentOS 7, run the following: - - bundle exec rake beaker:centos-7-x64 - -The final output line will tell you which, if any, tests failed. -Note that you can find all other container acceptance tests with the `bundle exec rake -T` command. - -## Opening Pull Requests - -In summary, to open a new PR: - -* Sign the Contributor License Agreement -* Run the tests to confirm everything works as expected -* Rebase your changes. - Update your local repository with the most recent code from this puppet module repository, and rebase your branch on top of the latest master branch. -* Submit a pull request - Push your local changes to your forked copy of the repository and submit a pull request. - In the pull request, describe what your changes do and mention the number of the issue where discussion has taken place, eg "Closes #123". - -Then sit back and wait! -There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into this puppet module. 
diff --git a/Gemfile b/Gemfile index fc7ad61..6abd16b 100644 --- a/Gemfile +++ b/Gemfile @@ -1,54 +1,58 @@ source ENV['GEM_SOURCE'] || 'https://rubygems.org' group :test do - gem 'puppet', (ENV['PUPPET_VERSION'] || '~> 4.10'), :require => false + gem 'puppet', (ENV['PUPPET_VERSION'] || '~> 6.0'), :require => false gem 'metadata-json-lint' gem 'specinfra', '~> 2.60' gem 'xmlrpc' gem 'ci_reporter_rspec' - gem 'facter' + gem 'facter', "~> 2.4" gem 'pry' gem 'puppet-lint' gem 'puppet-strings' gem 'puppet-syntax' gem 'puppetlabs_spec_helper', '>= 2.7.0' gem 'rake' gem 'rspec', '~> 3.0' gem 'rspec-puppet', '~> 2.6' gem 'rspec-puppet-facts' gem 'rspec-puppet-utils' gem 'rspec-retry' # Required to test against Ruby 1.9 gem 'rubocop', '~> 0.41.2' gem 'rubysl-securerandom' gem 'webmock' # Extra Puppet-lint gems gem 'puppet-lint-appends-check', :git => 'https://github.com/voxpupuli/puppet-lint-appends-check', :ref => '07be8ce22d69353db055820b60bb77fe020238a6', :require => false gem 'puppet-lint-empty_string-check', :require => false gem 'puppet-lint-file_ensure-check', :require => false gem 'puppet-lint-leading_zero-check', :require => false gem 'puppet-lint-param-docs', :require => false gem 'puppet-lint-trailing_comma-check', :require => false gem 'puppet-lint-undef_in_function-check', :require => false gem 'puppet-lint-unquoted_string-check', :require => false gem 'puppet-lint-version_comparison-check', :require => false end group :development do gem 'puppet-blacksmith' end group :system_tests do gem 'bcrypt' - gem 'beaker', '~> 3.7' + gem 'beaker', '>= 4.2.0' gem 'beaker-rspec', '~> 6.0' + gem 'beaker-docker' + gem 'beaker-puppet' + gem 'beaker-puppet_install_helper' + gem 'simp-beaker-helpers' gem 'docker-api', '~> 1.0' gem 'infrataster' gem 'vault' end diff --git a/LICENSE b/LICENSE index bd2e60d..4dbac1b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,202 +1,175 @@ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2012-2017 Elasticsearch - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/README.md b/README.md index 10df50e..13bc974 100644 --- a/README.md +++ b/README.md @@ -1,1041 +1,845 @@ # Elasticsearch Puppet Module [![Puppet Forge endorsed](https://img.shields.io/puppetforge/e/elastic/elasticsearch.svg)](https://forge.puppetlabs.com/elastic/elasticsearch) [![Puppet Forge Version](https://img.shields.io/puppetforge/v/elastic/elasticsearch.svg)](https://forge.puppetlabs.com/elastic/elasticsearch) [![Puppet Forge Downloads](https://img.shields.io/puppetforge/dt/elastic/elasticsearch.svg)](https://forge.puppetlabs.com/elastic/elasticsearch) +[![Puppet Forge Score](https://img.shields.io/puppetforge/f/elastic/elasticsearch.svg)](https://forge.puppetlabs.com/elastic/elasticsearch) +[![Build Status](https://travis-ci.org/elastic/puppet-elasticsearch.png?branch=master)](https://travis-ci.org/elastic/puppet-elasticsearch) + #### Table of Contents 1. [Module description - What the module does and why it is useful](#module-description) 2.
[Setup - The basics of getting started with Elasticsearch](#setup) * [The module manages the following](#the-module-manages-the-following) * [Requirements](#requirements) 3. [Usage - Configuration options and additional functionality](#usage) 4. [Advanced features - Extra information on advanced usage](#advanced-features) 5. [Reference - An under-the-hood peek at what the module is doing and how](#reference) 6. [Limitations - OS compatibility, etc.](#limitations) 7. [Development - Guide for contributing to the module](#development) 8. [Support - When you need help with this module](#support) ## Module description This module sets up [Elasticsearch](https://www.elastic.co/overview/elasticsearch/) with additional resources for plugins, templates, and more. This module is actively tested against Elasticsearch 6.x and 7.x. +# WARNING: The 7.x major release of this module contains breaking changes! + +In order to simplify the management of Elasticsearch moving forward, and to add support for both Elasticsearch 6.x and 7.x, support for +running multiple instances of Elasticsearch on a single host has been removed. + +This module also does not currently handle the migration from the instance-based configuration to the new single-deployment model. +In-place upgrades from version 6.x of this module to 7.x, and migrations from multi-instance to single-deployment setups, are therefore not currently supported. +We hope to add support for this in a future release. + +Please ensure that you test this major release in your environment before using it in production! + ## Setup ### The module manages the following * Elasticsearch repository files. * Elasticsearch package. * Elasticsearch configuration file. * Elasticsearch service. * Elasticsearch plugins. * Elasticsearch snapshot repositories. * Elasticsearch templates. * Elasticsearch ingest pipelines. * Elasticsearch index settings. -* Elasticsearch Shield/X-Pack users, roles, and certificates. +* Elasticsearch users, roles, and certificates. * Elasticsearch licenses. * Elasticsearch keystores. ### Requirements * The [stdlib](https://forge.puppetlabs.com/puppetlabs/stdlib) Puppet library. * [richardc/datacat](https://forge.puppetlabs.com/richardc/datacat) * [Augeas](http://augeas.net/) -* [puppetlabs-java_ks](https://forge.puppetlabs.com/puppetlabs/java_ks) for Shield/X-Pack certificate management (optional). +* [puppetlabs-java_ks](https://forge.puppetlabs.com/puppetlabs/java_ks) for certificate management (optional). -In addition, remember that Elasticsearch requires Java to be installed. +Beginning with Elasticsearch 7.0.0, a Java JDK is bundled as part of the elasticsearch package. +However, a version of Java still needs to be present on the system being managed in order for Puppet to be able to run various utilities. We recommend managing your Java installation with the [puppetlabs-java](https://forge.puppetlabs.com/puppetlabs/java) module.
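A minimal sketch of that combination follows (illustrative only; it assumes the `java` class and `distribution` parameter provided by puppetlabs-java, so adjust the values to your platform's packages):

```puppet
# Illustrative sketch: install a JRE with puppetlabs-java so the
# command-line utilities invoked by this module have a JVM available.
class { 'java':
  distribution => 'jre',
}

class { 'elasticsearch':
  require => Class['java'],
}
```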
#### Repository management When using the repository management, the following module dependencies are required: * General: [Elastic/elastic_stack](https://forge.puppet.com/elastic/elastic_stack) * Debian/Ubuntu: [Puppetlabs/apt](https://forge.puppetlabs.com/puppetlabs/apt) * openSUSE/SLES: [puppet/zypprepo](https://forge.puppetlabs.com/puppet/zypprepo) ### Beginning with Elasticsearch Declare the top-level `elasticsearch` class (managing repositories): ```puppet include ::java class { 'elasticsearch': } -elasticsearch::instance { 'es-01': } ``` -**Note**: Elasticsearch 6.x requires a recent version of the JVM. - ## Usage ### Main class Most top-level parameters in the `elasticsearch` class are set to reasonable defaults. The following are some parameters that may be useful to override: #### Install a specific version ```puppet class { 'elasticsearch': - version => '6.0.0' + version => '7.9.3' } ``` Note: This will only work when using the repository. #### Automatically restarting the service (default set to false) By default, the module will not restart Elasticsearch when the configuration file, package, or plugins change. This can be overridden globally with the following option: ```puppet class { 'elasticsearch': restart_on_change => true } ``` Or controlled with the more granular options: `restart_config_change`, `restart_package_change`, and `restart_plugin_change`. #### Automatic upgrades (default set to false) ```puppet class { 'elasticsearch': autoupgrade => true } ``` #### Removal/Decommissioning ```puppet class { 'elasticsearch': ensure => 'absent' } ``` #### Install everything but disable service(s) afterwards ```puppet class { 'elasticsearch': status => 'disabled' } ``` #### API Settings Some resources, such as `elasticsearch::template`, require communicating with the Elasticsearch REST API. By default, these API settings are set to: ```puppet class { 'elasticsearch': api_protocol => 'http', api_host => 'localhost', api_port => 9200, api_timeout => 10, api_basic_auth_username => undef, api_basic_auth_password => undef, api_ca_file => undef, api_ca_path => undef, validate_tls => true, } ``` Each of these can be set at the top-level `elasticsearch` class and inherited for each resource or overridden on a per-resource basis. #### Dynamically Created Resources This module supports managing all of its defined types through top-level parameters to better support Hiera and Puppet Enterprise. -For example, to manage an instance and index template directly from the `elasticsearch` class: +For example, to manage an index template directly from the `elasticsearch` class: ```puppet class { 'elasticsearch': - instances => { - 'es-01' => { - 'config' => { - 'network.host' => '0.0.0.0' - } - } - }, templates => { 'logstash' => { 'content' => { 'template' => 'logstash-*', 'settings' => { 'number_of_replicas' => 0 } } } } } ``` - -### Instances - -This module works with the concept of instances. For service to start you need to specify at least one instance. - -#### Quick setup - -```puppet -elasticsearch::instance { 'es-01': } -``` - -This will set up its own data directory and set the node name to `$hostname-$instance_name` - -#### Advanced options - -Instance specific options can be given: - -```puppet -elasticsearch::instance { 'es-01': - config => { }, # Configuration hash - init_defaults => { }, # Init defaults hash - datadir => [ ], # Data directory -} - -See [Advanced features](#advanced-features) for more information.
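The same top-level pattern applies to the module's other defined types. A rough sketch (the `plugins` and `users` parameters exist as top-level hashes; the nested keys shown here mirror the corresponding `elasticsearch::plugin` and `elasticsearch::user` parameters and should be double-checked against your module version):

```puppet
# Illustrative sketch: plugins and users declared as hashes, for example
# sourced from Hiera, instead of as individual defined resources.
class { 'elasticsearch':
  plugins => {
    'analysis-icu' => {
      'ensure' => 'present',
    },
  },
  users   => {
    'myuser' => {
      'password' => 'mypassword',
      'roles'    => ['myrole'],
    },
  },
}
```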
- ### Plugins This module can help manage [a variety of plugins](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-plugins.html#known-plugins). Note that `module_dir` is where the plugin will install itself to and must match that published by the plugin author; it is not where you would like to install it yourself. #### From an official repository ```puppet -elasticsearch::plugin { 'x-pack': - instances => 'instance_name' -} +elasticsearch::plugin { 'x-pack': } ``` #### From a custom URL ```puppet elasticsearch::plugin { 'jetty': - url => 'https://oss-es-plugins.s3.amazonaws.com/elasticsearch-jetty/elasticsearch-jetty-1.2.1.zip', - instances => 'instance_name' + url => 'https://oss-es-plugins.s3.amazonaws.com/elasticsearch-jetty/elasticsearch-jetty-1.2.1.zip' } ``` #### Using a proxy You can also use a proxy if required by setting the `proxy_host` and `proxy_port` options: ```puppet elasticsearch::plugin { 'lmenezes/elasticsearch-kopf': - instances => 'instance_name', proxy_host => 'proxy.host.com', proxy_port => 3128 } ``` Proxies that require usernames and passwords are similarly supported with the `proxy_username` and `proxy_password` parameters. Plugin name formats that are supported include: * `elasticsearch/plugin/version` (for official elasticsearch plugins downloaded from download.elastic.co) * `groupId/artifactId/version` (for community plugins downloaded from maven central or OSS Sonatype) * `username/repository` (for site plugins downloaded from github master) #### Upgrading plugins When you specify a certain plugin version, you can upgrade that plugin by specifying the new version. ```puppet elasticsearch::plugin { 'elasticsearch/elasticsearch-cloud-aws/2.1.1': } ``` And to upgrade, you would simply change it to: ```puppet elasticsearch::plugin { 'elasticsearch/elasticsearch-cloud-aws/2.4.1': } ``` Please note that this does not work when you specify 'latest' as a version number. -#### ES 2.x, 5.x, and 6.x official plugins +#### ES 6.x and 7.x official plugins For the Elasticsearch commercial plugins you can refer to them by their simple name. See [Plugin installation](https://www.elastic.co/guide/en/elasticsearch/plugins/current/installation.html) for more details. ### Scripts Installs [scripts](http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html) to be used by Elasticsearch. These scripts are installed into Elasticsearch's shared scripts directory on the host. ```puppet elasticsearch::script { 'myscript': ensure => 'present', source => 'puppet:///path/to/my/script.groovy' } ``` Script directories can also be recursively managed for large collections of scripts: ```puppet elasticsearch::script { 'myscripts_dir': ensure => 'directory', source => 'puppet:///path/to/myscripts_dir', recurse => 'remote', } ``` ### Templates By default templates use the top-level `elasticsearch::api_*` settings to communicate with Elasticsearch.
The following is an example of how to override these settings: ```puppet elasticsearch::template { 'templatename': api_protocol => 'https', api_host => $::ipaddress, api_port => 9201, api_timeout => 60, api_basic_auth_username => 'admin', api_basic_auth_password => 'adminpassword', api_ca_file => '/etc/ssl/certs', api_ca_path => '/etc/pki/certs', validate_tls => false, source => 'puppet:///path/to/template.json', } ``` #### Add a new template using a file This will install and/or replace the template in Elasticsearch: ```puppet elasticsearch::template { 'templatename': source => 'puppet:///path/to/template.json', } ``` #### Add a new template using content This will install and/or replace the template in Elasticsearch: ```puppet elasticsearch::template { 'templatename': content => { 'template' => "*", 'settings' => { 'number_of_replicas' => 0 } } } ``` Plain JSON strings are also supported. ```puppet elasticsearch::template { 'templatename': content => '{"template":"*","settings":{"number_of_replicas":0}}' } ``` #### Delete a template ```puppet elasticsearch::template { 'templatename': ensure => 'absent' } ``` ### Ingestion Pipelines Pipelines behave similar to templates in that their contents can be controlled over the Elasticsearch REST API with a custom Puppet resource. API parameters follow the same rules as templates (those settings can either be controlled at the top-level in the `elasticsearch` class or set per-resource). #### Adding a new pipeline This will install and/or replace an ingestion pipeline in Elasticsearch (ingestion settings are compared against the present configuration): ```puppet elasticsearch::pipeline { 'addfoo': content => { 'description' => 'Add the foo field', 'processors' => [{ 'set' => { 'field' => 'foo', 'value' => 'bar' } }] } } ``` #### Delete a pipeline ```puppet elasticsearch::pipeline { 'addfoo': ensure => 'absent' } ``` ### Index Settings This module includes basic support for ensuring an index is present or absent with optional index settings. API access settings follow the pattern previously mentioned for templates. #### Creating an index At the time of this writing, only index settings are supported. Note that some settings (such as `number_of_shards`) can only be set at index creation time. ```puppet elasticsearch::index { 'foo': settings => { 'index' => { 'number_of_replicas' => 0 } } } ``` #### Delete an index ```puppet elasticsearch::index { 'foo': ensure => 'absent' } ``` ### Snapshot Repositories By default snapshot_repositories use the top-level `elasticsearch::api_*` settings to communicate with Elasticsearch. The following is an example of how to override these settings: ```puppet elasticsearch::snapshot_repository { 'backups': api_protocol => 'https', api_host => $::ipaddress, api_port => 9201, api_timeout => 60, api_basic_auth_username => 'admin', api_basic_auth_password => 'adminpassword', api_ca_file => '/etc/ssl/certs', api_ca_path => '/etc/pki/certs', validate_tls => false, location => '/backups', } ``` #### Delete a snapshot repository ```puppet elasticsearch::snapshot_repository { 'backups': ensure => 'absent', location => '/backup' } ``` ### Connection Validator This module offers a way to make sure an instance has been started and is up and running before doing a next action. This is done via the use of the `es_instance_conn_validator` resource. 
```puppet es_instance_conn_validator { 'myinstance' : server => 'es.example.com', port => '9200', } ``` A common use would be for example : ```puppet class { 'kibana4' : require => Es_Instance_Conn_Validator['myinstance'], } ``` ### Package installation There are two different ways of installing Elasticsearch: #### Repository ##### Choosing an Elasticsearch major version This module uses the `elastic/elastic_stack` module to manage package repositories. Because there is a separate repository for each major version of the Elastic stack, selecting which version to configure is necessary to change the default repository value, like this: ```puppet class { 'elastic_stack::repo': - version => 5, + version => 6, } class { 'elasticsearch': - version => '5.6.4', + version => '6.8.12', } ``` This module defaults to the upstream package repositories, which as of Elasticsearch 6.3, includes X-Pack. In order to use the purely OSS (open source) package and repository, the appropriate `oss` flag must be set on the `elastic_stack::repo` and `elasticsearch` classes: ```puppet class { 'elastic_stack::repo': oss => true, } class { 'elasticsearch': oss => true, } ``` ##### Manual repository management You may want to manage repositories manually. You can disable automatic repository management like this: ```puppet class { 'elasticsearch': manage_repo => false, } ``` #### Remote package source When a repository is not available or preferred you can install the packages from a remote source: ##### http/https/ftp ```puppet class { 'elasticsearch': package_url => 'https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.4.2.deb', proxy_url => 'http://proxy.example.com:8080/', } ``` Setting `proxy_url` to a location will enable download using the provided proxy server. This parameter is also used by `elasticsearch::plugin`. Setting the port in the `proxy_url` is mandatory. `proxy_url` defaults to `undef` (proxy disabled). ##### puppet:// ```puppet class { 'elasticsearch': package_url => 'puppet:///path/to/elasticsearch-1.4.2.deb' } ``` ##### Local file ```puppet class { 'elasticsearch': package_url => 'file:/path/to/elasticsearch-1.4.2.deb' } ``` ### JVM Configuration -When configuring Elasticsearch's memory usage, you can do so by either changing init defaults for Elasticsearch 1.x/2.x (see the [following example](#hash-representation)), or modify it globally in 5.x using `jvm.options`: +When configuring Elasticsearch's memory usage, you can modify it by setting `jvm_options`: ```puppet class { 'elasticsearch': jvm_options => [ '-Xms4g', '-Xmx4g' ] } ``` -`jvm.options` can also be controlled per-instance: - -```puppet -elasticsearch::instance { 'es-01': - jvm_options => [ - '-Xms4g', - '-Xmx4g' - ] -} -``` - ### Service management Currently only the basic SysV-style [init](https://en.wikipedia.org/wiki/Init) and [Systemd](http://en.wikipedia.org/wiki/Systemd) service providers are supported, but other systems could be implemented as necessary (pull requests welcome). #### Defaults File The *defaults* file (`/etc/defaults/elasticsearch` or `/etc/sysconfig/elasticsearch`) for the Elasticsearch service can be populated as necessary. This can either be a static file resource or a simple key value-style [hash](http://docs.puppetlabs.com/puppet/latest/reference/lang_datatypes.html#hashes) object, the latter being particularly well-suited to pulling out of a data source such as Hiera. 
##### File source ```puppet class { 'elasticsearch': init_defaults_file => 'puppet:///path/to/defaults' } ``` ##### Hash representation ```puppet $config_hash = { 'ES_HEAP_SIZE' => '30g', } class { 'elasticsearch': init_defaults => $config_hash } ``` Note: the `init_defaults` hash can only be passed to the main `elasticsearch` class. ## Advanced features -### X-Pack/Shield +### Security -[X-Pack](https://www.elastic.co/products/x-pack) and [Shield](https://www.elastic.co/products/shield) file-based users, roles, and certificates can be managed by this module. +File-based users, roles, and certificates can be managed by this module. **Note**: If you are planning to use these features, it is *highly recommended* you read the following documentation to understand the caveats and extent of the resources available to you. -#### Getting Started - -Although this module can handle several types of Shield/X-Pack resources, you are expected to manage the plugin installation and versions for your deployment. -For example, the following manifest will install Elasticseach with a single instance running X-Pack: - -```puppet -class { 'elasticsearch': - security_plugin => 'x-pack', -} - -elasticsearch::instance { 'es-01': } -elasticsearch::plugin { 'x-pack': instances => 'es-01' } -``` - -The following manifest will do the same, but with Shield: - -```puppet -class { 'elasticsearch': - security_plugin => 'shield', -} - -elasticsearch::instance { 'es-01': } - -Elasticsearch::Plugin { instances => ['es-01'], } -elasticsearch::plugin { 'license': } -elasticsearch::plugin { 'shield': } -``` - -The following examples will assume the preceding resources are part of your puppet manifest. - #### Roles -Roles in the file realm (the `esusers` realm in Shield) can be managed using the `elasticsearch::role` type. -For example, to create a role called `myrole`, you could use the following resource in X-Pack: +Roles in the file realm can be managed using the `elasticsearch::role` type. +For example, to create a role called `myrole`, you could use the following resource: ```puppet elasticsearch::role { 'myrole': privileges => { 'cluster' => [ 'monitor' ], 'indices' => [{ 'names' => [ '*' ], 'privileges' => [ 'read' ], }] } } ``` -And in Shield: - -```puppet -elasticsearch::role { 'myrole': - privileges => { - 'cluster' => 'monitor', - 'indices' => { - '*' => 'read' - } - } -} -``` - This role would grant users access to cluster monitoring and read access to all indices. -See the [Shield](https://www.elastic.co/guide/en/shield/index.html) or [X-Pack](https://www.elastic.co/guide/en/x-pack/current/xpack-security.html) documentation for your version to determine what `privileges` to use and how to format them (the Puppet hash representation will simply be translated into yaml.) +See the [Security](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-security.html) documentation for your version to determine what `privileges` to use and how to format them (the Puppet hash representation will simply be translated into YAML). -**Note**: The Puppet provider for `esusers`/`users` has fine-grained control over the `roles.yml` file and thus will leave the default roles Shield installs in-place. +**Note**: The Puppet provider for `elasticsearch_user` has fine-grained control over the `roles.yml` file and thus will leave the default roles in-place.
If you would like to explicitly purge the default roles (leaving only roles managed by puppet), you can do so by including the following in your manifest: ```puppet resources { 'elasticsearch_role': purge => true, } ``` ##### Mappings Associating mappings with a role for file-based management is done by passing an array of strings to the `mappings` parameter of the `elasticsearch::role` type. For example, to define a role with mappings: ```puppet elasticsearch::role { 'logstash': mappings => [ 'cn=group,ou=devteam', ], privileges => { 'cluster' => 'manage_index_templates', 'indices' => [{ 'names' => ['logstash-*'], 'privileges' => [ 'write', 'delete', 'create_index', ], }], }, } ``` -**Note**: Observe the brackets around `indices` in the preceding role definition; which is an array of hashes per the format in Shield 2.3.x. Follow the documentation to determine the correct formatting for your version of Shield or X-Pack. - If you'd like to keep the mappings file purged of entries not under Puppet's control, you should use the following `resources` declaration because mappings are a separate low-level type: ```puppet resources { 'elasticsearch_role_mapping': purge => true, } ``` #### Users Users can be managed using the `elasticsearch::user` type. For example, to create a user `myuser` with membership in `myrole`: ```puppet elasticsearch::user { 'myuser': password => 'mypassword', roles => ['myrole'], } ``` The `password` parameter will also accept password hashes generated from the `esusers`/`users` utility and ensure the password is kept in-sync with the Elasticsearch `users` file. ```puppet elasticsearch::user { 'myuser': password => '$2a$10$IZMnq6DF4DtQ9c4sVovgDubCbdeH62XncmcyD1sZ4WClzFuAdqspy', roles => ['myrole'], } ``` -**Note**: When using the `esusers`/`users` provider (the default for plaintext passwords), Puppet has no way to determine whether the given password is in-sync with the password hashed by Shield/X-Pack. +**Note**: When using the `esusers`/`users` provider (the default for plaintext passwords), Puppet has no way to determine whether the given password is in-sync with the password hashed by Elasticsearch. In order to work around this, the `elasticsearch::user` resource has been designed to accept refresh events in order to update password values. This is not ideal, but allows you to instruct the resource to change the password when needed. For example, to update the aforementioned user's password, you could include the following in your manifest: ```puppet notify { 'update password': } ~> elasticsearch::user { 'myuser': password => 'mynewpassword', roles => ['myrole'], } ``` #### Certificates -SSL/TLS can be enabled by providing an `elasticsearch::instance` type with paths to the certificate and private key files, and a password for the keystore. +SSL/TLS can be enabled by providing the appropriate class params with paths to the certificate and private key files, and a password for the keystore. ```puppet -elasticsearch::instance { 'es-01': +class { 'elasticsearch': ssl => true, ca_certificate => '/path/to/ca.pem', certificate => '/path/to/cert.pem', private_key => '/path/to/key.pem', keystore_password => 'keystorepassword', } ``` -**Note**: Setting up a proper CA and certificate infrastructure is outside the scope of this documentation, see the aforementioned Shield or X-Pack guide for more information regarding the generation of these certificate files.
+**Note**: Setting up a proper CA and certificate infrastructure is outside the scope of this documentation; see the aforementioned security guide for more information regarding the generation of these certificate files. The module will set up a keystore file for the node to use and set the relevant options in `elasticsearch.yml` to enable TLS/SSL using the certificates and key provided. #### System Keys -Shield/X-Pack system keys can be passed to the module, where they will be placed into individual instance configuration directories. +System keys can be passed to the module, where they will be placed into the Elasticsearch configuration directory. This is set on the `elasticsearch` class: ```puppet class { 'elasticsearch': system_key => 'puppet:///path/to/key', } ``` -Or set on a per-instance basis: - -```puppet -elasticsearch::instance { 'es-01': - system_key => '/local/path/to/key', -} - -``` - ### Licensing -If you use the aforementioned Shield/X-Pack plugins, you may need to install a user license to leverage particular features outside of a trial license. +If you use the aforementioned security features, you may need to install a user license to leverage particular features outside of a trial license. This module can handle installation of licenses without the need to write custom `exec` or `curl` code to install license data. You may instruct the module to install a license through the `elasticsearch::license` parameter: ```puppet class { 'elasticsearch': license => $license, - security_plugin => 'x-pack', } ``` The `license` parameter will accept either a Puppet hash representation of the license file JSON or a plain JSON string that will be parsed into a native Puppet hash. -Although dependencies are automatically created to ensure that any `elasticsearch::instance` resources are listening and ready before API calls are made, you may need to set the appropriate `api_*` parameters to ensure that the module can interact with the Elasticsearch API over the appropriate port, protocol, and with sufficient user rights to install the license. +Although dependencies are automatically created to ensure that the Elasticsearch service is listening and ready before API calls are made, you may need to set the appropriate `api_*` parameters to ensure that the module can interact with the Elasticsearch API over the appropriate port, protocol, and with sufficient user rights to install the license. The native provider for licenses will _not_ print license signatures as part of Puppet's change reporting to ensure that sensitive values are not included in console output or Puppet reports. Any fields present in the `license` parameter that differ from the license installed in a cluster will trigger a flush of the resource and a new `POST` to the Elasticsearch API with the license content, though the sensitive `signature` field is not compared as it is not returned from the Elasticsearch licensing APIs. ### Data directories There are several different ways of setting data directories for Elasticsearch. In every case the required configuration options are placed in the `elasticsearch.yml` file. #### Default By default we use: - /usr/share/elasticsearch/data/$instance_name + /var/lib/elasticsearch -Which provides a data directory per instance. +Which mirrors the upstream defaults.
#### Single global data directory +It is possible to override the default data directory by specifying the `datadir` param: + ```puppet class { 'elasticsearch': datadir => '/var/lib/elasticsearch-data' } ``` -Creates the following for each instance: - - /var/lib/elasticsearch-data/$instance_name - #### Multiple Global data directories -```puppet -class { 'elasticsearch': - datadir => [ '/var/lib/es-data1', '/var/lib/es-data2'] -} -``` -Creates the following for each instance: -`/var/lib/es-data1/$instance_name` -and -`/var/lib/es-data2/$instance_name`. - -#### Single instance data directory - -```puppet -class { 'elasticsearch': } - -elasticsearch::instance { 'es-01': - datadir => '/var/lib/es-data-es01' -} -``` - -Creates the following for this instance: - - /var/lib/es-data-es01 - -#### Multiple instance data directories - -```puppet -class { 'elasticsearch': } - -elasticsearch::instance { 'es-01': - datadir => ['/var/lib/es-data1-es01', '/var/lib/es-data2-es01'] -} -``` - -Creates the following for this instance: -`/var/lib/es-data1-es01` -and -`/var/lib/es-data2-es01`. - -#### Shared global data directories - -In some cases, you may want to share a top-level data directory among multiple instances. +It's also possible to specify multiple data directories using the `datadir` param: ```puppet class { 'elasticsearch': - datadir_instance_directories => false, - config => { - 'node.max_local_storage_nodes' => 2 - } + datadir => [ '/var/lib/es-data1', '/var/lib/es-data2'] } - -elasticsearch::instance { 'es-01': } -elasticsearch::instance { 'es-02': } ``` -Will result in the following directories created by Elasticsearch at runtime: - - /var/lib/elasticsearch/nodes/0 - /var/lib/elasticsearch/nodes/1 - See [the Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#max-local-storage-nodes) for additional information regarding this configuration. -### Main and instance configurations - -The `config` option in both the main class and the instances can be configured to work together. +### Elasticsearch configuration -The options in the `instance` config hash will merged with the ones from the main class and override any duplicates. - -#### Simple merging - -```puppet -class { 'elasticsearch': - config => { 'cluster.name' => 'clustername' } -} - -elasticsearch::instance { 'es-01': - config => { 'node.name' => 'nodename' } -} -elasticsearch::instance { 'es-02': - config => { 'node.name' => 'nodename2' } -} -``` - -This example merges the `cluster.name` together with the `node.name` option. - -#### Overriding - -When duplicate options are provided, the option in the instance config overrides the ones from the main class. - -```puppet -class { 'elasticsearch': - config => { 'cluster.name' => 'clustername' } -} - -elasticsearch::instance { 'es-01': - config => { 'node.name' => 'nodename', 'cluster.name' => 'otherclustername' } -} - -elasticsearch::instance { 'es-02': - config => { 'node.name' => 'nodename2' } -} -``` - -This will set the cluster name to `otherclustername` for the instance `es-01` but will keep it to `clustername` for instance `es-02` +The `config` option can be used to provide additional configuration options to Elasticsearch. 
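As an illustrative sketch (the keys shown are ordinary `elasticsearch.yml` settings such as `cluster.name` and `network.host`, not module parameters; substitute whatever settings your deployment needs):

```puppet
# Illustrative sketch: pass arbitrary elasticsearch.yml settings as a hash.
class { 'elasticsearch':
  config => {
    'cluster.name'                 => 'es01',
    'network.host'                 => '0.0.0.0',
    'discovery.seed_hosts'         => ['es1.example.com', 'es2.example.com'],
    'cluster.initial_master_nodes' => ['es1.example.com'],
  },
}
```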
#### Configuration writeup The `config` hash can be written in two different ways: ##### Full hash writeup The full hash representation looks like: ```puppet class { 'elasticsearch': config => { 'cluster' => { 'name' => 'ClusterName', 'routing' => { 'allocation' => { 'awareness' => { 'attributes' => 'rack' } } } } } } ``` ##### Short hash writeup ```puppet class { 'elasticsearch': config => { 'cluster' => { 'name' => 'ClusterName', 'routing.allocation.awareness.attributes' => 'rack' } } } ``` #### Keystore Settings Recent versions of Elasticsearch include the [elasticsearch-keystore](https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html) utility to create and manage the `elasticsearch.keystore` file which can store sensitive values for certain settings. The settings and values for this file can be controlled by this module. Settings follow the behavior of the `config` parameter for the top-level `elasticsearch` class; values defined there are included in the `elasticsearch.keystore` file. Note that each hash key is passed to the `elasticsearch-keystore` utility in a straightforward manner, so you should specify the hash passed to `secrets` in flattened form (that is, without full nested hash representation). For example, to define cloud plugin credentials: ```puppet class { 'elasticsearch': secrets => { 'cloud.aws.access_key' => 'AKIA....', 'cloud.aws.secret_key' => 'AKIA....', } } ``` -Or, to instead control these settings for a single instance: - -```puppet -elasticsearch::instance { 'es-01': - secrets => { - 'cloud.aws.access_key' => 'AKIA....', - 'cloud.aws.secret_key' => 'AKIA....', - } -} - ``` - ##### Purging Secrets By default, if a secret setting exists on-disk that is not present in the `secrets` hash, this module will leave it intact. If you prefer to keep only secrets in the keystore that are specified in the `secrets` hash, use the `purge_secrets` boolean parameter on the `elasticsearch` class. ##### Notifying Services Any changes to keystore secrets will notify the running Elasticsearch service by respecting the `restart_on_change` and `restart_config_change` parameters. ## Reference Class parameters are available in [the auto-generated documentation pages](https://elastic.github.io/puppet-elasticsearch/puppet_classes/elasticsearch.html). Autogenerated documentation for types, providers, and ruby helpers is also available on the same documentation site. ## Limitations This module is built upon and tested against the versions of Puppet listed in the metadata.json file (i.e. the listed compatible versions on the Puppet Forge). The module has been tested on: -* Debian 7/8 -* CentOS 6/7 -* OracleLinux 6/7 -* Ubuntu 14.04, 16.04 -* openSUSE 42.x +* Amazon Linux 1/2 +* Debian 8/9/10 +* CentOS 7/8 +* OracleLinux 7/8 +* Ubuntu 16.04, 18.04, 20.04 * SLES 12 -Other distro's that have been reported to work: - -* RHEL 6 -* Scientific 6 - Testing on other platforms has been light and cannot be guaranteed. ## Development Please see the [CONTRIBUTING.md](CONTRIBUTING.md) file for instructions regarding development environments and testing. ## Support -Need help? Join us in [#elasticsearch](https://webchat.freenode.net?channels=%23elasticsearch) on Freenode IRC or on the [discussion forum](https://discuss.elastic.co/).
+The Puppet Elasticsearch module is community supported and not officially supported by Elastic Support. + +For questions about the module, open a topic in the [Discuss](http://discuss.elastic.co/) forums or join us in [#elasticsearch](https://webchat.freenode.net/?channels=%23elasticsearch) on Freenode IRC. + +For bugs or feature requests, open an issue in [Github](https://github.com/elastic/puppet-elasticsearch/issues). diff --git a/Rakefile b/Rakefile index 797ee02..715ae18 100644 --- a/Rakefile +++ b/Rakefile @@ -1,217 +1,219 @@ require 'digest/sha1' require 'rubygems' require 'puppetlabs_spec_helper/rake_tasks' require 'puppet_blacksmith/rake_tasks' require 'net/http' require 'uri' require 'fileutils' require 'rspec/core/rake_task' require 'puppet-strings' require 'puppet-strings/tasks' require 'yaml' require 'json' require_relative 'spec/spec_utilities' ENV['VAULT_APPROLE_ROLE_ID'] ||= '48adc137-3270-fc4a-ae65-1306919d4bb0' oss_package = ENV['OSS_PACKAGE'] and ENV['OSS_PACKAGE'] == 'true' +elasticsearch_default_version = '7.10.1' + # Workaround for certain rspec/beaker versions module TempFixForRakeLastComment def last_comment last_description end end Rake::Application.send :include, TempFixForRakeLastComment exclude_paths = [ 'pkg/**/*', 'vendor/**/*', 'spec/**/*' ] require 'puppet-lint/tasks/puppet-lint' require 'puppet-syntax/tasks/puppet-syntax' PuppetSyntax.exclude_paths = exclude_paths PuppetSyntax.future_parser = true if ENV['FUTURE_PARSER'] == 'true' %w[ 80chars class_inherits_from_params_class class_parameter_defaults single_quote_string_with_variable ].each do |check| PuppetLint.configuration.send("disable_#{check}") end PuppetLint.configuration.ignore_paths = exclude_paths PuppetLint.configuration.log_format = \ '%{path}:%{line}:%{check}:%{KIND}:%{message}' # Append custom cleanup tasks to :clean task :clean => [ :'artifact:clean', :spec_clean ] desc 'remove outdated module fixtures' task :spec_prune do mods = 'spec/fixtures/modules' fixtures = YAML.load_file '.fixtures.yml' fixtures['fixtures']['forge_modules'].each do |mod, params| next unless params.is_a? Hash \ and params.key? 'ref' \ and File.exist? "#{mods}/#{mod}" metadata = JSON.parse(File.read("#{mods}/#{mod}/metadata.json")) FileUtils.rm_rf "#{mods}/#{mod}" unless metadata['version'] == params['ref'] end end task :spec_prep => [:spec_prune] RSpec::Core::RakeTask.new(:spec_verbose) do |t| t.pattern = 'spec/{classes,defines,unit,functions,templates}/**/*_spec.rb' t.rspec_opts = [ '--format documentation', '--require "ci/reporter/rspec"', '--format CI::Reporter::RSpecFormatter', '--color' ] end task :spec_verbose => :spec_prep RSpec::Core::RakeTask.new(:spec_puppet) do |t| t.pattern = 'spec/{classes,defines,functions,templates,unit/facter}/**/*_spec.rb' t.rspec_opts = ['--color'] end task :spec_puppet => :spec_prep RSpec::Core::RakeTask.new(:spec_unit) do |t| t.pattern = 'spec/unit/{type,provider}/**/*_spec.rb' t.rspec_opts = ['--color'] end task :spec_unit => :spec_prep task :beaker => [:spec_prep] desc 'Run all linting/unit tests.' task :intake => [ :syntax, :rubocop, :lint, :validate, :spec_unit, :spec_puppet ] # Plumbing for snapshot tests desc 'Run the snapshot tests' RSpec::Core::RakeTask.new('beaker:snapshot', [:filter]) do |task, args| task.rspec_opts = ['--color'] task.pattern = 'spec/acceptance/tests/acceptance_spec.rb' task.rspec_opts = [] task.rspec_opts << '--format documentation' if ENV['CI'].nil? 
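# Optionally restrict the run to a single example name, flag the run as a
# snapshot test for the spec helpers via SNAPSHOT_TEST, and skip the suite
# entirely if the snapshot artifact download tasks found nothing to test.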
task.rspec_opts << "--example '#{args[:filter]}'" if args[:filter] ENV['SNAPSHOT_TEST'] = 'true' if Rake::Task.task_defined? 'artifact:snapshot:not_found' puts 'No snapshot artifacts found, skipping snapshot tests.' exit(0) end end beaker_node_sets.each do |node| desc "Run the snapshot tests against the #{node} nodeset" task "beaker:#{node}:snapshot", [:filter] => %w[ spec_prep artifact:snapshot:deb artifact:snapshot:rpm ] do |_task, args| ENV['BEAKER_set'] = node Rake::Task['beaker:snapshot'].reenable Rake::Task['beaker:snapshot'].invoke args[:filter] end desc "Run acceptance tests against #{node}" RSpec::Core::RakeTask.new( "beaker:#{node}:acceptance", [:version, :filter] => [:spec_prep] ) do |task, args| ENV['BEAKER_set'] = node - args.with_defaults(:version => '6.2.3', :filter => nil) + args.with_defaults(:version => elasticsearch_default_version, :filter => nil) task.pattern = 'spec/acceptance/tests/acceptance_spec.rb' task.rspec_opts = [] - task.rspec_opts << '--format documentation' if ENV['CI'].nil? + task.rspec_opts << '--format documentation' task.rspec_opts << "--example '#{args[:filter]}'" if args[:filter] ENV['ELASTICSEARCH_VERSION'] ||= args[:version] Rake::Task['artifact:fetch'].invoke(ENV['ELASTICSEARCH_VERSION']) end end namespace :artifact do desc 'Fetch specific installation artifacts' task :fetch, [:version] do |_t, args| fetch_archives( derive_artifact_urls_for(args[:version]) ) end namespace :snapshot do snapshot_version = JSON.parse(http_retry('https://artifacts-api.elastic.co/v1/versions'))['versions'].reject do |version| version.include? 'alpha' end.last ENV['snapshot_version'] = snapshot_version downloads = JSON.parse(http_retry("https://artifacts-api.elastic.co/v1/search/#{snapshot_version}/elasticsearch"))['packages'].select do |pkg, _| pkg =~ /(?:deb|rpm)/ and (oss_package ? pkg =~ /oss/ : pkg !~ /oss/) end.map do |package, urls| [package.split('.').last, urls] end.to_h # We end up with something like: # { # 'rpm' => {'url' => 'https://...', 'sha_url' => 'https://...'}, # 'deb' => {'url' => 'https://...', 'sha_url' => 'https://...'} # } # Note that checksums are currently broken on the Elastic unified release # side; once they start working we can verify them. if downloads.empty? puts 'No snapshot release available; skipping snapshot download' %w[deb rpm].each { |ext| task ext } task 'not_found' else # Download snapshot files downloads.each_pair do |extension, urls| filename = artifact urls['url'] checksum = artifact urls['sha_url'] link = artifact "elasticsearch-snapshot.#{extension}" FileUtils.rm link if File.exist? link task extension => link file link => filename do unless File.exist?(link) and File.symlink?(link) \ and File.readlink(link) == filename File.delete link if File.exist? link File.symlink File.basename(filename), link end end # file filename => checksum do file filename do get urls['url'], filename end task checksum do File.delete checksum if File.exist? 
checksum get urls['sha_url'], checksum end end end end desc 'Purge fetched artifacts' task :clean do FileUtils.rm_rf(Dir.glob('spec/fixtures/artifacts/*')) end end diff --git a/data/common.yaml b/data/common.yaml index bb17a21..dec524e 100644 --- a/data/common.yaml +++ b/data/common.yaml @@ -1,67 +1,70 @@ --- elasticsearch::ensure: present elasticsearch::api_basic_auth_password: ~ elasticsearch::api_basic_auth_username: ~ elasticsearch::api_ca_file: ~ elasticsearch::api_ca_path: ~ elasticsearch::api_host: localhost elasticsearch::api_port: 9200 elasticsearch::api_protocol: http elasticsearch::api_timeout: 10 elasticsearch::autoupgrade: false elasticsearch::config: {} elasticsearch::configdir: /etc/elasticsearch elasticsearch::configdir_recurselimit: 2 elasticsearch::daily_rolling_date_pattern: | "'.'yyyy-MM-dd" -elasticsearch::datadir_instance_directories: true elasticsearch::default_logging_level: 'INFO' elasticsearch::defaults_location: ~ +elasticsearch::deprecation_logging: false +elasticsearch::deprecation_logging_level: 'DEBUG' elasticsearch::download_tool: ~ elasticsearch::download_tool_insecure: ~ elasticsearch::download_tool_verify_certificates: true elasticsearch::file_rolling_type: dailyRollingFile elasticsearch::indices: {} elasticsearch::init_defaults: {} elasticsearch::init_defaults_file: ~ elasticsearch::init_template: "%{module_name}/etc/init.d/elasticsearch.systemd.erb" elasticsearch::instances: {} elasticsearch::jvm_options: [] elasticsearch::license: ~ elasticsearch::logdir: /var/log/elasticsearch elasticsearch::logging_config: {} elasticsearch::logging_file: ~ +elasticsearch::logging_level: 'INFO' elasticsearch::logging_template: ~ elasticsearch::manage_repo: true elasticsearch::oss: false elasticsearch::package_dl_timeout: 600 elasticsearch::package_name: elasticsearch elasticsearch::package_provider: package elasticsearch::package_url: ~ elasticsearch::pid_dir: /var/run/elasticsearch elasticsearch::pipelines: {} elasticsearch::plugindir: ~ elasticsearch::plugins: {} elasticsearch::proxy_url: ~ elasticsearch::purge_configdir: false elasticsearch::purge_package_dir: false elasticsearch::purge_secrets: false elasticsearch::repo_stage: false elasticsearch::restart_on_change: false elasticsearch::roles: {} elasticsearch::rolling_file_max_backup_index: 1 elasticsearch::rolling_file_max_file_size: 10MB elasticsearch::scripts: {} elasticsearch::secrets: ~ elasticsearch::security_logging_content: ~ elasticsearch::security_logging_source: ~ -elasticsearch::security_plugin: ~ +elasticsearch::service_name: elasticsearch elasticsearch::service_provider: systemd elasticsearch::snapshot_repositories: {} +elasticsearch::ssl: false elasticsearch::status: enabled elasticsearch::system_key: ~ elasticsearch::systemd_service_path: /lib/systemd/system elasticsearch::templates: {} elasticsearch::users: {} elasticsearch::validate_tls: true elasticsearch::version: false diff --git a/lib/facter/es_facts.rb b/lib/facter/es_facts.rb index 0114751..eb587f4 100644 --- a/lib/facter/es_facts.rb +++ b/lib/facter/es_facts.rb @@ -1,147 +1,137 @@ require 'net/http' require 'json' require 'yaml' # Helper module to encapsulate custom fact injection -# rubocop:disable Metrics/ModuleLength module EsFacts # Add a fact to the catalog of host facts def self.add_fact(prefix, key, value) key = "#{prefix}_#{key}".to_sym ::Facter.add(key) do setcode { value } end end def self.ssl?(config) tls_keys = [ - 'xpack.security.http.ssl.enabled', - 'shield.http.ssl', - 'searchguard.ssl.http.enabled' + 
'xpack.security.http.ssl.enabled' ] tls_keys.any? { |key| (config.key? key) && (config[key] == true) } end # Helper to determine the instance http.port number def self.get_httpport(config) enabled = 'http.enabled' httpport = 'http.port' - if !config[enabled].nil? && config[enabled] == 'false' - false - elsif !config[httpport].nil? - { config[httpport] => ssl?(config) } - else - { '9200' => ssl?(config) } - end + return false, false if !config[enabled].nil? && config[enabled] == 'false' + return config[httpport], ssl?(config) unless config[httpport].nil? + ['9200', ssl?(config)] end # Entrypoint for custom fact populator # # This is a super old function but works; disable a bunch of checks. # rubocop:disable Lint/HandleExceptions # rubocop:disable Metrics/CyclomaticComplexity # rubocop:disable Metrics/PerceivedComplexity def self.run dir_prefix = '/etc/elasticsearch' # httpports is a hash of port_number => ssl? - httpports = {} transportports = [] http_bound_addresses = [] transport_bound_addresses = [] transport_publish_addresses = [] nodes = {} # only when the directory exists we need to process the stuff return unless File.directory?(dir_prefix) - Dir.foreach(dir_prefix) do |dir| - next if dir == '.' - - if File.readable?("#{dir_prefix}/#{dir}/elasticsearch.yml") - config_data = YAML.load_file("#{dir_prefix}/#{dir}/elasticsearch.yml") - httpport = get_httpport(config_data) - httpports.merge! httpport if httpport - end + if File.readable?("#{dir_prefix}/elasticsearch.yml") + config_data = YAML.load_file("#{dir_prefix}/elasticsearch.yml") + httpport, ssl = get_httpport(config_data) end begin - if httpports.keys.count > 0 - - add_fact('elasticsearch', 'ports', httpports.keys.join(',')) - - httpports.each_pair do |httpport, ssl| - next if ssl + add_fact('elasticsearch', 'port', httpport) - key_prefix = "elasticsearch_#{httpport}" + unless ssl + key_prefix = 'elasticsearch' + # key_prefix = "elasticsearch_#{httpport}" - uri = URI("http://localhost:#{httpport}") - http = Net::HTTP.new(uri.host, uri.port) - http.read_timeout = 10 - http.open_timeout = 2 - response = http.get('/') - json_data = JSON.parse(response.body) - next if json_data['status'] && json_data['status'] != 200 + uri = URI("http://localhost:#{httpport}") + http = Net::HTTP.new(uri.host, uri.port) + http.read_timeout = 10 + http.open_timeout = 2 + response = http.get('/') + json_data = JSON.parse(response.body) + if json_data['status'] && json_data['status'] == 200 add_fact(key_prefix, 'name', json_data['name']) add_fact(key_prefix, 'version', json_data['version']['number']) uri2 = URI("http://localhost:#{httpport}/_nodes/#{json_data['name']}") http2 = Net::HTTP.new(uri2.host, uri2.port) http2.read_timeout = 10 http2.open_timeout = 2 response2 = http2.get(uri2.path) json_data_node = JSON.parse(response2.body) add_fact(key_prefix, 'cluster_name', json_data_node['cluster_name']) node_data = json_data_node['nodes'].first add_fact(key_prefix, 'node_id', node_data[0]) nodes_data = json_data_node['nodes'][node_data[0]] process = nodes_data['process'] add_fact(key_prefix, 'mlockall', process['mlockall']) plugins = nodes_data['plugins'] plugin_names = [] plugins.each do |plugin| plugin_names << plugin['name'] plugin.each do |key, value| prefix = "#{key_prefix}_plugin_#{plugin['name']}" add_fact(prefix, key, value) unless key == 'name' end end add_fact(key_prefix, 'plugins', plugin_names.join(',')) nodes_data['http']['bound_address'].each { |i| http_bound_addresses << i } nodes_data['transport']['bound_address'].each { |i| 
transport_bound_addresses << i } transport_publish_addresses << nodes_data['transport']['publish_address'] unless nodes_data['transport']['publish_address'].nil? transportports << nodes_data['settings']['transport']['tcp']['port'] unless nodes_data['settings']['transport']['tcp'].nil? or nodes_data['settings']['transport']['tcp']['port'].nil? - node = { 'http_ports' => httpports.keys, - 'transport_ports' => transportports, - 'http_bound_addresses' => http_bound_addresses, - 'transport_bound_addresses' => transport_bound_addresses, - 'transport_publish_addresses' => transport_publish_addresses, - json_data['name'] => { 'settings' => nodes_data['settings'], 'http' => nodes_data['http'], 'transport' => nodes_data['transport'] } } + node = { + 'http_ports' => httpports.keys, + 'transport_ports' => transportports, + 'http_bound_addresses' => http_bound_addresses, + 'transport_bound_addresses' => transport_bound_addresses, + 'transport_publish_addresses' => transport_publish_addresses, + json_data['name'] => { + 'settings' => nodes_data['settings'], + 'http' => nodes_data['http'], + 'transport' => nodes_data['transport'] + } + } nodes.merge! node end end rescue end Facter.add(:elasticsearch) do setcode do nodes end nodes unless nodes.empty? end end # rubocop:enable Metrics/CyclomaticComplexity # rubocop:enable Metrics/PerceivedComplexity end EsFacts.run diff --git a/lib/puppet/provider/elastic_parsedfile.rb b/lib/puppet/provider/elastic_parsedfile.rb index e4f7334..cc2a6b8 100644 --- a/lib/puppet/provider/elastic_parsedfile.rb +++ b/lib/puppet/provider/elastic_parsedfile.rb @@ -1,26 +1,12 @@ require 'puppet/provider/parsedfile' # Parent class for Elasticsearch-based providers that need to access # specific configuration directories. class Puppet::Provider::ElasticParsedFile < Puppet::Provider::ParsedFile - # Find/set a shield configuration file. - # - # @return String - def self.shield_config(val) - @default_target ||= "/etc/elasticsearch/shield/#{val}" - end - # Find/set an x-pack configuration file. # # @return String def self.xpack_config(val) - @default_target ||= "/etc/elasticsearch/x-pack/#{val}" - end - - # Find/set an oss x-pack configuration file. - # - # @return String - def self.oss_xpack_config(val) @default_target ||= "/etc/elasticsearch/#{val}" end end diff --git a/lib/puppet/provider/elastic_plugin.rb b/lib/puppet/provider/elastic_plugin.rb index 07fa44e..97e4d6c 100644 --- a/lib/puppet/provider/elastic_plugin.rb +++ b/lib/puppet/provider/elastic_plugin.rb @@ -1,208 +1,161 @@ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', '..')) require 'uri' require 'puppet_x/elastic/es_versioning' require 'puppet_x/elastic/plugin_parsing' # Generalized parent class for providers that behave like Elasticsearch's plugin # command line tool. # rubocop:disable Metrics/ClassLength class Puppet::Provider::ElasticPlugin < Puppet::Provider # Elasticsearch's home directory. # # @return String def homedir case Facter.value('osfamily') when 'OpenBSD' '/usr/local/elasticsearch' else '/usr/share/elasticsearch' end end def exists? # First, attempt to list whether the named plugin exists by finding a # plugin descriptor file, which each plugin should have. We must wildcard # the name to match meta plugins, see upstream issue for this change: # https://github.com/elastic/elasticsearch/pull/28022 properties_files = Dir[File.join(@resource[:plugin_dir], plugin_path, '**', '*plugin-descriptor.properties')] return false if properties_files.empty? 
begin # Use the basic name format that the plugin tool supports in order to # determine the version from the resource name. plugin_version = Puppet_X::Elastic.plugin_version(@resource[:name]) # Naively parse the Java .properties file to check version equality. # Because we don't have the luxury of installing arbitrary gems, perform # simple parse with a degree of safety checking in the call chain # # Note that x-pack installs "meta" plugins which bundle multiple plugins # in one. Therefore, we need to find the first "sub" plugin that # indicates which version of x-pack this is. properties = properties_files.sort.map do |prop_file| IO.readlines(prop_file).map(&:strip).reject do |line| line.start_with?('#') or line.empty? end.map do |property| property.split('=') end.reject do |pairs| pairs.length != 2 end.to_h end.find { |prop| prop.key? 'version' } if properties and properties['version'] != plugin_version debug "Elasticsearch plugin #{@resource[:name]} not version #{plugin_version}, reinstalling" destroy return false end rescue ElasticPluginParseFailure debug "Failed to parse plugin version for #{@resource[:name]}" end # If there is no version string, we do not check version equality debug "No version found in #{@resource[:name]}, not enforcing any version" true end def plugin_path @resource[:plugin_path] || Puppet_X::Elastic.plugin_name(@resource[:name]) end - # Intelligently returns the correct installation arguments for version 1 - # version of Elasticsearch. + # Intelligently returns the correct installation arguments for Elasticsearch. # # @return [Array] # arguments to pass to the plugin installation utility - def install1x - if !@resource[:url].nil? - [ - Puppet_X::Elastic.plugin_name(@resource[:name]), - '--url', - @resource[:url] - ] - elsif !@resource[:source].nil? - [ - Puppet_X::Elastic.plugin_name(@resource[:name]), - '--url', - "file://#{@resource[:source]}" - ] - else - [@resource[:name]] - end - end - - # Intelligently returns the correct installation arguments for version 2 - # version of Elasticsearch. - # - # @return [Array] - # arguments to pass to the plugin installation utility - def install2x + def install_args if !@resource[:url].nil? [@resource[:url]] elsif !@resource[:source].nil? ["file://#{@resource[:source]}"] else [@resource[:name]] end end # Format proxy arguments for consumption by the elasticsearch plugin # management tool (i.e., Java properties). # # @return Array # of flags for command-line tools def proxy_args(url) parsed = URI(url) %w[http https].map do |schema| [:host, :port, :user, :password].map do |param| option = parsed.send(param) "-D#{schema}.proxy#{param.to_s.capitalize}=#{option}" unless option.nil? end end.flatten.compact end # Install this plugin on the host. - # rubocop:disable Metrics/CyclomaticComplexity def create commands = [] - commands += proxy_args(@resource[:proxy]) if is2x? and @resource[:proxy] + commands += proxy_args(@resource[:proxy]) if @resource[:proxy] commands << 'install' - commands << '--batch' if batch_capable? - commands += is1x? ? install1x : install2x + commands << '--batch' + commands += install_args debug("Commands: #{commands.inspect}") retry_count = 3 retry_times = 0 begin with_environment do plugin(commands) end rescue Puppet::ExecutionFailure => e retry_times += 1 debug("Failed to install plugin. Retrying... #{retry_times} of #{retry_count}") sleep 2 retry if retry_times < retry_count raise "Failed to install plugin. 
Received error: #{e.inspect}" end end - # rubocop:enable Metrics/CyclomaticComplexity # Remove this plugin from the host. def destroy with_environment do plugin(['remove', Puppet_X::Elastic.plugin_name(@resource[:name])]) end end - # Determine the installed version of Elasticsearch on this host. - def es_version - Puppet_X::Elastic::EsVersioning.version( - resource[:elasticsearch_package_name], resource.catalog - ) - end - - def is1x? - Puppet::Util::Package.versioncmp(es_version, '2.0.0') < 0 - end - - def is2x? - (Puppet::Util::Package.versioncmp(es_version, '2.0.0') >= 0) && \ - (Puppet::Util::Package.versioncmp(es_version, '3.0.0') < 0) - end - - def batch_capable? - Puppet::Util::Package.versioncmp(es_version, '2.2.0') >= 0 - end - # Run a command wrapped in necessary env vars def with_environment(&block) env_vars = { 'ES_JAVA_OPTS' => @resource[:java_opts], 'ES_PATH_CONF' => @resource[:configdir] } saved_vars = {} - unless @resource[:java_home].nil? or @resource[:java_home] == '' - env_vars['JAVA_HOME'] = @resource[:java_home] - end - - if !is2x? and @resource[:proxy] - env_vars['ES_JAVA_OPTS'] += proxy_args(@resource[:proxy]) - end + # Use 'java_home' param if supplied, otherwise default to Elasticsearch shipped JDK + env_vars['JAVA_HOME'] = if @resource[:java_home].nil? or @resource[:java_home] == '' + "#{homedir}/jdk" + else + @resource[:java_home] + end env_vars['ES_JAVA_OPTS'] = env_vars['ES_JAVA_OPTS'].join(' ') env_vars.each do |env_var, value| saved_vars[env_var] = ENV[env_var] ENV[env_var] = value end ret = block.yield saved_vars.each do |env_var, value| ENV[env_var] = value end ret end end diff --git a/lib/puppet/provider/elastic_user_command.rb b/lib/puppet/provider/elastic_user_command.rb index 35ef51d..b6ca8e7 100644 --- a/lib/puppet/provider/elastic_user_command.rb +++ b/lib/puppet/provider/elastic_user_command.rb @@ -1,123 +1,125 @@ # Parent provider for Elasticsearch Shield/X-Pack file-based user management # tools. class Puppet::Provider::ElasticUserCommand < Puppet::Provider attr_accessor :homedir # Elasticsearch's home directory. # # @return String def self.homedir @homedir ||= case Facter.value('osfamily') when 'OpenBSD' '/usr/local/elasticsearch' else '/usr/share/elasticsearch' end end # Run the user management command with specified tool arguments. def self.command_with_path(args, configdir = nil) options = { + :combine => true, :custom_environment => { 'ES_PATH_CONF' => configdir || '/etc/elasticsearch' - } + }, + :failonfail => true } execute( [command(:users_cli)] + (args.is_a?(Array) ? args : [args]), options ) end # Gather local file-based users into an array of Hash objects. def self.fetch_users begin output = command_with_path('list') rescue Puppet::ExecutionFailure => e debug("#fetch_users had an error: #{e.inspect}") return nil end debug("Raw command output: #{output}") output.split("\n").select { |u| # Keep only expected "user : role1,role2" formatted lines u[/^[^:]+:\s+\S+$/] }.map { |u| # Break into ["user ", " role1,role2"] u.split(':').first.strip }.map do |user| { :name => user, :ensure => :present, :provider => name } end end # Fetch an array of provider objects from the the list of local users. def self.instances fetch_users.map do |user| new user end end # Generic prefetch boilerplate. def self.prefetch(resources) instances.each do |prov| if (resource = resources[prov.name]) resource.provider = prov end end end def initialize(value = {}) super(value) @property_flush = {} end # Enforce the desired state for this user on-disk. 
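# Depending on the flushed `ensure` value, this builds an argument list for
# the command registered as :users_cli (the elasticsearch-users script in the
# current providers), roughly equivalent to (illustrative values only):
#
#   elasticsearch-users useradd some_user -p some_password
#   elasticsearch-users userdel some_user
#
# and then re-reads the on-disk user list to refresh @property_hash.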
def flush arguments = [] case @property_flush[:ensure] when :absent arguments << 'userdel' arguments << resource[:name] else arguments << 'useradd' arguments << resource[:name] arguments << '-p' << resource[:password] end self.class.command_with_path(arguments, resource[:configdir]) @property_hash = self.class.fetch_users.detect do |u| u[:name] == resource[:name] end end # Set this provider's `:ensure` property to `:present`. def create @property_flush[:ensure] = :present end def exists? @property_hash[:ensure] == :present end # Set this provider's `:ensure` property to `:absent`. def destroy @property_flush[:ensure] = :absent end # Manually set this user's password. def passwd self.class.command_with_path( [ 'passwd', resource[:name], '-p', resource[:password] ], resource[:configdir] ) end end diff --git a/lib/puppet/provider/elastic_user_roles.rb b/lib/puppet/provider/elastic_user_roles.rb index f7a6a68..ee86a4c 100644 --- a/lib/puppet/provider/elastic_user_roles.rb +++ b/lib/puppet/provider/elastic_user_roles.rb @@ -1,49 +1,49 @@ require 'puppet/provider/elastic_yaml' -# Provider to help manage file-based Shield/X-Pack user/role configuration +# Provider to help manage file-based X-Pack user/role configuration # files. class Puppet::Provider::ElasticUserRoles < Puppet::Provider::ElasticYaml # Override the ancestor `parse` method to process a users/roles file # managed by the Elasticsearch user tools. def self.parse(text) text.split("\n").map(&:strip).select do |line| # Strip comments not line.start_with? '#' and not line.empty? end.map do |line| # Turn array of roles into array of users that have the role role, users = line.split(':') users.split(',').map do |user| { user => [role] } end end.flatten.inject({}) do |hash, user| # Gather up user => role hashes by append-merging role lists hash.merge(user) { |_, o, n| o + n } end.map do |user, roles| # Map those hashes into what the provider expects { :name => user, :roles => roles } end.to_a end # Represent this user/role record as a correctly-formatted config file. def self.to_file(records) debug "Flushing: #{records.inspect}" records.map do |record| record[:roles].map do |r| { [record[:name]] => r } end end.flatten.map(&:invert).inject({}) do |acc, role| acc.merge(role) { |_, o, n| o + n } end.delete_if do |_, users| users.empty? end.map do |role, users| "#{role}:#{users.join(',')}" end.join("\n") + "\n" end def self.skip_record?(_record) false end end diff --git a/lib/puppet/provider/elasticsearch_keystore/elasticsearch_keystore.rb b/lib/puppet/provider/elasticsearch_keystore/ruby.rb similarity index 96% rename from lib/puppet/provider/elasticsearch_keystore/elasticsearch_keystore.rb rename to lib/puppet/provider/elasticsearch_keystore/ruby.rb index b21e78e..6233564 100644 --- a/lib/puppet/provider/elasticsearch_keystore/elasticsearch_keystore.rb +++ b/lib/puppet/provider/elasticsearch_keystore/ruby.rb @@ -1,166 +1,167 @@ Puppet::Type.type(:elasticsearch_keystore).provide( :elasticsearch_keystore ) do desc 'Provider for `elasticsearch-keystore` based secret management.' 
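# A minimal usage sketch (values are illustrative): this provider is driven by
# resources such as the `elasticsearch_secrets` keystore the module itself
# declares, e.g.
#
#   elasticsearch_keystore { 'elasticsearch_secrets':
#     configdir => '/etc/elasticsearch',
#     purge     => false,
#     settings  => { 'cloud.aws.access_key' => 'AKIA...' },
#   }
#
# Settings are added via `elasticsearch-keystore add --force --stdin`, so the
# secret values are passed on stdin rather than as command-line arguments.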
def self.defaults_dir @defaults_dir ||= case Facter.value('osfamily') when 'RedHat' '/etc/sysconfig' else '/etc/default' end end def self.home_dir @home_dir ||= case Facter.value('osfamily') when 'OpenBSD' '/usr/local/elasticsearch' else '/usr/share/elasticsearch' end end attr_accessor :defaults_dir, :home_dir commands :keystore => "#{home_dir}/bin/elasticsearch-keystore" def self.run_keystore(args, instance, configdir = '/etc/elasticsearch', stdin = nil) options = { :custom_environment => { - 'ES_INCLUDE' => File.join(defaults_dir, "elasticsearch-#{instance}"), + 'ES_INCLUDE' => File.join(defaults_dir, "elasticsearch-#{instance}"), 'ES_PATH_CONF' => "#{configdir}/#{instance}" }, - :uid => 'elasticsearch', - :gid => 'elasticsearch' + :uid => 'elasticsearch', + :gid => 'elasticsearch', + :failonfail => true } unless stdin.nil? stdinfile = Tempfile.new('elasticsearch-keystore') stdinfile << stdin stdinfile.flush options[:stdinfile] = stdinfile.path end begin stdout = execute([command(:keystore)] + args, options) ensure unless stdin.nil? stdinfile.close stdinfile.unlink end end stdout.exitstatus.zero? ? stdout : raise(Puppet::Error, stdout) end def self.present_keystores Dir[File.join(%w[/ etc elasticsearch *])].select do |directory| File.exist? File.join(directory, 'elasticsearch.keystore') end.map do |instance| settings = run_keystore(['list'], File.basename(instance)).split("\n") { :name => File.basename(instance), :ensure => :present, :provider => name, :settings => settings } end end def self.instances present_keystores.map do |keystore| new keystore end end def self.prefetch(resources) instances.each do |prov| if (resource = resources[prov.name]) resource.provider = prov end end end def initialize(value = {}) super(value) @property_flush = {} end # rubocop:disable Metrics/CyclomaticComplexity # rubocop:disable Metrics/PerceivedComplexity def flush case @property_flush[:ensure] when :present debug(self.class.run_keystore(['create'], resource[:name], resource[:configdir])) @property_flush[:settings] = resource[:settings] when :absent File.delete(File.join([ '/', 'etc', 'elasticsearch', resource[:instance], 'elasticsearch.keystore' ])) end # Note that since the property is :array_matching => :all, we have to # expect that the hash is wrapped in an array. if @property_flush[:settings] and not @property_flush[:settings].first.empty? # Flush properties that _should_ be present @property_flush[:settings].first.each_pair do |setting, value| next unless @property_hash[:settings].nil? \ or not @property_hash[:settings].include? setting debug(self.class.run_keystore( ['add', '--force', '--stdin', setting], resource[:name], resource[:configdir], value )) end # Remove properties that are no longer present if resource[:purge] and not (@property_hash.nil? or @property_hash[:settings].nil?) (@property_hash[:settings] - @property_flush[:settings].first.keys).each do |setting| debug(self.class.run_keystore( ['remove', setting], resource[:name], resource[:configdir] )) end end end @property_hash = self.class.present_keystores.detect do |u| u[:name] == resource[:name] end end # rubocop:enable Metrics/CyclomaticComplexity # rubocop:enable Metrics/PerceivedComplexity # settings property setter # # @return [Hash] settings def settings=(new_settings) @property_flush[:settings] = new_settings end # settings property getter # # @return [Hash] settings def settings @property_hash[:settings] end # Sets the ensure property in the @property_flush hash. 
# # @return [Symbol] :present def create @property_flush[:ensure] = :present end # Determine whether this resource is present on the system. # # @return [Boolean] def exists? @property_hash[:ensure] == :present end # Set flushed ensure property to absent. # # @return [Symbol] :absent def destroy @property_flush[:ensure] = :absent end end diff --git a/lib/puppet/provider/elasticsearch_license/shield.rb b/lib/puppet/provider/elasticsearch_license/shield.rb deleted file mode 100644 index 19e0bbc..0000000 --- a/lib/puppet/provider/elasticsearch_license/shield.rb +++ /dev/null @@ -1,31 +0,0 @@ -require 'puppet/provider/elastic_rest' - -Puppet::Type.type(:elasticsearch_license).provide( - :shield, - :api_resource_style => :bare, - :parent => Puppet::Provider::ElasticREST, - :metadata => :content, - :metadata_pipeline => [ - lambda { |data| Puppet_X::Elastic.deep_to_s data }, - lambda { |data| Puppet_X::Elastic.deep_to_i data } - ], - :api_uri => '_license', - :query_string => { - 'acknowledge' => 'true' - } -) do - desc 'A REST API based provider to manage Elasticsearch Shield licenses.' - - mk_resource_methods - - def self.process_body(body) - JSON.parse(body).map do |_object_name, api_object| - { - :name => name.to_s, - :ensure => :present, - metadata => { 'license' => process_metadata(api_object) }, - :provider => name - } - end - end -end diff --git a/lib/puppet/provider/elasticsearch_license/x-pack.rb b/lib/puppet/provider/elasticsearch_license/xpack.rb similarity index 92% rename from lib/puppet/provider/elasticsearch_license/x-pack.rb rename to lib/puppet/provider/elasticsearch_license/xpack.rb index 27f1fdb..d7dda2c 100644 --- a/lib/puppet/provider/elasticsearch_license/x-pack.rb +++ b/lib/puppet/provider/elasticsearch_license/xpack.rb @@ -1,33 +1,31 @@ -# rubocop:disable Style/FileName -# rubocop:enable Style/FileName require 'puppet/provider/elastic_rest' Puppet::Type.type(:elasticsearch_license).provide( :xpack, :api_resource_style => :bare, :parent => Puppet::Provider::ElasticREST, :metadata => :content, :metadata_pipeline => [ lambda { |data| Puppet_X::Elastic.deep_to_s data }, lambda { |data| Puppet_X::Elastic.deep_to_i data } ], :api_uri => '_xpack/license', :query_string => { 'acknowledge' => 'true' } ) do desc 'A REST API based provider to manage Elasticsearch X-Pack licenses.' mk_resource_methods def self.process_body(body) JSON.parse(body).map do |_object_name, api_object| { :name => name.to_s, :ensure => :present, metadata => { 'license' => process_metadata(api_object) }, :provider => name } end end end diff --git a/lib/puppet/provider/elasticsearch_plugin/plugin.rb b/lib/puppet/provider/elasticsearch_plugin/plugin.rb deleted file mode 100644 index 33b6c8c..0000000 --- a/lib/puppet/provider/elasticsearch_plugin/plugin.rb +++ /dev/null @@ -1,18 +0,0 @@ -require 'puppet/provider/elastic_plugin' - -Puppet::Type.type(:elasticsearch_plugin).provide( - :plugin, - :parent => Puppet::Provider::ElasticPlugin -) do - desc 'Pre-5.x provider for Elasticsearch bin/plugin command operations.' 
- - case Facter.value('osfamily') - when 'OpenBSD' - commands :plugin => '/usr/local/elasticsearch/bin/plugin' - commands :es => '/usr/local/elasticsearch/bin/elasticsearch' - commands :javapathhelper => '/usr/local/bin/javaPathHelper' - else - commands :plugin => '/usr/share/elasticsearch/bin/plugin' - commands :es => '/usr/share/elasticsearch/bin/elasticsearch' - end -end diff --git a/lib/puppet/provider/elasticsearch_plugin/elasticsearch_plugin.rb b/lib/puppet/provider/elasticsearch_plugin/ruby.rb similarity index 100% rename from lib/puppet/provider/elasticsearch_plugin/elasticsearch_plugin.rb rename to lib/puppet/provider/elasticsearch_plugin/ruby.rb diff --git a/lib/puppet/provider/elasticsearch_role/oss_xpack.rb b/lib/puppet/provider/elasticsearch_role/oss_xpack.rb deleted file mode 100644 index 6d1ac0d..0000000 --- a/lib/puppet/provider/elasticsearch_role/oss_xpack.rb +++ /dev/null @@ -1,12 +0,0 @@ -require 'puppet/provider/elastic_yaml' - -Puppet::Type.type(:elasticsearch_role).provide( - :oss_xpack, - :parent => Puppet::Provider::ElasticYaml, - :metadata => :privileges -) do - desc 'Provider for OSS X-Pack role resources.' - - oss_xpack_config 'roles.yml' - confine :exists => default_target -end diff --git a/lib/puppet/provider/elasticsearch_role/xpack.rb b/lib/puppet/provider/elasticsearch_role/ruby.rb similarity index 83% rename from lib/puppet/provider/elasticsearch_role/xpack.rb rename to lib/puppet/provider/elasticsearch_role/ruby.rb index d6b60d3..d05e87e 100644 --- a/lib/puppet/provider/elasticsearch_role/xpack.rb +++ b/lib/puppet/provider/elasticsearch_role/ruby.rb @@ -1,12 +1,11 @@ require 'puppet/provider/elastic_yaml' Puppet::Type.type(:elasticsearch_role).provide( - :xpack, + :ruby, :parent => Puppet::Provider::ElasticYaml, :metadata => :privileges ) do desc 'Provider for X-Pack role resources.' xpack_config 'roles.yml' - confine :exists => default_target end diff --git a/lib/puppet/provider/elasticsearch_role/shield.rb b/lib/puppet/provider/elasticsearch_role/shield.rb deleted file mode 100644 index bd465a0..0000000 --- a/lib/puppet/provider/elasticsearch_role/shield.rb +++ /dev/null @@ -1,12 +0,0 @@ -require 'puppet/provider/elastic_yaml' - -Puppet::Type.type(:elasticsearch_role).provide( - :shield, - :parent => Puppet::Provider::ElasticYaml, - :metadata => :privileges -) do - desc 'Provider for Shield role resources.' - - shield_config 'roles.yml' - confine :exists => default_target -end diff --git a/lib/puppet/provider/elasticsearch_role_mapping/oss_xpack.rb b/lib/puppet/provider/elasticsearch_role_mapping/oss_xpack.rb deleted file mode 100644 index ff294e6..0000000 --- a/lib/puppet/provider/elasticsearch_role_mapping/oss_xpack.rb +++ /dev/null @@ -1,12 +0,0 @@ -require 'puppet/provider/elastic_yaml' - -Puppet::Type.type(:elasticsearch_role_mapping).provide( - :oss_xpack, - :parent => Puppet::Provider::ElasticYaml, - :metadata => :mappings -) do - desc 'Provider for OSS X-Pack role mappings.' 
- - oss_xpack_config 'role_mapping.yml' - confine :exists => default_target -end diff --git a/lib/puppet/provider/elasticsearch_role_mapping/xpack.rb b/lib/puppet/provider/elasticsearch_role_mapping/ruby.rb similarity index 84% rename from lib/puppet/provider/elasticsearch_role_mapping/xpack.rb rename to lib/puppet/provider/elasticsearch_role_mapping/ruby.rb index 765c450..c73ea00 100644 --- a/lib/puppet/provider/elasticsearch_role_mapping/xpack.rb +++ b/lib/puppet/provider/elasticsearch_role_mapping/ruby.rb @@ -1,12 +1,11 @@ require 'puppet/provider/elastic_yaml' Puppet::Type.type(:elasticsearch_role_mapping).provide( - :xpack, + :ruby, :parent => Puppet::Provider::ElasticYaml, :metadata => :mappings ) do desc 'Provider for X-Pack role mappings.' xpack_config 'role_mapping.yml' - confine :exists => default_target end diff --git a/lib/puppet/provider/elasticsearch_role_mapping/shield.rb b/lib/puppet/provider/elasticsearch_role_mapping/shield.rb deleted file mode 100644 index 0a1775e..0000000 --- a/lib/puppet/provider/elasticsearch_role_mapping/shield.rb +++ /dev/null @@ -1,12 +0,0 @@ -require 'puppet/provider/elastic_yaml' - -Puppet::Type.type(:elasticsearch_role_mapping).provide( - :shield, - :parent => Puppet::Provider::ElasticYaml, - :metadata => :mappings -) do - desc 'Provider for Shield role mappings.' - - shield_config 'role_mapping.yml' - confine :exists => default_target -end diff --git a/lib/puppet/provider/elasticsearch_service_file/ruby.rb b/lib/puppet/provider/elasticsearch_service_file/ruby.rb deleted file mode 100644 index 08cd19a..0000000 --- a/lib/puppet/provider/elasticsearch_service_file/ruby.rb +++ /dev/null @@ -1,90 +0,0 @@ -$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', '..', '..')) - -require 'pathname' -require 'puppet/util/filetype' - -require 'puppet_x/elastic/es_versioning' - -Puppet::Type.type(:elasticsearch_service_file).provide(:ruby) do - desc <<-ENDHEREDOC - Provides management of elasticsearch service files. - ENDHEREDOC - - mk_resource_methods - - def initialize(value = {}) - super(value) - @property_flush = {} - end - - def self.services - [ - '/usr/lib/systemd/system/elasticsearch-', - '/lib/systemd/system/elasticsearch-', - '/etc/init.d/elasticsearch.', - '/etc/init.d/elasticsearch-', - '/etc/rc.d/elasticsearch_' - ].map do |path| - Pathname.glob(path + '*').map do |service| - { - :name => service.to_s, - :ensure => :present, - :provider => :ruby, - :content => Puppet::Util::FileType.filetype(:flat).new(service.to_s).read - } - end - end.flatten.compact - end - - def self.instances - services.map do |instance| - new instance - end - end - - def self.prefetch(resources) - instances.each do |prov| - if (resource = resources[prov.name]) - resource.provider = prov - end - end - end - - def create - @property_flush[:ensure] = :present - end - - def exists? - @property_hash[:ensure] == :present - end - - def destroy? - @property_flush[:ensure] = :absent - end - - def flush - begin - opt_flag, opt_flags = Puppet_X::Elastic::EsVersioning.opt_flags( - resource[:package_name], resource.catalog - ) - rescue ElasticsearchPackageNotFoundError - # If the Elasticsearch package is not present at all, we don't know what - # version is present, so we just set these as empty values for the - # template. - opt_flag = '' - opt_flags = [] - end - - # This should only be present on systemd systems. 
- opt_flags.delete('--quiet') unless resource[:name].include?('systemd') - - template = ERB.new(resource[:content], 0, '-') - result = template.result(binding) - - Puppet::Util::FileType.filetype(:flat).new(resource[:name]).write(result) - - @property_hash = self.class.services.detect do |t| - t[:name] == resource[:name] - end - end -end # of .provide diff --git a/lib/puppet/provider/elasticsearch_user/esusers.rb b/lib/puppet/provider/elasticsearch_user/esusers.rb deleted file mode 100644 index 9815cc5..0000000 --- a/lib/puppet/provider/elasticsearch_user/esusers.rb +++ /dev/null @@ -1,15 +0,0 @@ -require File.join(File.dirname(__FILE__), '..', '..', '..', 'puppet/provider/elastic_user_command') - -Puppet::Type.type(:elasticsearch_user).provide( - :esusers, - :parent => Puppet::Provider::ElasticUserCommand -) do - desc 'Provider for Shield file (esusers) user resources.' - - has_feature :manages_plaintext_passwords - - mk_resource_methods - - commands :users_cli => "#{homedir}/bin/shield/esusers" - commands :es => "#{homedir}/bin/elasticsearch" -end diff --git a/lib/puppet/provider/elasticsearch_user/elasticsearch_users.rb b/lib/puppet/provider/elasticsearch_user/ruby.rb similarity index 84% rename from lib/puppet/provider/elasticsearch_user/elasticsearch_users.rb rename to lib/puppet/provider/elasticsearch_user/ruby.rb index 9bfe962..d12c4f6 100644 --- a/lib/puppet/provider/elasticsearch_user/elasticsearch_users.rb +++ b/lib/puppet/provider/elasticsearch_user/ruby.rb @@ -1,15 +1,15 @@ require File.join(File.dirname(__FILE__), '..', '..', '..', 'puppet/provider/elastic_user_command') Puppet::Type.type(:elasticsearch_user).provide( - :elasticsearch_users, + :ruby, :parent => Puppet::Provider::ElasticUserCommand ) do - desc 'Provider for OSS X-Pack user resources.' + desc 'Provider for X-Pack user resources.' has_feature :manages_plaintext_passwords mk_resource_methods commands :users_cli => "#{homedir}/bin/elasticsearch-users" commands :es => "#{homedir}/bin/elasticsearch" end diff --git a/lib/puppet/provider/elasticsearch_user/users.rb b/lib/puppet/provider/elasticsearch_user/users.rb deleted file mode 100644 index 08f01a8..0000000 --- a/lib/puppet/provider/elasticsearch_user/users.rb +++ /dev/null @@ -1,16 +0,0 @@ -require File.join(File.dirname(__FILE__), '..', '..', '..', 'puppet/provider/elastic_user_command') - -Puppet::Type.type(:elasticsearch_user).provide( - :users, - :parent => Puppet::Provider::ElasticUserCommand -) do - desc 'Provider for X-Pack file (users) user resources.' - confine :false => (Puppet::FileSystem.exist? "#{homedir}/bin/elasticsearch-users") - - has_feature :manages_plaintext_passwords - - mk_resource_methods - - commands :users_cli => "#{homedir}/bin/x-pack/users" - commands :es => "#{homedir}/bin/elasticsearch" -end diff --git a/lib/puppet/provider/elasticsearch_user_file/oss_xpack.rb b/lib/puppet/provider/elasticsearch_user_file/oss_xpack.rb deleted file mode 100644 index a160bea..0000000 --- a/lib/puppet/provider/elasticsearch_user_file/oss_xpack.rb +++ /dev/null @@ -1,29 +0,0 @@ -require 'puppet/provider/elastic_parsedfile' - -Puppet::Type.type(:elasticsearch_user_file).provide( - :oss_xpack, - :parent => Puppet::Provider::ElasticParsedFile -) do - desc 'Provider for OSS X-Pack users using plain files.' 
- - oss_xpack_config 'users' - confine :exists => default_target - - has_feature :manages_encrypted_passwords - - text_line :comment, - :match => /^\s*#/ - - record_line :oss_xpack, - :fields => %w[name hashed_password], - :separator => ':', - :joiner => ':' - - def self.valid_attr?(klass, attr_name) - if klass.respond_to? :parameters - klass.parameters.include?(attr_name) - else - true - end - end -end diff --git a/lib/puppet/provider/elasticsearch_user_file/xpack.rb b/lib/puppet/provider/elasticsearch_user_file/ruby.rb similarity index 81% rename from lib/puppet/provider/elasticsearch_user_file/xpack.rb rename to lib/puppet/provider/elasticsearch_user_file/ruby.rb index 318ad35..7438be0 100644 --- a/lib/puppet/provider/elasticsearch_user_file/xpack.rb +++ b/lib/puppet/provider/elasticsearch_user_file/ruby.rb @@ -1,29 +1,28 @@ require 'puppet/provider/elastic_parsedfile' Puppet::Type.type(:elasticsearch_user_file).provide( - :xpack, + :ruby, :parent => Puppet::Provider::ElasticParsedFile ) do - desc 'Provider for X-Pack esusers using plain files.' + desc 'Provider for X-Pack elasticsearch users using plain files.' xpack_config 'users' - confine :exists => default_target has_feature :manages_encrypted_passwords text_line :comment, :match => /^\s*#/ - record_line :xpack, + record_line :ruby, :fields => %w[name hashed_password], :separator => ':', :joiner => ':' def self.valid_attr?(klass, attr_name) if klass.respond_to? :parameters klass.parameters.include?(attr_name) else true end end end diff --git a/lib/puppet/provider/elasticsearch_user_file/shield.rb b/lib/puppet/provider/elasticsearch_user_file/shield.rb deleted file mode 100644 index a6f15f6..0000000 --- a/lib/puppet/provider/elasticsearch_user_file/shield.rb +++ /dev/null @@ -1,29 +0,0 @@ -require 'puppet/provider/elastic_parsedfile' - -Puppet::Type.type(:elasticsearch_user_file).provide( - :shield, - :parent => Puppet::Provider::ElasticParsedFile -) do - desc 'Provider for Shield esusers using plain files.' - - shield_config 'users' - confine :exists => default_target - - has_feature :manages_encrypted_passwords - - text_line :comment, - :match => /^\s*#/ - - record_line :shield, - :fields => %w[name hashed_password], - :separator => ':', - :joiner => ':' - - def self.valid_attr?(klass, attr_name) - if klass.respond_to? 
:parameters - klass.parameters.include?(attr_name) - else - true - end - end -end diff --git a/lib/puppet/provider/elasticsearch_user_roles/oss_xpack.rb b/lib/puppet/provider/elasticsearch_user_roles/oss_xpack.rb deleted file mode 100644 index b2357d6..0000000 --- a/lib/puppet/provider/elasticsearch_user_roles/oss_xpack.rb +++ /dev/null @@ -1,11 +0,0 @@ -require 'puppet/provider/elastic_user_roles' - -Puppet::Type.type(:elasticsearch_user_roles).provide( - :oss_xpack, - :parent => Puppet::Provider::ElasticUserRoles -) do - desc 'Provider for X-Pack user roles (parsed file.)' - - oss_xpack_config 'users_roles' - confine :exists => default_target -end diff --git a/lib/puppet/provider/elasticsearch_user_roles/xpack.rb b/lib/puppet/provider/elasticsearch_user_roles/ruby.rb similarity index 84% rename from lib/puppet/provider/elasticsearch_user_roles/xpack.rb rename to lib/puppet/provider/elasticsearch_user_roles/ruby.rb index 0b1a082..a5da043 100644 --- a/lib/puppet/provider/elasticsearch_user_roles/xpack.rb +++ b/lib/puppet/provider/elasticsearch_user_roles/ruby.rb @@ -1,11 +1,10 @@ require 'puppet/provider/elastic_user_roles' Puppet::Type.type(:elasticsearch_user_roles).provide( - :xpack, + :ruby, :parent => Puppet::Provider::ElasticUserRoles ) do desc 'Provider for X-Pack user roles (parsed file.)' xpack_config 'users_roles' - confine :exists => default_target end diff --git a/lib/puppet/provider/elasticsearch_user_roles/shield.rb b/lib/puppet/provider/elasticsearch_user_roles/shield.rb deleted file mode 100644 index 52bd8a0..0000000 --- a/lib/puppet/provider/elasticsearch_user_roles/shield.rb +++ /dev/null @@ -1,11 +0,0 @@ -require 'puppet/provider/elastic_user_roles' - -Puppet::Type.type(:elasticsearch_user_roles).provide( - :shield, - :parent => Puppet::Provider::ElasticUserRoles -) do - desc 'Provider for Shield user roles (parsed file.)' - - shield_config 'users_roles' - confine :exists => default_target -end diff --git a/lib/puppet/provider/es_instance_conn_validator/tcp_port.rb b/lib/puppet/provider/es_instance_conn_validator/tcp_port.rb index a0b4038..e492712 100644 --- a/lib/puppet/provider/es_instance_conn_validator/tcp_port.rb +++ b/lib/puppet/provider/es_instance_conn_validator/tcp_port.rb @@ -1,50 +1,51 @@ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', '..', '..')) require 'puppet/util/es_instance_validator' # This file contains a provider for the resource type `es_instance_conn_validator`, -# which validates the Elasticsearch instance connection by attempting an https connection. +# which validates the Elasticsearch connection by attempting a tcp connection. Puppet::Type.type(:es_instance_conn_validator).provide(:tcp_port) do desc "A provider for the resource type `es_instance_conn_validator`, which validates the connection by attempting an https - connection to the Elasticsearch instance." + connection to Elasticsearch." def exists? start_time = Time.now timeout = resource[:timeout] + sleep_interval = resource[:sleep_interval] success = validator.attempt_connection while success == false && ((Time.now - start_time) < timeout) - # It can take several seconds for the Elasticsearch instance to start up; + # It can take several seconds for the Elasticsearch to start up; # especially on the first install. Therefore, our first connection attempt - # may fail. Here we have somewhat arbitrarily chosen to retry every 2 + # may fail. Here we have somewhat arbitrarily chosen to retry every 10 # seconds until the configurable timeout has expired. 
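# The retry interval is no longer hard-coded: it now comes from the new
# `sleep_interval` parameter on the es_instance_conn_validator type
# (10 seconds by default), so callers can tune how often the validator polls
# Elasticsearch.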
- Puppet.debug('Failed to connect to the Elasticsearch instance; sleeping 2 seconds before retry') - sleep 2 + Puppet.debug("Failed to connect to Elasticsearch; sleeping #{sleep_interval} seconds before retry") + sleep sleep_interval success = validator.attempt_connection end if success - Puppet.debug("Connected to the ES instance in #{Time.now - start_time} seconds.") + Puppet.debug("Connected to the Elasticsearch in #{Time.now - start_time} seconds.") else - Puppet.notice("Failed to connect to the ES instance within timeout window of #{timeout} seconds; giving up.") + Puppet.notice("Failed to connect to the Elasticsearch within timeout window of #{timeout} seconds; giving up.") end success end def create # If `#create` is called, that means that `#exists?` returned false, which # means that the connection could not be established... so we need to # cause a failure here. - raise Puppet::Error, "Unable to connect to ES instance ! (#{@validator.instance_server}:#{@validator.instance_port})" + raise Puppet::Error, "Unable to connect to Elasticsearch! (#{@validator.instance_server}:#{@validator.instance_port})" end private # @api private def validator @validator ||= Puppet::Util::EsInstanceValidator.new(resource[:server], resource[:port]) end end diff --git a/lib/puppet/type/elasticsearch_service_file.rb b/lib/puppet/type/elasticsearch_service_file.rb deleted file mode 100644 index 25b26a1..0000000 --- a/lib/puppet/type/elasticsearch_service_file.rb +++ /dev/null @@ -1,96 +0,0 @@ -$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', '..')) - -require 'puppet/util/checksums' - -require 'puppet_x/elastic/es_versioning' - -Puppet::Type.newtype(:elasticsearch_service_file) do - @doc = 'Manages elasticsearch service files.' - - ensurable - - newparam(:name, :namevar => true) do - desc 'Fully qualified path to the service file.' - end - - newproperty(:content) do - include Puppet::Util::Checksums - - desc 'Service file contents in erb template form.' - - # Interploate the erb source before comparing it to the on-disk - # init script - def insync?(is) - _opt_flag, opt_flags = Puppet_X::Elastic::EsVersioning.opt_flags( - resource[:package_name], resource.catalog - ) - # This should only be present on systemd systems. - opt_flags.delete('--quiet') unless resource[:name].include?('systemd') - - template = ERB.new(should, 0, '-') - is == template.result(binding) - rescue ElasticsearchPackageNotFoundError - # This behavior is extremely confusing because of the fact that while - # someone should be able to indicate that an instance should be absent, - # if there is no service file to query via Puppet providers, it can't - # determine this fact. If no package exists and thus `absent` has been - # instructed, indicate that the template contents are correct, because - # we don't really care what's in there anyway - the service file is for - # an absent instance of Elasticsearch anyway. - return true - end - - # Represent as a checksum, not the whole file - def change_to_s(currentvalue, newvalue) - algo = Puppet[:digest_algorithm].to_sym - - if currentvalue == :absent - return "defined content as '#{send(algo, newvalue)}'" - elsif newvalue == :absent - return "undefined content from '#{send(algo, currentvalue)}'" - else - return "content changed '#{send(algo, currentvalue)}' to '#{send(algo, newvalue)}'" - end - end - end - - newparam(:defaults_location) do - desc 'File path to defaults file.' - end - - newparam(:group) do - desc 'Group to run service under.' 
- end - - newparam(:homedir) do - desc 'Elasticsearch home directory.' - end - - newparam(:instance) do - desc 'Elasticsearch instance name.' - end - - newparam(:memlock) do - desc 'Memlock setting for service.' - end - - newparam(:nofile) do - desc 'Service NOFILE ulimit.' - end - - newparam(:nproc) do - desc 'Service NPROC ulimit.' - end - - newparam(:package_name) do - desc 'Name of the system Elasticsearch package.' - end - - newparam(:pid_dir) do - desc 'Directory to use for storing service PID.' - end - - newparam(:user) do - desc 'User to run service under.' - end -end diff --git a/lib/puppet/type/es_instance_conn_validator.rb b/lib/puppet/type/es_instance_conn_validator.rb index b4bc923..938c626 100644 --- a/lib/puppet/type/es_instance_conn_validator.rb +++ b/lib/puppet/type/es_instance_conn_validator.rb @@ -1,33 +1,45 @@ Puppet::Type.newtype(:es_instance_conn_validator) do @doc = "Verify that a connection can be successfully established between a - node and the Elasticsearch instance. It could potentially be used for other - purposes such as monitoring." + node and Elasticsearch. It could potentially be used for other purposes + such as monitoring." ensurable newparam(:name, :namevar => true) do desc 'An arbitrary name used as the identity of the resource.' end newparam(:server) do - desc 'DNS name or IP address of the server where Elasticsearch instance should be running.' + desc 'DNS name or IP address of the server where Elasticsearch should be running.' defaultto 'localhost' end newparam(:port) do desc 'The port that the Elasticsearch instance should be listening on.' defaultto 9200 end newparam(:timeout) do - desc 'The max number of seconds that the validator should wait before giving up and deciding that the Elasticsearch instance is not running; defaults to 60 seconds.' + desc 'The max number of seconds that the validator should wait before giving up and deciding that Elasticsearch is not running; defaults to 60 seconds.' defaultto 60 validate do |value| # This will raise an error if the string is not convertible to an integer Integer(value) end munge do |value| Integer(value) end end + + newparam(:sleep_interval) do + desc 'The number of seconds that the validator should wait before retrying the connection to Elasticsearch; defaults to 10 seconds.' + defaultto 10 + validate do |value| + # This will raise an error if the string is not convertible to an integer + Integer(value) + end + munge do |value| + Integer(value) + end + end end diff --git a/manifests/config.pp b/manifests/config.pp index aaae820..fdd699c 100644 --- a/manifests/config.pp +++ b/manifests/config.pp @@ -1,175 +1,225 @@ # This class exists to coordinate all configuration related actions, # functionality and logical units in a central place. # # It is not intended to be used directly by external resources like node # definitions or other modules. 
# # @example importing this class into other classes to use its functionality: # class { 'elasticsearch::config': } # # @author Richard Pijnenburg # @author Tyler Langlois +# @author Gavin Williams # class elasticsearch::config { #### Configuration Exec { path => [ '/bin', '/usr/bin', '/usr/local/bin' ], cwd => '/', } + $init_defaults = merge( + { + 'MAX_OPEN_FILES' => '65535', + }, + $elasticsearch::init_defaults + ) + if ( $elasticsearch::ensure == 'present' ) { file { + $elasticsearch::homedir: + ensure => 'directory', + group => $elasticsearch::elasticsearch_group, + owner => $elasticsearch::elasticsearch_user; $elasticsearch::configdir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, - owner => 'root', + owner => $elasticsearch::elasticsearch_user, mode => '2750'; $elasticsearch::datadir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, - owner => $elasticsearch::elasticsearch_user; + owner => $elasticsearch::elasticsearch_user, + mode => '2750'; $elasticsearch::logdir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, owner => $elasticsearch::elasticsearch_user, - mode => '0750'; - $elasticsearch::_plugindir: + mode => '2750'; + $elasticsearch::real_plugindir: ensure => 'directory', group => $elasticsearch::elasticsearch_group, owner => $elasticsearch::elasticsearch_user, mode => 'o+Xr'; "${elasticsearch::homedir}/lib": ensure => 'directory', group => '0', owner => 'root', recurse => true; - $elasticsearch::homedir: - ensure => 'directory', - group => $elasticsearch::elasticsearch_group, - owner => $elasticsearch::elasticsearch_user; - "${elasticsearch::homedir}/templates_import": - ensure => 'directory', - group => $elasticsearch::elasticsearch_group, - owner => $elasticsearch::elasticsearch_user, - mode => '0755'; - "${elasticsearch::homedir}/scripts": - ensure => 'directory', - group => $elasticsearch::elasticsearch_group, - owner => $elasticsearch::elasticsearch_user, - mode => '0755'; - "${elasticsearch::configdir}/scripts": - ensure => 'directory', - source => "${elasticsearch::homedir}/scripts", - mode => '0755', - recurse => 'remote', - owner => $elasticsearch::elasticsearch_user, - group => $elasticsearch::elasticsearch_group; - '/etc/elasticsearch/elasticsearch.yml': - ensure => 'absent'; - '/etc/elasticsearch/jvm.options': - ensure => 'absent'; - '/etc/elasticsearch/logging.yml': - ensure => 'absent'; - '/etc/elasticsearch/log4j2.properties': - ensure => 'absent'; } - if $elasticsearch::pid_dir { - file { $elasticsearch::pid_dir: - ensure => 'directory', - group => undef, - owner => $elasticsearch::elasticsearch_user, - recurse => true, + # Defaults file, either from file source or from hash to augeas commands + if ($elasticsearch::init_defaults_file != undef) { + file { "${elasticsearch::defaults_location}/elasticsearch": + ensure => $elasticsearch::ensure, + source => $elasticsearch::init_defaults_file, + owner => 'root', + group => $elasticsearch::elasticsearch_group, + mode => '0660', + before => Service['elasticsearch'], + notify => $elasticsearch::_notify_service, + } + } else { + augeas { "${elasticsearch::defaults_location}/elasticsearch": + incl => "${elasticsearch::defaults_location}/elasticsearch", + lens => 'Shellvars.lns', + changes => template("${module_name}/etc/sysconfig/defaults.erb"), + before => Service['elasticsearch'], + notify => $elasticsearch::_notify_service, } + } - if ($elasticsearch::service_provider == 'systemd') { - $group = $elasticsearch::elasticsearch_group - $user = 
$elasticsearch::elasticsearch_user - $pid_dir = $elasticsearch::pid_dir + # Generate config file + $_config = deep_implode($elasticsearch::config) - file { '/usr/lib/tmpfiles.d/elasticsearch.conf': - ensure => 'file', - content => template("${module_name}/usr/lib/tmpfiles.d/elasticsearch.conf.erb"), - group => '0', - owner => 'root', - } + # Generate SSL config + if $elasticsearch::ssl { + if ($elasticsearch::keystore_password == undef) { + fail('keystore_password required') } - } - if ($elasticsearch::service_provider == 'systemd') { - # Mask default unit (from package) - service { 'elasticsearch' : - ensure => false, - enable => 'mask', - provider => $elasticsearch::service_provider, + if ($elasticsearch::keystore_path == undef) { + $_keystore_path = "${elasticsearch::configdir}/elasticsearch.ks" + } else { + $_keystore_path = $elasticsearch::keystore_path } - } else { - service { 'elasticsearch': - ensure => false, - enable => false, + + # Set the correct xpack. settings based on ES version + if (versioncmp($elasticsearch::version, '7') >= 0) { + $_tls_config = { + 'xpack.security.http.ssl.enabled' => true, + 'xpack.security.http.ssl.keystore.path' => $_keystore_path, + 'xpack.security.http.ssl.keystore.password' => $elasticsearch::keystore_password, + 'xpack.security.transport.ssl.enabled' => true, + 'xpack.security.transport.ssl.keystore.path' => $_keystore_path, + 'xpack.security.transport.ssl.keystore.password' => $elasticsearch::keystore_password, + } + } + else { + $_tls_config = { + 'xpack.security.transport.ssl.enabled' => true, + 'xpack.security.http.ssl.enabled' => true, + 'xpack.ssl.keystore.path' => $_keystore_path, + 'xpack.ssl.keystore.password' => $elasticsearch::keystore_password, + } } - } - if $elasticsearch::defaults_location { - augeas { "${elasticsearch::defaults_location}/elasticsearch": - incl => "${elasticsearch::defaults_location}/elasticsearch", - lens => 'Shellvars.lns', - changes => [ - 'rm CONF_FILE', - 'rm CONF_DIR', - 'rm ES_PATH_CONF', - ], + # Trust CA Certificate + java_ks { 'elasticsearch_ca': + ensure => 'latest', + certificate => $elasticsearch::ca_certificate, + target => $_keystore_path, + password => $elasticsearch::keystore_password, + trustcacerts => true, } - file { "${elasticsearch::defaults_location}/elasticsearch": - ensure => 'file', - group => $elasticsearch::elasticsearch_group, - owner => $elasticsearch::elasticsearch_user, - mode => '0640'; + # Load node certificate and private key + java_ks { 'elasticsearch_node': + ensure => 'latest', + certificate => $elasticsearch::certificate, + private_key => $elasticsearch::private_key, + target => $_keystore_path, + password => $elasticsearch::keystore_password, } + } else { + $_tls_config = {} } - if $::elasticsearch::security_plugin != undef and ($::elasticsearch::security_plugin in ['shield', 'x-pack']) { - file { "${::elasticsearch::configdir}/${::elasticsearch::security_plugin}" : - ensure => 'directory', - owner => 'root', - group => $elasticsearch::elasticsearch_group, - mode => '0750', - } + # # Logging file or hash + # if ($elasticsearch::logging_file != undef) { + # $_log4j_content = undef + # } else { + # if ($elasticsearch::logging_template != undef ) { + # $_log4j_content = template($elasticsearch::logging_template) + # } else { + # $_log4j_content = template("${module_name}/etc/elasticsearch/log4j2.properties.erb") + # } + # $_logging_source = undef + # } + # file { + # "${elasticsearch::configdir}/log4j2.properties": + # ensure => file, + # content => $_log4j_content, + # source 
=> $_logging_source, + # mode => '0644', + # notify => $elasticsearch::_notify_service, + # require => Class['elasticsearch::package'], + # before => Class['elasticsearch::service'], + # } + + # Generate Elasticsearch config + $_es_config = merge( + $elasticsearch::config, + { 'path.data' => $elasticsearch::datadir }, + { 'path.logs' => $elasticsearch::logdir }, + $_tls_config + ) + + datacat_fragment { 'main_config': + target => "${elasticsearch::configdir}/elasticsearch.yml", + data => $_es_config, + } + + datacat { "${elasticsearch::configdir}/elasticsearch.yml": + template => "${module_name}/etc/elasticsearch/elasticsearch.yml.erb", + notify => $elasticsearch::_notify_service, + require => Class['elasticsearch::package'], + owner => $elasticsearch::elasticsearch_user, + group => $elasticsearch::elasticsearch_group, + mode => '0440', } - # Define logging config file for the in-use security plugin - if $::elasticsearch::security_logging_content != undef or $::elasticsearch::security_logging_source != undef { - if $::elasticsearch::security_plugin == undef or ! ($::elasticsearch::security_plugin in ['shield', 'x-pack']) { - fail("\"${::elasticsearch::security_plugin}\" is not a valid security_plugin parameter value") + # Add any additional JVM options + $elasticsearch::jvm_options.each |String $jvm_option| { + file_line { "jvm_option_${jvm_option}": + ensure => present, + path => "${elasticsearch::configdir}/jvm.options", + line => $jvm_option, + notify => $elasticsearch::_notify_service, } + } - $_security_logging_file = $::elasticsearch::security_plugin ? { - 'shield' => 'logging.yml', - default => 'log4j2.properties' + if $elasticsearch::system_key != undef { + file { "${elasticsearch::configdir}/system_key": + ensure => 'file', + source => $elasticsearch::system_key, + mode => '0400', } + } - file { "/etc/elasticsearch/${::elasticsearch::security_plugin}/${_security_logging_file}" : - content => $::elasticsearch::security_logging_content, - source => $::elasticsearch::security_logging_source, + # Add secrets to keystore + if $elasticsearch::secrets != undef { + elasticsearch_keystore { 'elasticsearch_secrets': + configdir => $elasticsearch::configdir, + purge => $elasticsearch::purge_secrets, + settings => $elasticsearch::secrets, + notify => $::elasticsearch::_notify_service, } } } elsif ( $elasticsearch::ensure == 'absent' ) { - - file { $elasticsearch::_plugindir: + file { $elasticsearch::real_plugindir: ensure => 'absent', force => true, backup => false, } - file { "${elasticsearch::configdir}/jvm.options": - ensure => 'absent', + file { "${elasticsearch::defaults_location}/elasticsearch": + ensure => 'absent', + subscribe => Service['elasticsearch'], } - } - } diff --git a/manifests/init.pp b/manifests/init.pp index 0e4bd00..035cf1b 100644 --- a/manifests/init.pp +++ b/manifests/init.pp @@ -1,597 +1,606 @@ # Top-level Elasticsearch class which may manage installation of the # Elasticsearch package, package repository, and other # global options and parameters. # # @summary Manages the installation of Elasticsearch and related options. # # @example install Elasticsearch # class { 'elasticsearch': } # # @example removal and decommissioning # class { 'elasticsearch': # ensure => 'absent', # } # # @example install everything but disable service(s) afterwards # class { 'elasticsearch': # status => 'disabled', # } # # @param ensure # Controls if the managed resources shall be `present` or `absent`. 
# If set to `absent`, the managed software packages will be uninstalled, and # any traces of the packages will be purged as well as possible, possibly # including existing configuration files. # System modifications (if any) will be reverted as well as possible (e.g. # removal of created users, services, changed log settings, and so on). # This is a destructive parameter and should be used with care. # # @param api_basic_auth_password # Defines the default REST basic auth password for API authentication. # # @param api_basic_auth_username # Defines the default REST basic auth username for API authentication. # # @param api_ca_file # Path to a CA file which will be used to validate server certs when # communicating with the Elasticsearch API over HTTPS. # # @param api_ca_path # Path to a directory with CA files which will be used to validate server # certs when communicating with the Elasticsearch API over HTTPS. # # @param api_host # Default host to use when accessing Elasticsearch APIs. # # @param api_port # Default port to use when accessing Elasticsearch APIs. # # @param api_protocol # Default protocol to use when accessing Elasticsearch APIs. # # @param api_timeout # Default timeout (in seconds) to use when accessing Elasticsearch APIs. # # @param autoupgrade # If set to `true`, any managed package will be upgraded on each Puppet run # when the package provider is able to find a newer version than the present # one. The exact behavior is provider dependent (see # {package, "upgradeable"}[http://j.mp/xbxmNP] in the Puppet documentation). # +# @param ca_certificate +# Path to the trusted CA certificate to add to this node's Java keystore. +# +# @param certificate +# Path to the certificate for this node signed by the CA listed in +# ca_certificate. +# # @param config # Elasticsearch configuration hash. # # @param configdir # Directory containing the elasticsearch configuration. # Use this setting if your packages deviate from the norm (`/etc/elasticsearch`) # # @param configdir_recurselimit # Dictates how deeply the file copy recursion logic should descend when # copying files from the `configdir` to instance `configdir`s. # # @param daily_rolling_date_pattern # File pattern for the file appender log when file_rolling_type is 'dailyRollingFile'. # # @param datadir # Allows you to set the data directory of Elasticsearch. # -# @param datadir_instance_directories -# Control whether individual directories for instances will be created within -# each instance's data directory. -# # @param default_logging_level # Default logging level for Elasticsearch. # # @param defaults_location # Absolute path to directory containing init defaults file. # +# @param deprecation_logging +# Whether to enable deprecation logging. If enabled, deprecation logs will be +# saved to ${cluster.name}_deprecation.log in the Elasticsearch log folder. +# +# @param deprecation_logging_level +# Default deprecation logging level for Elasticsearch. +# # @param download_tool # Command-line invocation with which to retrieve an optional package_url. # # @param download_tool_insecure # Command-line invocation with which to retrieve an optional package_url when # certificate verification should be ignored. # # @param download_tool_verify_certificates # Whether or not to verify SSL/TLS certificates when retrieving package files # using a download tool instead of a package management provider. # # @param elasticsearch_group # The group Elasticsearch should run as. This also sets file group # permissions. 
# # @param elasticsearch_user # The user Elasticsearch should run as. This also sets file ownership. # # @param file_rolling_type # Configuration for the file appender rotation. It can be 'dailyRollingFile', # 'rollingFile' or 'file'. The first rotates by name, the second one by size # or third don't rotate automatically. # # @param homedir # Directory where the elasticsearch installation's files are kept (plugins, etc.) # # @param indices # Define indices via a hash. This is mainly used with Hiera's auto binding. # # @param init_defaults # Defaults file content in hash representation. # # @param init_defaults_file # Defaults file as puppet resource. # # @param init_template # Service file as a template. # -# @param instances -# Define instances via a hash. This is mainly used with Hiera's auto binding. -# # @param jvm_options # Array of options to set in jvm_options. # +# @param keystore_password +# Password to encrypt this node's Java keystore. +# +# @param keystore_path +# Custom path to the Java keystore file. This parameter is optional. +# # @param license # Optional Elasticsearch license in hash or string form. # # @param logdir # Directory that will be used for Elasticsearch logging. # # @param logging_config -# Representation of information to be included in the logging.yml file. +# Representation of information to be included in the log4j.properties file. # # @param logging_file # Instead of a hash, you may supply a `puppet://` file source for the -# logging.yml file. +# log4j.properties file. +# +# @param logging_level +# Default logging level for Elasticsearch. # # @param logging_template # Use a custom logging template - just supply the relative path, i.e. # `$module/elasticsearch/logging.yml.erb` # # @param manage_repo # Enable repo management by enabling official Elastic repositories. # # @param oss # Whether to use the purely open source Elasticsearch package distribution. # # @param package_dir # Directory where packages are downloaded to. # # @param package_dl_timeout # For http, https, and ftp downloads, you may set how long the exec resource # may take. # # @param package_name # Name Of the package to install. # # @param package_provider # Method to install the packages, currently only `package` is supported. # # @param package_url # URL of the package to download. # This can be an http, https, or ftp resource for remote packages, or a # `puppet://` resource or `file:/` for local packages # # @param pid_dir # Directory where the elasticsearch process should write out its PID. # # @param pipelines # Define pipelines via a hash. This is mainly used with Hiera's auto binding. # # @param plugindir # Directory containing elasticsearch plugins. # Use this setting if your packages deviate from the norm (/usr/share/elasticsearch/plugins) # # @param plugins # Define plugins via a hash. This is mainly used with Hiera's auto binding. # +# @param private_key +# Path to the key associated with this node's certificate. +# # @param proxy_url # For http and https downloads, you may set a proxy server to use. By default, # no proxy is used. # Format: `proto://[user:pass@]server[:port]/` # # @param purge_configdir # Purge the config directory of any unmanaged files. # # @param purge_package_dir # Purge package directory on removal # # @param purge_secrets # Whether or not keys present in the keystore will be removed if they are not # present in the specified secrets hash. # # @param repo_stage # Use stdlib stage setup for managing the repo instead of relationship # ordering. 
# # @param restart_on_change # Determines if the application should be automatically restarted # whenever the configuration, package, or plugins change. Enabling this # setting will cause Elasticsearch to restart whenever there is cause to # re-read configuration files, load new plugins, or start the service using an # updated/changed executable. This may be undesireable in highly available # environments. If all other restart_* parameters are left unset, the value of # `restart_on_change` is used for all other restart_*_change defaults. # # @param restart_config_change # Determines if the application should be automatically restarted # whenever the configuration changes. This includes the Elasticsearch # configuration file, any service files, and defaults files. # Disabling automatic restarts on config changes may be desired in an # environment where you need to ensure restarts occur in a controlled/rolling # manner rather than during a Puppet run. # # @param restart_package_change # Determines if the application should be automatically restarted # whenever the package (or package version) for Elasticsearch changes. # Disabling automatic restarts on package changes may be desired in an # environment where you need to ensure restarts occur in a controlled/rolling # manner rather than during a Puppet run. # # @param restart_plugin_change # Determines if the application should be automatically restarted whenever # plugins are installed or removed. # Disabling automatic restarts on plugin changes may be desired in an # environment where you need to ensure restarts occur in a controlled/rolling # manner rather than during a Puppet run. # # @param roles # Define roles via a hash. This is mainly used with Hiera's auto binding. # # @param rolling_file_max_backup_index # Max number of logs to store whern file_rolling_type is 'rollingFile' # # @param rolling_file_max_file_size # Max log file size when file_rolling_type is 'rollingFile' # # @param scripts # Define scripts via a hash. This is mainly used with Hiera's auto binding. # # @param secrets # Optional default configuration hash of key/value pairs to store in the # Elasticsearch keystore file. If unset, the keystore is left unmanaged. # # @param security_logging_content -# File content for shield/x-pack logging configuration file (will be placed -# into logging.yml or log4j2.properties file as appropriate). +# File content for x-pack logging configuration file (will be placed +# into log4j2.properties file). # # @param security_logging_source -# File source for shield/x-pack logging configuration file (will be placed -# into logging.yml or log4j2.properties file as appropriate). +# File source for x-pack logging configuration file (will be placed +# into log4j2.properties). # -# @param security_plugin -# Which security plugin will be used to manage users, roles, and -# certificates. +# @param service_name +# Elasticsearch service name # # @param service_provider # The service resource type provider to use when managing elasticsearch instances. # # @param snapshot_repositories # Define snapshot repositories via a hash. This is mainly used with Hiera's auto binding. # +# @param ssl +# Whether to manage TLS certificates. Requires the ca_certificate, +# certificate, private_key and keystore_password parameters to be set. +# # @param status # To define the status of the service. If set to `enabled`, the service will # be run and will be started at boot time. If set to `disabled`, the service # is stopped and will not be started at boot time. 
If set to `running`, the # service will be run but will not be started at boot time. You may use this # to start a service on the first Puppet run instead of the system startup. # If set to `unmanaged`, the service will not be started at boot time and Puppet # does not care whether the service is running or not. For example, this may # be useful if a cluster management software is used to decide when to start # the service plus assuring it is running on the desired node. # # @param system_key -# Source for the Shield/x-pack system key. Valid values are any that are +# Source for the x-pack system key. Valid values are any that are # supported for the file resource `source` parameter. # # @param systemd_service_path # Path to the directory in which to install systemd service units. # # @param templates # Define templates via a hash. This is mainly used with Hiera's auto binding. # # @param users # Define templates via a hash. This is mainly used with Hiera's auto binding. # # @param validate_tls # Enable TLS/SSL validation on API calls. # # @param version # To set the specific version you want to install. # # @author Richard Pijnenburg # @author Tyler Langlois +# @author Gavin Williams # class elasticsearch ( Enum['absent', 'present'] $ensure, Optional[String] $api_basic_auth_password, Optional[String] $api_basic_auth_username, Optional[String] $api_ca_file, Optional[String] $api_ca_path, String $api_host, Integer[0, 65535] $api_port, Enum['http', 'https'] $api_protocol, Integer $api_timeout, Boolean $autoupgrade, Hash $config, Stdlib::Absolutepath $configdir, Integer $configdir_recurselimit, String $daily_rolling_date_pattern, Elasticsearch::Multipath $datadir, - Boolean $datadir_instance_directories, - String $default_logging_level, Optional[Stdlib::Absolutepath] $defaults_location, + Boolean $deprecation_logging, + String $deprecation_logging_level, Optional[String] $download_tool, Optional[String] $download_tool_insecure, Boolean $download_tool_verify_certificates, String $elasticsearch_group, String $elasticsearch_user, Enum['dailyRollingFile', 'rollingFile', 'file'] $file_rolling_type, Stdlib::Absolutepath $homedir, Hash $indices, Hash $init_defaults, Optional[String] $init_defaults_file, String $init_template, - Hash $instances, Array[String] $jvm_options, Optional[Variant[String, Hash]] $license, Stdlib::Absolutepath $logdir, Hash $logging_config, Optional[String] $logging_file, + String $logging_level, Optional[String] $logging_template, Boolean $manage_repo, Boolean $oss, Stdlib::Absolutepath $package_dir, Integer $package_dl_timeout, String $package_name, Enum['package'] $package_provider, Optional[String] $package_url, Optional[Stdlib::Absolutepath] $pid_dir, Hash $pipelines, Optional[Stdlib::Absolutepath] $plugindir, Hash $plugins, Optional[Stdlib::HTTPUrl] $proxy_url, Boolean $purge_configdir, Boolean $purge_package_dir, Boolean $purge_secrets, Variant[Boolean, String] $repo_stage, Boolean $restart_on_change, Hash $roles, Integer $rolling_file_max_backup_index, String $rolling_file_max_file_size, Hash $scripts, Optional[Hash] $secrets, Optional[String] $security_logging_content, Optional[String] $security_logging_source, - Optional[Enum['shield', 'x-pack']] $security_plugin, + String $service_name, Enum['init', 'openbsd', 'openrc', 'systemd'] $service_provider, Hash $snapshot_repositories, + Boolean $ssl, Elasticsearch::Status $status, Optional[String] $system_key, Stdlib::Absolutepath $systemd_service_path, Hash $templates, Hash $users, Boolean $validate_tls, Variant[String, 
Boolean] $version, - Boolean $restart_config_change = $restart_on_change, - Boolean $restart_package_change = $restart_on_change, - Boolean $restart_plugin_change = $restart_on_change, + Optional[Stdlib::Absolutepath] $ca_certificate = undef, + Optional[Stdlib::Absolutepath] $certificate = undef, + String $default_logging_level = $logging_level, + Optional[String] $keystore_password = undef, + Optional[Stdlib::Absolutepath] $keystore_path = undef, + Optional[Stdlib::Absolutepath] $private_key = undef, + Boolean $restart_config_change = $restart_on_change, + Boolean $restart_package_change = $restart_on_change, + Boolean $restart_plugin_change = $restart_on_change, ) { #### Validate parameters if ($package_url != undef and $version != false) { fail('Unable to set the version number when using package_url option.') } if ($version != false) { case $facts['os']['family'] { 'RedHat', 'Linux', 'Suse': { if ($version =~ /.+-\d/) { $pkg_version = $version } else { $pkg_version = "${version}-1" } } default: { $pkg_version = $version } } } # This value serves as an unchanging default for platforms as a default for # init scripts to fallback on. $_datadir_default = $facts['kernel'] ? { 'Linux' => '/var/lib/elasticsearch', 'OpenBSD' => '/var/elasticsearch/data', default => undef, } # The OSS package distribution's package appends `-oss` to the end of the # canonical package name. $_package_name = $oss ? { true => "${package_name}-oss", default => $package_name, } # Set the plugin path variable for use later in the module. if $plugindir == undef { - $_plugindir = "${homedir}/plugins" + $real_plugindir = "${homedir}/plugins" } else { - $_plugindir = $plugindir + $real_plugindir = $plugindir + } + + # Should we restart Elasticsearch on config change? + $_notify_service = $elasticsearch::restart_config_change ? 
{ + true => Service[$elasticsearch::service_name], + false => undef, } #### Manage actions contain elasticsearch::package contain elasticsearch::config + contain elasticsearch::service - create_resources('elasticsearch::index', $::elasticsearch::indices) - create_resources('elasticsearch::instance', $::elasticsearch::instances) - create_resources('elasticsearch::pipeline', $::elasticsearch::pipelines) - create_resources('elasticsearch::plugin', $::elasticsearch::plugins) - create_resources('elasticsearch::role', $::elasticsearch::roles) - create_resources('elasticsearch::script', $::elasticsearch::scripts) - create_resources('elasticsearch::snapshot_repository', $::elasticsearch::snapshot_repositories) - create_resources('elasticsearch::template', $::elasticsearch::templates) - create_resources('elasticsearch::user', $::elasticsearch::users) + create_resources('elasticsearch::index', $elasticsearch::indices) + create_resources('elasticsearch::pipeline', $elasticsearch::pipelines) + create_resources('elasticsearch::plugin', $elasticsearch::plugins) + create_resources('elasticsearch::role', $elasticsearch::roles) + create_resources('elasticsearch::script', $elasticsearch::scripts) + create_resources('elasticsearch::snapshot_repository', $elasticsearch::snapshot_repositories) + create_resources('elasticsearch::template', $elasticsearch::templates) + create_resources('elasticsearch::user', $elasticsearch::users) if ($manage_repo == true) { if ($repo_stage == false) { # Use normal relationship ordering contain elastic_stack::repo Class['elastic_stack::repo'] -> Class['elasticsearch::package'] } else { # Use staging for ordering if !(defined(Stage[$repo_stage])) { stage { $repo_stage: before => Stage['main'] } } include elastic_stack::repo Class<|title == 'elastic_stack::repo'|>{ stage => $repo_stage, } } } if ($license != undef) { contain elasticsearch::license } #### Manage relationships # # Note that many of these overly verbose declarations work around # https://tickets.puppetlabs.com/browse/PUP-1410 # which means clean arrow order chaining won't work if someone, say, # doesn't declare any plugins. # # forgive me for what you're about to see if defined(Class['java']) { Class['java'] -> Class['elasticsearch::config'] } if $ensure == 'present' { - # Installation and configuration + # Installation, configuration and service Class['elasticsearch::package'] -> Class['elasticsearch::config'] + if $restart_config_change { + Class['elasticsearch::config'] ~> Class['elasticsearch::service'] + } else { + Class['elasticsearch::config'] -> Class['elasticsearch::service'] + } + # Top-level ordering bindings for resources. 
Class['elasticsearch::config'] -> Elasticsearch::Plugin <| ensure == 'present' or ensure == 'installed' |> Elasticsearch::Plugin <| ensure == 'absent' |> -> Class['elasticsearch::config'] Class['elasticsearch::config'] - -> Elasticsearch::Instance <| |> - Class['elasticsearch::config'] - -> Elasticsearch::User <| |> - Class['elasticsearch::config'] - -> Elasticsearch::Role <| |> + -> Elasticsearch::User <| ensure == 'present' |> + # Elasticsearch::User <| ensure == 'absent' |> + # -> Class['elasticsearch::config'] + # Class['elasticsearch::config'] + # -> Elasticsearch::Role <| |> Class['elasticsearch::config'] -> Elasticsearch::Template <| |> Class['elasticsearch::config'] -> Elasticsearch::Pipeline <| |> Class['elasticsearch::config'] -> Elasticsearch::Index <| |> Class['elasticsearch::config'] -> Elasticsearch::Snapshot_repository <| |> } else { # Absent; remove configuration before the package. Class['elasticsearch::config'] -> Class['elasticsearch::package'] # Top-level ordering bindings for resources. Elasticsearch::Plugin <| |> -> Class['elasticsearch::config'] - Elasticsearch::Instance <| |> - -> Class['elasticsearch::config'] Elasticsearch::User <| |> -> Class['elasticsearch::config'] Elasticsearch::Role <| |> -> Class['elasticsearch::config'] Elasticsearch::Template <| |> -> Class['elasticsearch::config'] Elasticsearch::Pipeline <| |> -> Class['elasticsearch::config'] Elasticsearch::Index <| |> -> Class['elasticsearch::config'] Elasticsearch::Snapshot_repository <| |> -> Class['elasticsearch::config'] } - # Install plugins before managing instances or users/roles - Elasticsearch::Plugin <| ensure == 'present' or ensure == 'installed' |> - -> Elasticsearch::Instance <| |> + # Install plugins before managing users/roles Elasticsearch::Plugin <| ensure == 'present' or ensure == 'installed' |> -> Elasticsearch::User <| |> Elasticsearch::Plugin <| ensure == 'present' or ensure == 'installed' |> -> Elasticsearch::Role <| |> # Remove plugins after managing users/roles Elasticsearch::User <| |> -> Elasticsearch::Plugin <| ensure == 'absent' |> Elasticsearch::Role <| |> -> Elasticsearch::Plugin <| ensure == 'absent' |> # Ensure roles are defined before managing users that reference roles Elasticsearch::Role <| |> -> Elasticsearch::User <| ensure == 'present' |> # Ensure users are removed before referenced roles are managed Elasticsearch::User <| ensure == 'absent' |> -> Elasticsearch::Role <| |> # Ensure users and roles are managed before calling out to REST resources Elasticsearch::Role <| |> -> Elasticsearch::Template <| |> Elasticsearch::User <| |> -> Elasticsearch::Template <| |> Elasticsearch::Role <| |> -> Elasticsearch::Pipeline <| |> Elasticsearch::User <| |> -> Elasticsearch::Pipeline <| |> Elasticsearch::Role <| |> -> Elasticsearch::Index <| |> Elasticsearch::User <| |> -> Elasticsearch::Index <| |> Elasticsearch::Role <| |> -> Elasticsearch::Snapshot_repository <| |> Elasticsearch::User <| |> -> Elasticsearch::Snapshot_repository <| |> # Ensure that any command-line based user changes are performed before the # file is modified Elasticsearch_user <| |> -> Elasticsearch_user_file <| |> - - # Manage users/roles before instances (req'd to keep dir in sync) - Elasticsearch::Role <| |> - -> Elasticsearch::Instance <| |> - Elasticsearch::User <| |> - -> Elasticsearch::Instance <| |> - - # Ensure instances are started before managing REST resources - Elasticsearch::Instance <| ensure == 'present' |> - -> Elasticsearch::Template <| |> - Elasticsearch::Instance <| ensure == 'present' |> 
- -> Elasticsearch::Pipeline <| |> - Elasticsearch::Instance <| ensure == 'present' |> - -> Elasticsearch::Index <| |> - Elasticsearch::Instance <| ensure == 'present' |> - -> Elasticsearch::Snapshot_repository <| |> - # Ensure instances are stopped after managing REST resources - Elasticsearch::Template <| |> - -> Elasticsearch::Instance <| ensure == 'absent' |> - Elasticsearch::Pipeline <| |> - -> Elasticsearch::Instance <| ensure == 'absent' |> - Elasticsearch::Index <| |> - -> Elasticsearch::Instance <| ensure == 'absent' |> - Elasticsearch::Snapshot_repository <| |> - -> Elasticsearch::Instance <| ensure == 'absent' |> - - # Ensure scripts are installed before copying them to configuration directory - Elasticsearch::Script <| |> - -> File["${configdir}/scripts"] } diff --git a/manifests/instance.pp b/manifests/instance.pp deleted file mode 100644 index 813136f..0000000 --- a/manifests/instance.pp +++ /dev/null @@ -1,537 +0,0 @@ -# This define allows you to create or remove an elasticsearch instance -# -# @param ensure -# Controls if the managed resources shall be `present` or `absent`. -# If set to `absent`, the managed software packages will be uninstalled, and -# any traces of the packages will be purged as well as possible, possibly -# including existing configuration files. -# System modifications (if any) will be reverted as well as possible (e.g. -# removal of created users, services, changed log settings, and so on). -# This is a destructive parameter and should be used with care. -# -# @param ca_certificate -# Path to the trusted CA certificate to add to this node's java keystore. -# -# @param certificate -# Path to the certificate for this node signed by the CA listed in -# ca_certificate. -# -# @param config -# Elasticsearch configuration hash. -# -# @param configdir -# Path to directory containing the elasticsearch configuration. -# Use this setting if your packages deviate from the norm (/etc/elasticsearch). -# -# @param configdir_recurselimit -# Dictates how deeply the file copy recursion logic should descend when -# copying files from the `elasticsearch::configdir` to instance `configdir`s. -# -# @param daily_rolling_date_pattern -# File pattern for the file appender log when file_rolling_type is `dailyRollingFile` -# -# @param datadir -# Allows you to set the data directory of Elasticsearch -# -# @param datadir_instance_directories -# Control whether individual directories for instances will be created within -# each instance's data directory. -# -# @param deprecation_logging -# Wheter to enable deprecation logging. If enabled, deprecation logs will be -# saved to ${cluster.name}_deprecation.log in the elastic search log folder. -# -# @param deprecation_logging_level -# Default deprecation logging level for Elasticsearch. -# -# @param file_rolling_type -# Configuration for the file appender rotation. It can be `dailyRollingFile` -# or `rollingFile`. The first rotates by name, and the second one by size. -# -# @param init_defaults -# Defaults file content in hash representation. -# -# @param init_defaults_file -# Defaults file as puppet resource. -# -# @param init_template -# Service file as a template -# -# @param jvm_options -# Array of options to set in jvm_options. -# -# @param keystore_password -# Password to encrypt this node's Java keystore. -# -# @param keystore_path -# Custom path to the java keystore file. This parameter is optional. -# -# @param logdir -# Log directory for this instance. 
-# -# @param logging_config -# Hash representation of information you want in the logging.yml file. -# -# @param logging_file -# Instead of a hash you can supply a puppet:// file source for the logging.yml file -# -# @param logging_level -# Default logging level for Elasticsearch. -# -# @param logging_template -# Use a custom logging template - just supply the reative path, ie -# $module_name/elasticsearch/logging.yml.erb -# -# @param private_key -# Path to the key associated with this node's certificate. -# -# @param purge_secrets -# Whether or not keys present in the keystore will be removed if they are not -# present in the specified secrets hash. -# -# @param rolling_file_max_backup_index -# Max number of logs to store whern file_rolling_type is `rollingFile` -# -# @param rolling_file_max_file_size -# Max log file size when file_rolling_type is `rollingFile` -# -# @param secrets -# Optional configuration hash of key/value pairs to store in the instance's -# Elasticsearch keystore file. If unset, the keystore is left unmanaged. -# -# @param security_plugin -# Which security plugin will be used to manage users, roles, and -# certificates. Inherited from top-level Elasticsearch class. -# -# @param service_flags -# Service flags used for the OpenBSD service configuration, defaults to undef. -# -# @param ssl -# Whether to manage TLS certificates for Shield. Requires the ca_certificate, -# certificate, private_key and keystore_password parameters to be set. -# -# @param status -# To define the status of the service. If set to `enabled`, the service will -# be run and will be started at boot time. If set to `disabled`, the service -# is stopped and will not be started at boot time. If set to `running`, the -# service will be run but will not be started at boot time. You may use this -# to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# -# @param system_key -# Source for the Shield system key. Valid values are any that are -# supported for the file resource `source` parameter. 
-# -# @author Richard Pijnenburg -# @author Tyler Langlois -# -define elasticsearch::instance ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Optional[Stdlib::Absolutepath] $ca_certificate = undef, - Optional[Stdlib::Absolutepath] $certificate = undef, - Optional[Hash] $config = undef, - Stdlib::Absolutepath $configdir = "${elasticsearch::configdir}/${name}", - Integer $configdir_recurselimit = $elasticsearch::configdir_recurselimit, - String $daily_rolling_date_pattern = $elasticsearch::daily_rolling_date_pattern, - Optional[Elasticsearch::Multipath] $datadir = undef, - Boolean $datadir_instance_directories = $elasticsearch::datadir_instance_directories, - Boolean $deprecation_logging = false, - String $deprecation_logging_level = 'DEBUG', - String $file_rolling_type = $elasticsearch::file_rolling_type, - Hash $init_defaults = {}, - Optional[Stdlib::Absolutepath] $init_defaults_file = undef, - String $init_template = $elasticsearch::init_template, - Array[String] $jvm_options = $elasticsearch::jvm_options, - Optional[String] $keystore_password = undef, - Optional[Stdlib::Absolutepath] $keystore_path = undef, - Stdlib::Absolutepath $logdir = "${elasticsearch::logdir}/${name}", - Hash $logging_config = {}, - Optional[String] $logging_file = undef, - String $logging_level = $elasticsearch::default_logging_level, - Optional[String] $logging_template = undef, - Optional[Stdlib::Absolutepath] $private_key = undef, - Boolean $purge_secrets = $elasticsearch::purge_secrets, - Integer $rolling_file_max_backup_index = $elasticsearch::rolling_file_max_backup_index, - String $rolling_file_max_file_size = $elasticsearch::rolling_file_max_file_size, - Optional[Hash] $secrets = undef, - Optional[Enum['shield', 'x-pack']] $security_plugin = $elasticsearch::security_plugin, - Optional[String] $service_flags = undef, - Boolean $ssl = false, - Elasticsearch::Status $status = $elasticsearch::status, - Optional[String] $system_key = $elasticsearch::system_key, -) { - - File { - owner => $elasticsearch::elasticsearch_user, - group => $elasticsearch::elasticsearch_group, - } - - Exec { - path => [ '/bin', '/usr/bin', '/usr/local/bin' ], - cwd => '/', - } - - # ensure - if ! ($ensure in [ 'present', 'absent' ]) { - fail("\"${ensure}\" is not a valid ensure parameter value") - } - - if $ssl or ($system_key != undef) { - if $security_plugin == undef or ! ($security_plugin in ['shield', 'x-pack']) { - fail("\"${security_plugin}\" is not a valid security_plugin parameter value") - } - } - - $notify_service = $elasticsearch::restart_config_change ? 
{ - true => Elasticsearch::Service[$name], - false => undef, - } - - if ($ensure == 'present') { - - # Configuration hash - if ($config == undef) { - $instance_config = {} - } else { - $instance_config = deep_implode($config) - } - - if(has_key($instance_config, 'node.name')) { - $instance_node_name = {} - } else { - $instance_node_name = { 'node.name' => "${::hostname}-${name}" } - } - - # String or array for data dir(s) - if ($datadir == undef) { - if ($datadir_instance_directories) { - if $elasticsearch::datadir =~ Array { - $instance_datadir = array_suffix($elasticsearch::datadir, "/${name}") - } else { - $instance_datadir = "${elasticsearch::datadir}/${name}" - } - } else { - $instance_datadir = $elasticsearch::datadir - } - } else { - $instance_datadir = $datadir - } - - # Logging file or hash - if ($logging_file != undef) { - $logging_source = $logging_file - $logging_content = undef - $_log4j_content = undef - } elsif ($elasticsearch::logging_file != undef) { - $logging_source = $elasticsearch::logging_file - $logging_content = undef - $_log4j_content = undef - } else { - - $main_logging_config = deep_implode($elasticsearch::logging_config) - $instance_logging_config = deep_implode($logging_config) - - $logging_hash = merge( - # Shipped defaults - { - 'action' => 'DEBUG', - 'com.amazonaws' => 'WARN', - 'index.search.slowlog' => 'TRACE, index_search_slow_log_file', - 'index.indexing.slowlog' => 'TRACE, index_indexing_slow_log_file', - }, - $main_logging_config, - $instance_logging_config - ) - if ($logging_template != undef ) { - $logging_content = template($logging_template) - $_log4j_content = template($logging_template) - } elsif ($elasticsearch::logging_template != undef) { - $logging_content = template($elasticsearch::logging_template) - $_log4j_content = template($elasticsearch::logging_template) - } else { - $logging_content = template("${module_name}/etc/elasticsearch/logging.yml.erb") - $_log4j_content = template("${module_name}/etc/elasticsearch/log4j2.properties.erb") - } - $logging_source = undef - } - - $main_config = deep_implode($elasticsearch::config) - - $instance_datadir_config = { 'path.data' => $instance_datadir } - - if $instance_datadir =~ Array { - $dirs = join($instance_datadir, ' ') - } else { - $dirs = $instance_datadir - } - - if $ssl { - if ($keystore_password == undef) { - fail('keystore_password required') - } - - if ($keystore_path == undef) { - $_keystore_path = "${configdir}/${security_plugin}/${name}.ks" - } else { - $_keystore_path = $keystore_path - } - - if $security_plugin == 'shield' { - $tls_config = { - 'shield.transport.ssl' => true, - 'shield.http.ssl' => true, - 'shield.ssl.keystore.path' => $_keystore_path, - 'shield.ssl.keystore.password' => $keystore_password, - } - } elsif $security_plugin == 'x-pack' { - $tls_config = { - 'xpack.security.transport.ssl.enabled' => true, - 'xpack.security.http.ssl.enabled' => true, - 'xpack.ssl.keystore.path' => $_keystore_path, - 'xpack.ssl.keystore.password' => $keystore_password, - } - } - - # Trust CA Certificate - java_ks { "elasticsearch_instance_${name}_keystore_ca": - ensure => 'latest', - certificate => $ca_certificate, - target => $_keystore_path, - password => $keystore_password, - trustcacerts => true, - } - - # Load node certificate and private key - java_ks { "elasticsearch_instance_${name}_keystore_node": - ensure => 'latest', - certificate => $certificate, - private_key => $private_key, - target => $_keystore_path, - password => $keystore_password, - } - } else { $tls_config = {} } - - 
exec { "mkdir_logdir_elasticsearch_${name}": - command => "mkdir -p ${logdir}", - creates => $logdir, - require => Class['elasticsearch::package'], - before => File[$logdir], - } - - file { $logdir: - ensure => 'directory', - group => $elasticsearch::elasticsearch_group, - owner => $elasticsearch::elasticsearch_user, - mode => '0750', - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name], - } - - if ($datadir_instance_directories) { - exec { "mkdir_datadir_elasticsearch_${name}": - command => "mkdir -p ${dirs}", - creates => $instance_datadir, - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name], - } - -> file { $instance_datadir: - ensure => 'directory', - owner => $elasticsearch::elasticsearch_user, - group => undef, - mode => '0755', - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name], - } - } - - exec { "mkdir_configdir_elasticsearch_${name}": - command => "mkdir -p ${configdir}", - creates => $elasticsearch::configdir, - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name], - } - -> file { $configdir: - ensure => 'directory', - # Copy files from the stock configuration directory _into_ the instance - # configuration directory. This lets us pull in miscellaneous files that - # utilities may create (like X-Pack user/role files) into instance - # directories without explicitly naming them, since we can't predict all the - # files that plugins may create/manage. - # - # Special care is needed to avoid copying in _some_ directories/files to - # avoid overwriting instance-specific configuration files or other instance - # directories. - ignore => [ - "${elasticsearch::configdir}/elasticsearch.yml", - "${elasticsearch::configdir}/jvm.options", - "${elasticsearch::configdir}/logging.yml", - "${elasticsearch::configdir}/log4j2.properties", - ], - recurse => 'remote', - recurselimit => $configdir_recurselimit, - source => $elasticsearch::configdir, - purge => $elasticsearch::purge_configdir, - force => $elasticsearch::purge_configdir, - tag => [ - 'elasticsearch_instance_configdir', - ], - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name], - notify => $notify_service, - } - - # Do _not_ copy in instance directories. This avoids a) recursing - # indefinitely by copying our own instance directory and b) copying in any - # other potential instance directories. 
- File <| tag == 'elasticsearch_instance_configdir' |> { - ignore +> $name - } - - file { "${configdir}/jvm.options": - before => Elasticsearch::Service[$name], - content => template("${module_name}/etc/elasticsearch/jvm.options.erb"), - group => $elasticsearch::elasticsearch_group, - notify => $notify_service, - owner => $elasticsearch::elasticsearch_user, - } - - file { - "${configdir}/logging.yml": - ensure => file, - content => $logging_content, - source => $logging_source, - mode => '0644', - notify => $notify_service, - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name]; - "${configdir}/log4j2.properties": - ensure => file, - content => $_log4j_content, - source => $logging_source, - mode => '0644', - notify => $notify_service, - require => Class['elasticsearch::package'], - before => Elasticsearch::Service[$name]; - } - - if $security_plugin != undef { - file { "${configdir}/${security_plugin}": - ensure => 'directory', - mode => '0750', - source => "${elasticsearch::configdir}/${security_plugin}", - recurse => 'remote', - owner => 'root', - group => $elasticsearch::elasticsearch_group, - before => Elasticsearch::Service[$name], - notify => $notify_service, - } - } - - if $system_key != undef { - file { "${configdir}/${security_plugin}/system_key": - ensure => 'file', - source => $system_key, - mode => '0400', - before => Elasticsearch::Service[$name], - require => File["${configdir}/${security_plugin}"], - } - } - - # build up new config - $instance_conf = merge( - $main_config, - $instance_node_name, - $instance_datadir_config, - { 'path.logs' => $logdir }, - $tls_config, - $instance_config - ) - - # defaults file content - # ensure user did not provide both init_defaults and init_defaults_file - if ((!empty($init_defaults)) and ($init_defaults_file != undef)) { - fail ('Only one of $init_defaults and $init_defaults_file should be defined') - } - - $init_defaults_new = merge( - { 'DATA_DIR' => $elasticsearch::_datadir_default }, - $elasticsearch::init_defaults, - { - 'CONF_DIR' => $configdir, - 'ES_HOME' => $elasticsearch::homedir, - 'ES_JVM_OPTIONS' => "${configdir}/jvm.options", - 'ES_PATH_CONF' => $configdir, - 'LOG_DIR' => $logdir, - }, - $init_defaults - ) - - $user = $elasticsearch::elasticsearch_user - $group = $elasticsearch::elasticsearch_group - - datacat_fragment { "main_config_${name}": - target => "${configdir}/elasticsearch.yml", - data => $instance_conf, - } - - datacat { "${configdir}/elasticsearch.yml": - template => "${module_name}/etc/elasticsearch/elasticsearch.yml.erb", - notify => $notify_service, - require => Class['elasticsearch::package'], - owner => $elasticsearch::elasticsearch_user, - group => $elasticsearch::elasticsearch_group, - mode => '0440', - } - - if ($elasticsearch::secrets != undef or $secrets != undef) { - if ($elasticsearch::secrets != undef) { - $main_secrets = $elasticsearch::secrets - } else { - $main_secrets = {} - } - - if ($secrets != undef) { - $instance_secrets = $secrets - } else { - $instance_secrets = {} - } - - elasticsearch_keystore { $name : - configdir => $elasticsearch::configdir, - purge => $purge_secrets, - settings => merge($main_secrets, $instance_secrets), - notify => $notify_service, - } - } - - $require_service = Class['elasticsearch::package'] - $before_service = undef - - } else { - - file { $configdir: - ensure => 'absent', - recurse => true, - force => true, - } - - $require_service = undef - $before_service = File[$configdir] - - $init_defaults_new = {} - } - - 
elasticsearch::service { $name: - ensure => $ensure, - status => $status, - service_flags => $service_flags, - init_defaults => $init_defaults_new, - init_defaults_file => $init_defaults_file, - init_template => $init_template, - require => $require_service, - before => $before_service, - } -} diff --git a/manifests/license.pp b/manifests/license.pp index 320e0c9..866b857 100644 --- a/manifests/license.pp +++ b/manifests/license.pp @@ -1,104 +1,88 @@ # A defined type to control Elasticsearch licenses. # # @param ensure # Controls whether the named pipeline should be present or absent in # the cluster. # # @param api_basic_auth_password # HTTP basic auth password to use when communicating over the Elasticsearch # API. # # @param api_basic_auth_username # HTTP basic auth username to use when communicating over the Elasticsearch # API. # # @param api_ca_file # Path to a CA file which will be used to validate server certs when # communicating with the Elasticsearch API over HTTPS. # # @param api_ca_path # Path to a directory with CA files which will be used to validate server # certs when communicating with the Elasticsearch API over HTTPS. # # @param api_host # Host name or IP address of the ES instance to connect to. # # @param api_port # Port number of the ES instance to connect to # # @param api_protocol # Protocol that should be used to connect to the Elasticsearch API. # # @param api_timeout # Timeout period (in seconds) for the Elasticsearch API. # # @param content # License content in hash or string form. # -# @param security_plugin -# Which security plugin will be used to manage users, roles, and -# certificates. -# # @param validate_tls # Determines whether the validity of SSL/TLS certificates received from the # Elasticsearch API should be verified or ignored. 
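#
# @example an illustrative sketch of applying a license through the top-level class,
#   which contains this class whenever its `license` parameter is set; the hash keys
#   and values below are placeholders, not real license content:
#
#   class { 'elasticsearch':
#     license => {
#       'license' => {
#         'uid'  => 'REPLACE-WITH-UID',
#         'type' => 'basic',
#       },
#     },
#   }
#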
# # @author Tyler Langlois # class elasticsearch::license ( - Enum['absent', 'present'] $ensure = 'present', - Optional[String] $api_basic_auth_password = $elasticsearch::api_basic_auth_password, - Optional[String] $api_basic_auth_username = $elasticsearch::api_basic_auth_username, - Optional[Stdlib::Absolutepath] $api_ca_file = $elasticsearch::api_ca_file, - Optional[Stdlib::Absolutepath] $api_ca_path = $elasticsearch::api_ca_path, - String $api_host = $elasticsearch::api_host, - Integer[0, 65535] $api_port = $elasticsearch::api_port, - Enum['http', 'https'] $api_protocol = $elasticsearch::api_protocol, - Integer $api_timeout = $elasticsearch::api_timeout, - Variant[String, Hash] $content = $elasticsearch::license, - Optional[Enum['shield', 'x-pack']] $security_plugin = $elasticsearch::security_plugin, - Boolean $validate_tls = $elasticsearch::validate_tls, + Enum['absent', 'present'] $ensure = 'present', + Optional[String] $api_basic_auth_password = $elasticsearch::api_basic_auth_password, + Optional[String] $api_basic_auth_username = $elasticsearch::api_basic_auth_username, + Optional[Stdlib::Absolutepath] $api_ca_file = $elasticsearch::api_ca_file, + Optional[Stdlib::Absolutepath] $api_ca_path = $elasticsearch::api_ca_path, + String $api_host = $elasticsearch::api_host, + Integer[0, 65535] $api_port = $elasticsearch::api_port, + Enum['http', 'https'] $api_protocol = $elasticsearch::api_protocol, + Integer $api_timeout = $elasticsearch::api_timeout, + Variant[String, Hash] $content = $elasticsearch::license, + Boolean $validate_tls = $elasticsearch::validate_tls, ) { if $content =~ String { $_content = parsejson($content) } else { $_content = $content } - $_security_plugin = regsubst($security_plugin, '-', '') - if $ensure == 'present' { - Elasticsearch::Instance <| ensure == 'present' |> - -> Class['elasticsearch::license'] - Class['elasticsearch::license'] - -> Elasticsearch::Instance <| ensure == 'absent' |> - Elasticsearch::Role <| |> -> Class['elasticsearch::license'] Elasticsearch::User <| |> -> Class['elasticsearch::license'] - } else { - Class['elasticsearch::license'] - -> Elasticsearch::Instance <| |> } es_instance_conn_validator { 'license-conn-validator': server => $api_host, port => $api_port, timeout => $api_timeout, } - -> elasticsearch_license { $_security_plugin: + -> elasticsearch_license { 'xpack': ensure => $ensure, content => $_content, protocol => $api_protocol, host => $api_host, port => $api_port, timeout => $api_timeout, username => $api_basic_auth_username, password => $api_basic_auth_password, ca_file => $api_ca_file, ca_path => $api_ca_path, validate_tls => $validate_tls, - provider => $_security_plugin, } } diff --git a/manifests/package.pp b/manifests/package.pp index 109d48a..2b2d4b8 100644 --- a/manifests/package.pp +++ b/manifests/package.pp @@ -1,193 +1,192 @@ # This class exists to coordinate all software package management related # actions, functionality and logical units in a central place. # # It is not intended to be used directly by external resources like node # definitions or other modules. 
# # @example importing this class by other classes to use its functionality: # class { 'elasticsearch::package': } # # @author Richard Pijnenburg # @author Tyler Langlois # class elasticsearch::package { Exec { path => [ '/bin', '/usr/bin', '/usr/local/bin' ], cwd => '/', tries => 3, try_sleep => 10, } if $elasticsearch::ensure == 'present' { if $elasticsearch::restart_package_change { - Package['elasticsearch'] ~> Elasticsearch::Service <| |> + Package['elasticsearch'] ~> Class['elasticsearch::service'] } Package['elasticsearch'] ~> Exec['remove_plugin_dir'] # Create directory to place the package file $package_dir = $elasticsearch::package_dir exec { 'create_package_dir_elasticsearch': cwd => '/', path => ['/usr/bin', '/bin'], command => "mkdir -p ${package_dir}", creates => $package_dir, } file { $package_dir: ensure => 'directory', purge => $elasticsearch::purge_package_dir, force => $elasticsearch::purge_package_dir, backup => false, require => Exec['create_package_dir_elasticsearch'], } # Check if we want to install a specific version or not if $elasticsearch::version == false { $package_ensure = $elasticsearch::autoupgrade ? { true => 'latest', false => 'present', } } else { # install specific version $package_ensure = $elasticsearch::pkg_version } # action if ($elasticsearch::package_url != undef) { case $elasticsearch::package_provider { 'package': { $before = Package['elasticsearch'] } default: { fail("software provider \"${elasticsearch::package_provider}\".") } } - $filename_array = split($elasticsearch::package_url, '/') $basefilename = $filename_array[-1] $source_array = split($elasticsearch::package_url, ':') $protocol_type = $source_array[0] $ext_array = split($basefilename, '\.') $ext = $ext_array[-1] $pkg_source = "${package_dir}/${basefilename}" case $protocol_type { 'puppet': { file { $pkg_source: ensure => file, source => $elasticsearch::package_url, require => File[$package_dir], backup => false, before => $before, } } 'ftp', 'https', 'http': { if $elasticsearch::proxy_url != undef { $exec_environment = [ 'use_proxy=yes', "http_proxy=${elasticsearch::proxy_url}", "https_proxy=${elasticsearch::proxy_url}", ] } else { $exec_environment = [] } case $elasticsearch::download_tool { String: { $_download_command = if $elasticsearch::download_tool_verify_certificates { $elasticsearch::download_tool } else { $elasticsearch::download_tool_insecure } exec { 'download_package_elasticsearch': command => "${_download_command} ${pkg_source} ${elasticsearch::package_url} 2> /dev/null", creates => $pkg_source, environment => $exec_environment, timeout => $elasticsearch::package_dl_timeout, require => File[$package_dir], before => $before, } } default: { fail("no \$elasticsearch::download_tool defined for ${facts['os']['family']}") } } } 'file': { $source_path = $source_array[1] file { $pkg_source: ensure => file, source => $source_path, require => File[$package_dir], backup => false, before => $before, } } default: { fail("Protocol must be puppet, file, http, https, or ftp. 
You have given \"${protocol_type}\"") } } if ($elasticsearch::package_provider == 'package') { case $ext { 'deb': { Package { provider => 'dpkg', source => $pkg_source } } 'rpm': { Package { provider => 'rpm', source => $pkg_source } } default: { fail("Unknown file extention \"${ext}\".") } } } } else { if ($elasticsearch::manage_repo and $facts['os']['family'] == 'Debian') { Class['apt::update'] -> Package['elasticsearch'] } } # Package removal } else { if ($facts['os']['family'] == 'Suse') { Package { provider => 'rpm', } $package_ensure = 'absent' } else { $package_ensure = 'purged' } } if ($elasticsearch::package_provider == 'package') { package { 'elasticsearch': ensure => $package_ensure, name => $elasticsearch::_package_name, } exec { 'remove_plugin_dir': refreshonly => true, - command => "rm -rf ${::elasticsearch::_plugindir}", + command => "rm -rf ${elasticsearch::real_plugindir}", } } else { fail("\"${elasticsearch::package_provider}\" is not supported") } } diff --git a/manifests/plugin.pp b/manifests/plugin.pp index cb25516..7f219c8 100644 --- a/manifests/plugin.pp +++ b/manifests/plugin.pp @@ -1,152 +1,146 @@ # This define allows you to install arbitrary Elasticsearch plugins # either by using the default repositories or by specifying an URL # # @example install from official repository # elasticsearch::plugin {'mobz/elasticsearch-head': module_dir => 'head'} # # @example installation using a custom URL # elasticsearch::plugin { 'elasticsearch-jetty': # module_dir => 'elasticsearch-jetty', # url => 'https://oss-es-plugins.s3.amazonaws.com/elasticsearch-jetty/elasticsearch-jetty-0.90.0.zip', # } # # @param ensure # Whether the plugin will be installed or removed. # Set to 'absent' to ensure a plugin is not installed # # @param configdir # Path to the elasticsearch configuration directory (ES_PATH_CONF) # to which the plugin should be installed. # -# @param instances -# Specify all the instances related -# # @param java_opts # Array of Java options to be passed to `ES_JAVA_OPTS` # # @param java_home # Path to JAVA_HOME, if Java is installed in a non-standard location. # # @param module_dir # Directory name where the module has been installed # This is automatically generated based on the module name # Specify a value here to override the auto generated value # # @param proxy_host # Proxy host to use when installing the plugin # # @param proxy_password # Proxy auth password to use when installing the plugin # # @param proxy_port # Proxy port to use when installing the plugin # # @param proxy_username # Proxy auth username to use when installing the plugin # # @param source # Specify the source of the plugin. # This will copy over the plugin to the node and use it for installation. # Useful for offline installation # # @param url # Specify an URL where to download the plugin from. 
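#
# @example an illustrative sketch of installing a plugin through an HTTP proxy;
#   the plugin name, proxy host and port are assumed values, not defaults of
#   this module:
#
#   elasticsearch::plugin { 'analysis-icu':
#     proxy_host => 'proxy.example.com',
#     proxy_port => 3128,
#   }
#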
# # @author Richard Pijnenburg # @author Matteo Sessa # @author Dennis Konert # @author Tyler Langlois +# @author Gavin Williams # define elasticsearch::plugin ( Enum['absent', 'present'] $ensure = 'present', Stdlib::Absolutepath $configdir = $elasticsearch::configdir, - Variant[String, Array[String]] $instances = [], Array[String] $java_opts = [], Optional[Stdlib::Absolutepath] $java_home = undef, Optional[String] $module_dir = undef, Optional[String] $proxy_host = undef, Optional[String] $proxy_password = undef, Optional[Integer[0, 65535]] $proxy_port = undef, Optional[String] $proxy_username = undef, Optional[String] $source = undef, Optional[Stdlib::HTTPUrl] $url = undef, ) { include elasticsearch case $ensure { 'present': { - if empty($instances) and $elasticsearch::restart_plugin_change { - fail('no $instances defined, even though `restart_plugin_change` is set!') - } - $_file_ensure = 'directory' $_file_before = [] } 'absent': { $_file_ensure = $ensure - $_file_before = File[$elasticsearch::_plugindir] + $_file_before = File[$elasticsearch::real_plugindir] } default: { } } # set proxy by override or parse and use proxy_url from # elasticsearch::proxy_url or use no proxy at all if ($proxy_host != undef and $proxy_port != undef) { if ($proxy_username != undef and $proxy_password != undef) { $_proxy_auth = "${proxy_username}:${proxy_password}@" } else { $_proxy_auth = undef } $_proxy = "http://${_proxy_auth}${proxy_host}:${proxy_port}" } elsif ($elasticsearch::proxy_url != undef) { $_proxy = $elasticsearch::proxy_url } else { $_proxy = undef } if ($source != undef) { $filename_array = split($source, '/') $basefilename = $filename_array[-1] $file_source = "${elasticsearch::package_dir}/${basefilename}" file { $file_source: ensure => 'file', source => $source, before => Elasticsearch_plugin[$name], } } else { $file_source = undef } $_module_dir = es_plugin_name($module_dir, $name) elasticsearch_plugin { $name: ensure => $ensure, configdir => $configdir, elasticsearch_package_name => 'elasticsearch', java_opts => $java_opts, java_home => $java_home, source => $file_source, url => $url, proxy => $_proxy, - plugin_dir => $::elasticsearch::_plugindir, + plugin_dir => $elasticsearch::real_plugindir, plugin_path => $module_dir, + before => Service['elasticsearch'], } - -> file { "${::elasticsearch::_plugindir}/${_module_dir}": + -> file { "${elasticsearch::real_plugindir}/${_module_dir}": ensure => $_file_ensure, mode => 'o+Xr', recurse => true, before => $_file_before, } - if ! empty($instances) and $elasticsearch::restart_plugin_change { + if $elasticsearch::restart_plugin_change { Elasticsearch_plugin[$name] { - notify +> Elasticsearch::Instance[$instances], + notify +> Service['elasticsearch'], } } } diff --git a/manifests/role.pp b/manifests/role.pp index f9a458b..d2fe507 100644 --- a/manifests/role.pp +++ b/manifests/role.pp @@ -1,60 +1,58 @@ -# Manage shield/x-pack roles. +# Manage x-pack roles. # # @param ensure # Whether the role should be present or not. # Set to 'absent' to ensure a role is not present. # # @param mappings # A list of optional mappings defined for this role. # # @param privileges # A hash of permissions defined for the role. Valid privilege settings can -# be found in the Shield/x-pack documentation. +# be found in the x-pack documentation. # # @example create and manage the role 'power_user' mapped to an LDAP group. 
# elasticsearch::role { 'power_user': # privileges => { # 'cluster' => 'monitor', # 'indices' => { # '*' => 'all', # }, # }, # mappings => [ # "cn=users,dc=example,dc=com", # ], # } # # @author Tyler Langlois +# @author Gavin Williams # define elasticsearch::role ( Enum['absent', 'present'] $ensure = 'present', Array $mappings = [], Hash $privileges = {}, ) { validate_slength($name, 30, 1) - if $elasticsearch::security_plugin == undef { - fail("\"${elasticsearch::security_plugin}\" required") - } if empty($privileges) or $ensure == 'absent' { $_role_ensure = 'absent' } else { $_role_ensure = $ensure } if empty($mappings) or $ensure == 'absent' { $_mapping_ensure = 'absent' } else { $_mapping_ensure = $ensure } elasticsearch_role { $name : ensure => $_role_ensure, privileges => $privileges, } elasticsearch_role_mapping { $name : ensure => $_mapping_ensure, mappings => $mappings, } } diff --git a/manifests/service.pp b/manifests/service.pp index bf77308..6e46270 100644 --- a/manifests/service.pp +++ b/manifests/service.pp @@ -1,93 +1,53 @@ # This class exists to coordinate all service management related actions, # functionality and logical units in a central place. # # *Note*: "service" is the Puppet term and type for background processes # in general and is used in a platform-independent way. E.g. "service" means # "daemon" in relation to Unix-like systems. # -# @param ensure -# Controls if the managed resources shall be `present` or `absent`. -# If set to `absent`, the managed software packages will be uninstalled, and -# any traces of the packages will be purged as well as possible, possibly -# including existing configuration files. -# System modifications (if any) will be reverted as well as possible (e.g. -# removal of created users, services, changed log settings, and so on). -# This is a destructive parameter and should be used with care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param service_flags -# Flags to pass to the service. -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. 
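#
# @example an illustrative declaration (values assumed, not prescribed) showing how
#   the single managed service is now driven by the top-level class; its `status`
#   parameter maps to the ensure/enable pair computed in this class:
#
#   class { 'elasticsearch':
#     status => 'disabled',  # stop the service now and do not start it at boot
#   }
#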
-# # @author Richard Pijnenburg # @author Tyler Langlois +# @author Gavin Williams # -define elasticsearch::service ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Optional[String] $service_flags = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { +class elasticsearch::service { + + #### Service management - case $elasticsearch::service_provider { + if $elasticsearch::ensure == 'present' { - 'init': { - elasticsearch::service::init { $name: - ensure => $ensure, - status => $status, - init_defaults_file => $init_defaults_file, - init_defaults => $init_defaults, - init_template => $init_template, + case $elasticsearch::status { + # make sure service is currently running, start it on boot + 'enabled': { + $_service_ensure = 'running' + $_service_enable = true } - } - 'openbsd': { - elasticsearch::service::openbsd { $name: - ensure => $ensure, - status => $status, - init_template => $init_template, - service_flags => $service_flags, + # make sure service is currently stopped, do not start it on boot + 'disabled': { + $_service_ensure = 'stopped' + $_service_enable = false } - } - 'systemd': { - elasticsearch::service::systemd { $name: - ensure => $ensure, - status => $status, - init_defaults_file => $init_defaults_file, - init_defaults => $init_defaults, - init_template => $init_template, + # make sure service is currently running, do not start it on boot + 'running': { + $_service_ensure = 'running' + $_service_enable = false } - } - 'openrc': { - elasticsearch::service::openrc { $name: - ensure => $ensure, - status => $status, - init_defaults_file => $init_defaults_file, - init_defaults => $init_defaults, - init_template => $init_template, + # do not start service on boot, do not care whether currently running + # or not + 'unmanaged': { + $_service_ensure = undef + $_service_enable = false } + default: { } } - default: { - fail("Unknown service provider ${elasticsearch::service_provider}") - } + } else { + # make sure the service is stopped and disabled (the removal itself will be + # done by package.pp) + $_service_ensure = 'stopped' + $_service_enable = false + } + + service { $elasticsearch::service_name: + ensure => $_service_ensure, + enable => $_service_enable, } } diff --git a/manifests/service/init.pp b/manifests/service/init.pp deleted file mode 100644 index 98611f2..0000000 --- a/manifests/service/init.pp +++ /dev/null @@ -1,161 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param status -# Defines the status of the service. 
If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# -# @author Richard Pijnenburg -# @author Tyler Langlois -# -define elasticsearch::service::init ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - - } - - if(has_key($init_defaults, 'ES_USER') and $init_defaults['ES_USER'] != $elasticsearch::elasticsearch_user) { - fail('Found ES_USER setting for init_defaults but is not same as elasticsearch_user setting. Please use elasticsearch_user setting.') - } - - $new_init_defaults = merge( - { - 'ES_USER' => $elasticsearch::elasticsearch_user, - 'ES_GROUP' => $elasticsearch::elasticsearch_group, - 'MAX_OPEN_FILES' => '65536', - }, - $init_defaults - ) - - $notify_service = $elasticsearch::restart_config_change ? { - true => Service["elasticsearch-instance-${name}"], - false => undef, - } - - if ($ensure == 'present') { - - # Defaults file, either from file source or from hash to augeas commands - if ($init_defaults_file != undef) { - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => $ensure, - source => $init_defaults_file, - owner => 'root', - group => '0', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } else { - augeas { "defaults_${name}": - incl => "${elasticsearch::defaults_location}/elasticsearch-${name}", - lens => 'Shellvars.lns', - changes => template("${module_name}/etc/sysconfig/defaults.erb"), - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - } else { # absent - - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => 'absent', - subscribe => Service["elasticsearch-${$name}"], - } - - } - - # Note that service files are persisted even in the case of absent instances. - # This is to ensure that manifest can remain idempotent and have the service - # file available in order to permit Puppet to introspect system state. 
- # init file from template - if ($init_template != undef) { - elasticsearch_service_file { "/etc/init.d/elasticsearch-${name}": - ensure => 'present', - content => file($init_template), - instance => $name, - notify => $notify_service, - package_name => $elasticsearch::package_name, - } - -> file { "/etc/init.d/elasticsearch-${name}": - ensure => 'file', - owner => 'root', - group => '0', - mode => '0755', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch-${name}", - } -} diff --git a/manifests/service/openbsd.pp b/manifests/service/openbsd.pp deleted file mode 100644 index ae1b586..0000000 --- a/manifests/service/openbsd.pp +++ /dev/null @@ -1,121 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_template -# Service file as a template -# -# @param pid_dir -# Directory where to store the serice pid file. -# -# @param service_flags -# Flags to pass to the service. -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. 
-# -# @author Richard Pijnenburg -# @author Tyler Langlois -# -define elasticsearch::service::openbsd ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Optional[String] $init_template = $elasticsearch::init_template, - Optional[String] $pid_dir = $elasticsearch::pid_dir, - Optional[String] $service_flags = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - - } - - $notify_service = $elasticsearch::restart_config_change ? { - true => Service["elasticsearch-instance-${name}"], - false => undef, - } - - if ($status != 'unmanaged') { - # Note that service files are persisted even in the case of absent instances. - # This is to ensure that manifest can remain idempotent and have the service - # file available in order to permit Puppet to introspect system state. - # init file from template - if ($init_template != undef) { - elasticsearch_service_file { "/etc/rc.d/elasticsearch_${name}": - ensure => 'present', - content => file($init_template), - instance => $name, - pid_dir => $pid_dir, - notify => $notify_service, - package_name => 'elasticsearch', - } - -> file { "/etc/rc.d/elasticsearch_${name}": - ensure => 'file', - owner => 'root', - group => '0', - mode => '0555', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - # action - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch_${name}", - flags => $service_flags, - } - } -} diff --git a/manifests/service/openrc.pp b/manifests/service/openrc.pp deleted file mode 100644 index 446d260..0000000 --- a/manifests/service/openrc.pp +++ /dev/null @@ -1,166 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param status -# Defines the status of the service. 
If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. -# -# @author Richard Pijnenburg -# @author Tyler Langlois -# -define elasticsearch::service::openrc ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - - } - - if(has_key($init_defaults, 'ES_USER') and $init_defaults['ES_USER'] != $elasticsearch::elasticsearch_user) { - fail('Found ES_USER setting for init_defaults but is not same as elasticsearch_user setting. Please use elasticsearch_user setting.') - } - - $new_init_defaults = merge( - { - 'ES_USER' => $elasticsearch::elasticsearch_user, - 'ES_GROUP' => $elasticsearch::elasticsearch_group, - 'MAX_OPEN_FILES' => '65536', - }, - $init_defaults - ) - - $notify_service = $elasticsearch::restart_config_change ? { - true => Service["elasticsearch-instance-${name}"], - false => undef, - } - - - if ( $status != 'unmanaged' and $ensure == 'present' ) { - - # defaults file content. Either from a hash or file - if ($init_defaults_file != undef) { - file { "${elasticsearch::defaults_location}/elasticsearch.${name}": - ensure => $ensure, - source => $init_defaults_file, - owner => 'root', - group => '0', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } else { - augeas { "defaults_${name}": - incl => "${elasticsearch::defaults_location}/elasticsearch.${name}", - lens => 'Shellvars.lns', - changes => template("${module_name}/etc/sysconfig/defaults.erb"), - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - } elsif ($status != 'unmanaged') { - - file { "${elasticsearch::defaults_location}/elasticsearch.${name}": - ensure => 'absent', - subscribe => Service["elasticsearch.${$name}"], - } - - } - - - if ($status != 'unmanaged') { - # Note that service files are persisted even in the case of absent instances. 
- # This is to ensure that manifest can remain idempotent and have the service - # file available in order to permit Puppet to introspect system state. - # init file from template - if ($init_template != undef) { - elasticsearch_service_file { "/etc/init.d/elasticsearch.${name}": - ensure => 'present', - content => file($init_template), - instance => $name, - notify => $notify_service, - package_name => 'elasticsearch', - } - -> file { "/etc/init.d/elasticsearch.${name}": - ensure => 'file', - owner => 'root', - group => '0', - mode => '0755', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - # action - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch.${name}", - } - } -} diff --git a/manifests/service/systemd.pp b/manifests/service/systemd.pp deleted file mode 100644 index a27eacd..0000000 --- a/manifests/service/systemd.pp +++ /dev/null @@ -1,195 +0,0 @@ -# This class exists to coordinate all service management related actions, -# functionality and logical units in a central place. -# -# *Note*: "service" is the Puppet term and type for background processes -# in general and is used in a platform-independent way. E.g. "service" means -# "daemon" in relation to Unix-like systems. -# -# @param ensure -# Controls if the managed resources shall be `present` or -# `absent`. If set to `absent`, the managed software packages will being -# uninstalled and any traces of the packages will be purged as well as -# possible. This may include existing configuration files (the exact -# behavior is provider). This is thus destructive and should be used with -# care. -# -# @param init_defaults -# Defaults file content in hash representation -# -# @param init_defaults_file -# Defaults file as puppet resource -# -# @param init_template -# Service file as a template -# -# @param status -# Defines the status of the service. If set to `enabled`, the service is -# started and will be enabled at boot time. If set to `disabled`, the -# service is stopped and will not be started at boot time. If set to `running`, -# the service is started but will not be enabled at boot time. You may use -# this to start a service on the first Puppet run instead of the system startup. -# If set to `unmanaged`, the service will not be started at boot time and Puppet -# does not care whether the service is running or not. For example, this may -# be useful if a cluster management software is used to decide when to start -# the service plus assuring it is running on the desired node. 
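To make the `status` semantics documented above concrete, a minimal usage sketch follows. It assumes the top-level `elasticsearch` class exposes this same `status` parameter, as the `$elasticsearch::status` default used by these service defines suggests; it is illustrative only and not part of this patch.

# Sketch only: start Elasticsearch on this Puppet run without enabling it at boot.
# Per the case statement in these service classes, status => 'running' maps to
# ensure => 'running' with enable => false on the underlying service resource.
class { 'elasticsearch':
  status => 'running',
}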
-# -# @author Richard Pijnenburg -# @author Tyler Langlois -# -define elasticsearch::service::systemd ( - Enum['absent', 'present'] $ensure = $elasticsearch::ensure, - Hash $init_defaults = {}, - Optional[String] $init_defaults_file = undef, - Optional[String] $init_template = undef, - Elasticsearch::Status $status = $elasticsearch::status, -) { - - #### Service management - - if $ensure == 'present' { - - case $status { - # make sure service is currently running, start it on boot - 'enabled': { - $service_ensure = 'running' - $service_enable = true - } - # make sure service is currently stopped, do not start it on boot - 'disabled': { - $service_ensure = 'stopped' - $service_enable = false - } - # make sure service is currently running, do not start it on boot - 'running': { - $service_ensure = 'running' - $service_enable = false - } - # do not start service on boot, do not care whether currently running - # or not - 'unmanaged': { - $service_ensure = undef - $service_enable = false - } - default: { } - } - } else { - # make sure the service is stopped and disabled (the removal itself will be - # done by package.pp) - $service_ensure = 'stopped' - $service_enable = false - } - - if(has_key($init_defaults, 'ES_USER') and $init_defaults['ES_USER'] != $elasticsearch::elasticsearch_user) { - fail('Found ES_USER setting for init_defaults but is not same as elasticsearch_user setting. Please use elasticsearch_user setting.') - } - - $new_init_defaults = merge( - { - 'ES_USER' => $elasticsearch::elasticsearch_user, - 'ES_GROUP' => $elasticsearch::elasticsearch_group, - 'MAX_OPEN_FILES' => '65536', - 'MAX_THREADS' => '4096', - }, - $init_defaults - ) - - $notify_service = $elasticsearch::restart_config_change ? { - true => [ Exec["systemd_reload_${name}"], Service["elasticsearch-instance-${name}"] ], - false => Exec["systemd_reload_${name}"] - } - - if ($ensure == 'present') { - - # Defaults file, either from file source or from hash to augeas commands - if ($init_defaults_file != undef) { - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => $ensure, - source => $init_defaults_file, - owner => 'root', - group => '0', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } else { - augeas { "defaults_${name}": - incl => "${elasticsearch::defaults_location}/elasticsearch-${name}", - lens => 'Shellvars.lns', - changes => template("${module_name}/etc/sysconfig/defaults.erb"), - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - $service_require = Exec["systemd_reload_${name}"] - - } else { # absent - - file { "${elasticsearch::defaults_location}/elasticsearch-${name}": - ensure => 'absent', - subscribe => Service["elasticsearch-instance-${name}"], - notify => Exec["systemd_reload_${name}"], - } - - $service_require = undef - } - - exec { "systemd_reload_${name}": - command => '/bin/systemctl daemon-reload', - refreshonly => true, - } - - # init file from template - if ($init_template != undef) { - # Check for values in init defaults we may want to set in the init template - if (has_key($new_init_defaults, 'MAX_OPEN_FILES')) { - $nofile = $new_init_defaults['MAX_OPEN_FILES'] - } else { - $nofile = '65536' - } - - if (has_key($new_init_defaults, 'MAX_LOCKED_MEMORY')) { - $memlock = $new_init_defaults['MAX_LOCKED_MEMORY'] - } else { - $memlock = undef - } - - if (has_key($new_init_defaults, 'MAX_THREADS')) { - $nproc = $new_init_defaults['MAX_THREADS'] - } else { - $nproc = 
'4096' - } - - elasticsearch_service_file { "${elasticsearch::systemd_service_path}/elasticsearch-${name}.service": - ensure => 'present', - content => file($init_template), - defaults_location => $elasticsearch::defaults_location, - group => $elasticsearch::elasticsearch_group, - homedir => $elasticsearch::homedir, - instance => $name, - memlock => $memlock, - nofile => $nofile, - nproc => $nproc, - package_name => 'elasticsearch', - pid_dir => $elasticsearch::pid_dir, - user => $elasticsearch::elasticsearch_user, - notify => $notify_service, - } - -> file { "${elasticsearch::systemd_service_path}/elasticsearch-${name}.service": - ensure => 'file', - owner => 'root', - group => 'root', - mode => '0644', - before => Service["elasticsearch-instance-${name}"], - notify => $notify_service, - } - } - - service { "elasticsearch-instance-${name}": - ensure => $service_ensure, - enable => $service_enable, - name => "elasticsearch-${name}.service", - provider => 'systemd', - require => $service_require, - } -} diff --git a/manifests/user.pp b/manifests/user.pp index e22a16a..dfd412c 100644 --- a/manifests/user.pp +++ b/manifests/user.pp @@ -1,51 +1,50 @@ -# Manages shield/x-pack users. +# Manages x-pack users. # # @example creates and manage a user with membership in the 'logstash' and 'kibana4' roles. # elasticsearch::user { 'bob': # password => 'foobar', # roles => ['logstash', 'kibana4'], # } # # @param ensure # Whether the user should be present or not. # Set to `absent` to ensure a user is not installed # # @param password # Password for the given user. A plaintext password will be managed # with the esusers utility and requires a refresh to update, while # a hashed password from the esusers utility will be managed manually # in the uses file. # # @param roles # A list of roles to which the user should belong. 
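A short sketch of the two password paths described above, following the usage example at the top of this file. The role names mirror that example and the bcrypt-style value is a placeholder, not a real hash; neither resource declaration is part of this patch.

# Plaintext password: managed through the esusers utility (elasticsearch_user).
elasticsearch::user { 'bob':
  password => 'foobar',
  roles    => ['logstash'],
}

# Password already hashed by esusers (matches /^\$2a\$/): written to the users
# file directly via elasticsearch_user_file instead.
elasticsearch::user { 'alice':
  password => '$2a$10$placeholderhashvalue',
  roles    => ['kibana4'],
}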
# # @author Tyler Langlois +# @author Gavin Williams # define elasticsearch::user ( String $password, Enum['absent', 'present'] $ensure = 'present', Array $roles = [], ) { - if $elasticsearch::security_plugin == undef { - fail("\"${elasticsearch::security_plugin}\" required") - } - if $password =~ /^\$2a\$/ { elasticsearch_user_file { $name: ensure => $ensure, configdir => $elasticsearch::configdir, hashed_password => $password, + before => Elasticsearch_user_roles[$name], } } else { elasticsearch_user { $name: ensure => $ensure, configdir => $elasticsearch::configdir, password => $password, + before => Elasticsearch_user_roles[$name], } } elasticsearch_user_roles { $name: ensure => $ensure, roles => $roles, } } diff --git a/metadata.json b/metadata.json index fcba0f7..628da01 100644 --- a/metadata.json +++ b/metadata.json @@ -1,86 +1,79 @@ { "name": "elastic-elasticsearch", - "version": "6.4.0", + "version": "7.0.0", "source": "https://github.com/elastic/puppet-elasticsearch", "author": "elastic", "license": "Apache-2.0", "summary": "Module for managing and configuring Elasticsearch nodes", - "description": "Module for managing and configuring Elasticsearch nodes", "project_page": "https://github.com/elastic/puppet-elasticsearch", "issues_url": "https://github.com/elastic/puppet-elasticsearch/issues", "dependencies": [ { "name": "elastic/elastic_stack", - "version_requirement": ">= 6.1.0 < 7.0.0" + "version_requirement": ">= 6.1.0 < 8.0.0" }, { "name": "richardc/datacat", "version_requirement": ">= 0.6.2 < 1.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">= 4.13.0 < 7.0.0" } ], "operatingsystem_support": [ { "operatingsystem": "RedHat", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "CentOS", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "OracleLinux", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "Scientific", "operatingsystemrelease": [ - "5", - "6", - "7" + "7", + "8" ] }, { "operatingsystem": "Debian", "operatingsystemrelease": [ - "7", - "8" + "10" ] }, { "operatingsystem": "Ubuntu", "operatingsystemrelease": [ - "14.04", - "16.04" + "18.04", + "20.04" ] }, { "operatingsystem": "SLES", "operatingsystemrelease": [ - "12.1", - "12.2" + "12" ] } ], "requirements": [ { "name": "puppet", - "version_requirement": ">= 4.10.0 < 7.0.0" + "version_requirement": ">= 6.1.0 < 8.0.0" } ] } diff --git a/spec/acceptance/nodesets/amazonlinux-1-x64.yml b/spec/acceptance/nodesets/amazonlinux-1-x64.yml index 374f83c..21f449a 100644 --- a/spec/acceptance/nodesets/amazonlinux-1-x64.yml +++ b/spec/acceptance/nodesets/amazonlinux-1-x64.yml @@ -1,13 +1,14 @@ HOSTS: amazonlinux-1-x64: roles: + - agent - master platform: el-6-x86_64 image: amazonlinux:1 hypervisor: docker docker_cmd: ["/sbin/init"] docker_container_name: amazonlinux-1-x64 docker_preserve_image: true docker_image_commands: - rm /etc/init/tty.conf - yum install -y java-1.8.0-openjdk-headless rubygems20 tar wget which diff --git a/spec/acceptance/nodesets/amazonlinux-2-x64.yml b/spec/acceptance/nodesets/amazonlinux-2-x64.yml index 0f40344..2aceb47 100644 --- a/spec/acceptance/nodesets/amazonlinux-2-x64.yml +++ b/spec/acceptance/nodesets/amazonlinux-2-x64.yml @@ -1,17 +1,18 @@ HOSTS: amazonlinux-2-x64: roles: + - agent - master platform: el-7-x86_64 image: amazonlinux:2 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - yum install -y java-1.8.0-openjdk-headless 
net-tools wget which cronie iproute - mkdir -p /etc/selinux/targeted/contexts/ - echo '' > /etc/selinux/targeted/contexts/dbus_contexts - rm /lib/systemd/system/systemd*udev* - rm /lib/systemd/system/getty.target CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/centos-6-x64.yml b/spec/acceptance/nodesets/centos-6-x64.yml index 9f8acba..40b294b 100644 --- a/spec/acceptance/nodesets/centos-6-x64.yml +++ b/spec/acceptance/nodesets/centos-6-x64.yml @@ -1,18 +1,19 @@ HOSTS: centos-6-x64: roles: - master + - agent - database - dashboard platform: el-6-x86_64 image: centos:6.9 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - yum install -y wget tar which java-1.8.0-openjdk-headless - rm /etc/init/tty.conf - echo -e "elasticsearch hard nproc 4096\nelasticsearch soft nproc 4096" >> /etc/security/limits.conf - echo -e "esuser hard nproc 4096\nesuser soft nproc 4096" >> /etc/security/limits.conf CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/centos-7-x64.yml b/spec/acceptance/nodesets/centos-7-x64.yml index a61cfe3..dc95ee4 100644 --- a/spec/acceptance/nodesets/centos-7-x64.yml +++ b/spec/acceptance/nodesets/centos-7-x64.yml @@ -1,19 +1,20 @@ HOSTS: centos-7-x64: roles: + - agent - master - database - dashboard platform: el-7-x86_64 image: centos:7 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - yum install -y wget which cronie iproute - mkdir -p /etc/selinux/targeted/contexts/ - echo '' > /etc/selinux/targeted/contexts/dbus_contexts - rm /lib/systemd/system/systemd*udev* - rm /lib/systemd/system/getty.target CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/centos-8-x64.yml b/spec/acceptance/nodesets/centos-8-x64.yml new file mode 100644 index 0000000..03033d4 --- /dev/null +++ b/spec/acceptance/nodesets/centos-8-x64.yml @@ -0,0 +1,20 @@ +HOSTS: + centos-8-x64: + roles: + - agent + - master + - database + - dashboard + platform: el-8-x86_64 + image: centos:8 + hypervisor: docker + docker_cmd: ["/sbin/init"] + docker_preserve_image: true + docker_image_commands: + - yum install -y wget which cronie iproute + # - mkdir -p /etc/selinux/targeted/contexts/ + # - echo '' > /etc/selinux/targeted/contexts/dbus_contexts + # - rm /lib/systemd/system/systemd*udev* + # - rm /lib/systemd/system/getty.target +CONFIG: + log_level: warn diff --git a/spec/acceptance/nodesets/debian-10-x64.yml b/spec/acceptance/nodesets/debian-10-x64.yml new file mode 100644 index 0000000..20faebd --- /dev/null +++ b/spec/acceptance/nodesets/debian-10-x64.yml @@ -0,0 +1,17 @@ +HOSTS: + debian-10: + roles: + - agent + platform: debian-10-amd64 + image: debian:10 + hypervisor: docker + docker_cmd: ["/bin/systemd"] + docker_preserve_image: true + docker_image_commands: + - apt-get install -yq apt-transport-https wget net-tools gpg ruby-augeas software-properties-common + - wget -qO - https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public | apt-key add - + - add-apt-repository --yes https://adoptopenjdk.jfrog.io/adoptopenjdk/deb/ + - apt update && apt-get install -yq adoptopenjdk-8-hotspot +CONFIG: + log_level: warn + diff --git a/spec/acceptance/nodesets/debian-8-x64.yml b/spec/acceptance/nodesets/debian-8-x64.yml index f010972..766a414 100644 --- a/spec/acceptance/nodesets/debian-8-x64.yml +++ b/spec/acceptance/nodesets/debian-8-x64.yml @@ -1,22 +1,23 @@ HOSTS: debian-8: roles: + - agent - master - database - dashboard platform: debian-8-amd64 image: debian:8.11 hypervisor: docker 
docker_cmd: ["/bin/systemd"] docker_preserve_image: true docker_image_commands: - echo 'deb [check-valid-until=no] http://archive.debian.org/debian jessie-backports main' >> /etc/apt/sources.list - echo 'Acquire::Check-Valid-Until "false";' >> /etc/apt/apt.conf - sed -i '/jessie.updates/d' /etc/apt/sources.list - apt-get update - apt-get install -yq -t jessie-backports openjdk-8-jre-headless - apt-get install -yq wget net-tools apt-transport-https - rm /lib/systemd/system/systemd*udev* - rm /lib/systemd/system/getty.target CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/oracle-6-x64.yml b/spec/acceptance/nodesets/oracle-6-x64.yml index de7c8d9..f492d67 100644 --- a/spec/acceptance/nodesets/oracle-6-x64.yml +++ b/spec/acceptance/nodesets/oracle-6-x64.yml @@ -1,18 +1,19 @@ HOSTS: centos-6-x64: roles: + - agent - master - database - dashboard platform: el-6-x86_64 image: oraclelinux:6 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - yum install -y tar wget which java-1.8.0-openjdk-headless - rm /etc/init/tty.conf - echo -e "elasticsearch hard nproc 4096\nelasticsearch soft nproc 4096" >> /etc/security/limits.conf - echo -e "esuser hard nproc 4096\nesuser soft nproc 4096" >> /etc/security/limits.conf CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/oracle-7-x64.yml b/spec/acceptance/nodesets/oracle-7-x64.yml index c868ae7..43c777f 100644 --- a/spec/acceptance/nodesets/oracle-7-x64.yml +++ b/spec/acceptance/nodesets/oracle-7-x64.yml @@ -1,19 +1,20 @@ HOSTS: oracle-7-x64: roles: + - agent - master - database - dashboard platform: el-7-x86_64 image: oraclelinux:7 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - yum install -y wget which cronie - mkdir -p /etc/selinux/targeted/contexts/ - echo '' > /etc/selinux/targeted/contexts/dbus_contexts - rm /lib/systemd/system/systemd*udev* - rm /lib/systemd/system/getty.target CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/sles-11-x64.yml b/spec/acceptance/nodesets/sles-11-x64.yml index fd4279d..80f8309 100644 --- a/spec/acceptance/nodesets/sles-11-x64.yml +++ b/spec/acceptance/nodesets/sles-11-x64.yml @@ -1,18 +1,19 @@ HOSTS: sles-11-x64: roles: + - agent - master - database - dashboard platform: sles-11-x64 image: dliappis/sles:11sp4 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - gem uninstall puppet hiera - zypper install -y augeas augeas-lenses pkgconfig - mkdir -p /etc/puppetlabs/code /etc/puppet/modules - ln -sf /etc/puppet/modules /etc/puppetlabs/code/modules CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/sles-12-x64.yml b/spec/acceptance/nodesets/sles-12-x64.yml index fa34b57..a2c5cc0 100644 --- a/spec/acceptance/nodesets/sles-12-x64.yml +++ b/spec/acceptance/nodesets/sles-12-x64.yml @@ -1,19 +1,20 @@ HOSTS: sles-12-x64: roles: + - agent - master - database - dashboard platform: sles-12-x86_64 image: dliappis/sles:12 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - rm /etc/zypp/repos.d/devel_languages_python.repo - gem uninstall -x puppet hiera - zypper clean -a - zypper install --force-resolution -y augeas which - ln -s /usr/lib/systemd/system/sshd.service /etc/systemd/system/multi-user.target.wants/sshd.service CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/ubuntu-server-1204-x64.yml b/spec/acceptance/nodesets/ubuntu-server-1204-x64.yml deleted file mode 100644 index 
827f0ba..0000000 --- a/spec/acceptance/nodesets/ubuntu-server-1204-x64.yml +++ /dev/null @@ -1,18 +0,0 @@ -HOSTS: - ubuntu-12-04: - roles: - - master - - database - - dashboard - platform: ubuntu-12.04-amd64 - image: ubuntu:12.04 - hypervisor: docker - docker_cmd: ["/sbin/init"] - docker_preserve_image: true - docker_image_commands: - - apt-get install -yq libssl-dev net-tools - - ln -sf /sbin/initctl.distrib /sbin/initctl - - locale-gen en_US en_US.UTF-8 - - dpkg-reconfigure locales -CONFIG: - log_level: warn diff --git a/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml b/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml index 4db5ccc..6dea31b 100644 --- a/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml +++ b/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml @@ -1,23 +1,24 @@ HOSTS: ubuntu-14-04: roles: + - agent - master - database - dashboard platform: ubuntu-14.04-amd64 image: ubuntu:14.04.5 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - apt-get update - apt-get install -yq apt-transport-https libssl-dev software-properties-common - add-apt-repository -y ppa:openjdk-r/ppa - apt-get update - apt-get install -y openjdk-8-jre-headless - update-ca-certificates -f - ln -sf /sbin/initctl.distrib /sbin/initctl - locale-gen en_US en_US.UTF-8 - dpkg-reconfigure locales CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml b/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml index 7af781c..6a665dc 100644 --- a/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml +++ b/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml @@ -1,16 +1,17 @@ HOSTS: ubuntu-16-04: roles: + - agent - master - database - dashboard platform: ubuntu-16.04-amd64 image: ubuntu:16.04 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - apt-get update - - apt-get install -yq libssl-dev puppet apt-transport-https + - apt-get install -yq libssl-dev apt-transport-https CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml b/spec/acceptance/nodesets/ubuntu-server-1804-x64.yml similarity index 57% copy from spec/acceptance/nodesets/ubuntu-server-1604-x64.yml copy to spec/acceptance/nodesets/ubuntu-server-1804-x64.yml index 7af781c..16b3bcc 100644 --- a/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml +++ b/spec/acceptance/nodesets/ubuntu-server-1804-x64.yml @@ -1,16 +1,17 @@ HOSTS: - ubuntu-16-04: + ubuntu-18-04: roles: + - agent - master - database - dashboard - platform: ubuntu-16.04-amd64 - image: ubuntu:16.04 + platform: ubuntu-18.04-amd64 + image: ubuntu:18.04 hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - apt-get update - - apt-get install -yq libssl-dev puppet apt-transport-https + - apt-get install -yq libssl-dev apt-transport-https openjdk-8-jdk iproute2 CONFIG: log_level: warn diff --git a/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml b/spec/acceptance/nodesets/ubuntu-server-2004-x64.yml similarity index 57% copy from spec/acceptance/nodesets/ubuntu-server-1604-x64.yml copy to spec/acceptance/nodesets/ubuntu-server-2004-x64.yml index 7af781c..c0b13f5 100644 --- a/spec/acceptance/nodesets/ubuntu-server-1604-x64.yml +++ b/spec/acceptance/nodesets/ubuntu-server-2004-x64.yml @@ -1,16 +1,17 @@ HOSTS: - ubuntu-16-04: + ubuntu-20-04: roles: + - agent - master - database - dashboard - platform: ubuntu-16.04-amd64 - image: ubuntu:16.04 + platform: ubuntu-20.04-amd64 + image: ubuntu:20.04 
hypervisor: docker docker_cmd: ["/sbin/init"] docker_preserve_image: true docker_image_commands: - apt-get update - - apt-get install -yq libssl-dev puppet apt-transport-https + - apt-get install -yq libssl-dev apt-transport-https openjdk-8-jdk iproute2 CONFIG: log_level: warn diff --git a/spec/acceptance/tests/acceptance_spec.rb b/spec/acceptance/tests/acceptance_spec.rb index 44abf7c..8cafa20 100644 --- a/spec/acceptance/tests/acceptance_spec.rb +++ b/spec/acceptance/tests/acceptance_spec.rb @@ -1,124 +1,83 @@ require 'spec_helper_acceptance' require 'helpers/acceptance/tests/basic_shared_examples.rb' require 'helpers/acceptance/tests/template_shared_examples.rb' require 'helpers/acceptance/tests/removal_shared_examples.rb' require 'helpers/acceptance/tests/pipeline_shared_examples.rb' require 'helpers/acceptance/tests/plugin_shared_examples.rb' require 'helpers/acceptance/tests/plugin_upgrade_shared_examples.rb' require 'helpers/acceptance/tests/snapshot_repository_shared_examples.rb' require 'helpers/acceptance/tests/datadir_shared_examples.rb' require 'helpers/acceptance/tests/package_url_shared_examples.rb' require 'helpers/acceptance/tests/hiera_shared_examples.rb' require 'helpers/acceptance/tests/usergroup_shared_examples.rb' require 'helpers/acceptance/tests/security_shared_examples.rb' describe "elasticsearch v#{v[:elasticsearch_full_version]} class" do - es_01 = { - 'es-01' => { - 'config' => { - 'http.port' => 9200, - 'node.name' => 'elasticsearch001' - } - } + es_config = { + 'cluster.name' => v[:cluster_name], + 'http.bind_host' => '0.0.0.0', + 'http.port' => 9200, + 'node.name' => 'elasticsearch01' } - es_02 = { - 'es-02' => { - 'config' => { - 'http.port' => 9201, - 'node.name' => 'elasticsearch002' - } - } - } - instances = es_01.merge es_02 let(:elastic_repo) { not v[:is_snapshot] } let(:manifest) do package = if not v[:is_snapshot] <<-MANIFEST # Hard version set here due to plugin incompatibilities. version => '#{v[:elasticsearch_full_version]}', MANIFEST else <<-MANIFEST manage_repo => false, package_url => '#{v[:snapshot_package]}', MANIFEST end - heap = if v[:elasticsearch_major_version] > 2 - <<-MANIFEST - jvm_options => [ - '-Xms128m', - '-Xmx128m', - ], - MANIFEST - else - <<-MANIFEST - init_defaults => { - 'ES_HEAP_SIZE' => '128m', - }, - MANIFEST - end - <<-MANIFEST api_timeout => 60, config => { - 'cluster.name' => '#{v[:cluster_name]}', - 'http.bind_host' => '0.0.0.0', +#{es_config.map { |k, v| " '#{k}' => '#{v}'," }.join("\n")} }, + jvm_options => [ + '-Xms128m', + '-Xmx128m', + ], oss => #{v[:oss]}, #{package} - #{heap} MANIFEST end - context 'instance testing with' do - describe 'one' do - include_examples('basic acceptance tests', es_01) + context 'testing with' do + describe 'simple config' do + include_examples('basic acceptance tests', es_config) end - describe 'two' do - include_examples('basic acceptance tests', instances) - end - - describe 'one absent' do - include_examples('basic acceptance tests', es_01.merge('es-02' => {})) - end - - include_examples 'module removal', ['es-01'] + include_examples('module removal', es_config) end - include_examples('template operations', es_01, v[:template]) - - include_examples('pipeline operations', es_01, v[:pipeline]) if semver(v[:elasticsearch_full_version]) >= semver('5.0.0') + include_examples('template operations', es_config, v[:template]) - include_examples('plugin acceptance tests', v[:elasticsearch_plugins]) unless v[:elasticsearch_plugins].empty? 
+ include_examples('pipeline operations', es_config, v[:pipeline]) - # Only pre-5.x versions supported versions differing from core ES - if semver(v[:elasticsearch_full_version]) < semver('5.0.0') - include_examples( - 'plugin upgrade acceptance tests', - :name => 'kopf', - :initial => '2.0.1', - :upgraded => '2.1.2', - :repository => 'lmenezes/elasticsearch' - ) - end + include_examples( + 'plugin acceptance tests', + es_config, + v[:elasticsearch_plugins] + ) unless v[:elasticsearch_plugins].empty? - include_examples 'snapshot repository acceptance tests' + include_examples('snapshot repository acceptance tests') - include_examples 'datadir acceptance tests' + include_examples('datadir acceptance tests', es_config) # Skip this for snapshot testing, as we only have package files anyway. - include_examples 'package_url acceptance tests' unless v[:is_snapshot] - - include_examples 'hiera acceptance tests', v[:elasticsearch_plugins] + include_examples('package_url acceptance tests', es_config) unless v[:is_snapshot] - include_examples 'user/group acceptance tests' + include_examples('hiera acceptance tests', es_config, v[:elasticsearch_plugins]) # Security-related tests (shield/x-pack). # # Skip OSS-only distributions since they do not bundle x-pack, and skip # snapshots since we they don't recognize prod licenses. - include_examples 'security acceptance tests', instances unless v[:oss] or v[:is_snapshot] + include_examples('security acceptance tests', es_config) unless v[:oss] or v[:is_snapshot] end diff --git a/spec/classes/000_elasticsearch_init_spec.rb b/spec/classes/000_elasticsearch_init_spec.rb index 6356e2e..1bd73cc 100644 --- a/spec/classes/000_elasticsearch_init_spec.rb +++ b/spec/classes/000_elasticsearch_init_spec.rb @@ -1,405 +1,440 @@ require 'spec_helper' describe 'elasticsearch', :type => 'class' do default_params = { :config => { 'node.name' => 'foo' } } on_supported_os.each do |os, facts| context "on #{os}" do case facts[:os]['family'] when 'Debian' let(:defaults_path) { '/etc/default' } let(:system_service_folder) { '/lib/systemd/system' } let(:pkg_ext) { 'deb' } let(:pkg_prov) { 'dpkg' } let(:version_add) { '' } if (facts[:os]['name'] == 'Debian' and \ facts[:os]['release']['major'].to_i >= 8) or \ (facts[:os]['name'] == 'Ubuntu' and \ facts[:os]['release']['major'].to_i >= 15) let(:systemd_service_path) { '/lib/systemd/system' } test_pid = true else test_pid = false end when 'RedHat' let(:defaults_path) { '/etc/sysconfig' } let(:system_service_folder) { '/lib/systemd/system' } let(:pkg_ext) { 'rpm' } let(:pkg_prov) { 'rpm' } let(:version_add) { '-1' } if facts[:os]['release']['major'].to_i >= 7 let(:systemd_service_path) { '/lib/systemd/system' } test_pid = true else test_pid = false end when 'Suse' let(:defaults_path) { '/etc/sysconfig' } let(:pkg_ext) { 'rpm' } let(:pkg_prov) { 'rpm' } let(:version_add) { '-1' } if facts[:os]['name'] == 'OpenSuSE' and facts[:os]['release']['major'].to_i <= 12 let(:systemd_service_path) { '/lib/systemd/system' } else let(:systemd_service_path) { '/usr/lib/systemd/system' } end end let(:facts) do - facts.merge('scenario' => '', 'common' => '') + facts.merge('scenario' => '', 'common' => '', 'elasticsearch' => {}) end let(:params) do default_params.merge({}) end + it { should compile.with_all_deps } + # Varies depending on distro it { should contain_augeas("#{defaults_path}/elasticsearch") } - it do - should contain_file("#{defaults_path}/elasticsearch").with( - :ensure => 'file', - :group => 'elasticsearch', - :owner => 'elasticsearch', 
- :mode => '0640' - ) - end # Systemd-specific files if test_pid == true - it { should contain_service('elasticsearch').with(:ensure => false).with(:enable => 'mask') } - it { should contain_file('/usr/lib/tmpfiles.d/elasticsearch.conf') } + it { should contain_service('elasticsearch').with( + :ensure => 'running', + :enable => true + ) } end context 'java installation' do let(:pre_condition) do <<-MANIFEST include ::java MANIFEST end it { should contain_class('elasticsearch::config') .that_requires('Class[java]') } end context 'package installation' do context 'via repository' do context 'with specified version' do let(:params) do default_params.merge( :version => '1.0' ) end it { should contain_package('elasticsearch') .with(:ensure => "1.0#{version_add}") } end if facts[:os]['family'] == 'RedHat' context 'Handle special CentOS/RHEL package versioning' do let(:params) do default_params.merge( :version => '1.1-2' ) end it { should contain_package('elasticsearch') .with(:ensure => '1.1-2') } end end end context 'when setting package version and package_url' do let(:params) do default_params.merge( :version => '0.90.10', :package_url => "puppet:///path/to/some/es-0.90.10.#{pkg_ext}" ) end it { expect { should raise_error(Puppet::Error) } } end context 'via package_url setting' do ['file:/', 'ftp://', 'http://', 'https://', 'puppet:///'].each do |schema| context "using #{schema} schema" do let(:params) do default_params.merge( :package_url => "#{schema}domain-or-path/pkg.#{pkg_ext}" ) end unless schema.start_with? 'puppet' it { should contain_exec('create_package_dir_elasticsearch') .with(:command => 'mkdir -p /opt/elasticsearch/swdl') } it { should contain_file('/opt/elasticsearch/swdl') .with( :purge => false, :force => false, :require => 'Exec[create_package_dir_elasticsearch]' ) } end case schema when 'file:/' it { should contain_file( "/opt/elasticsearch/swdl/pkg.#{pkg_ext}" ).with( :source => "/domain-or-path/pkg.#{pkg_ext}", :backup => false ) } when 'puppet:///' it { should contain_file( "/opt/elasticsearch/swdl/pkg.#{pkg_ext}" ).with( :source => "#{schema}domain-or-path/pkg.#{pkg_ext}", :backup => false ) } else [true, false].each do |verify_certificates| context "with download_tool_verify_certificates '#{verify_certificates}'" do let(:params) do default_params.merge( :package_url => "#{schema}domain-or-path/pkg.#{pkg_ext}", :download_tool_verify_certificates => verify_certificates ) end flag = (not verify_certificates) ? 
' --no-check-certificate' : '' it { should contain_exec('download_package_elasticsearch') .with( :command => "wget#{flag} -O /opt/elasticsearch/swdl/pkg.#{pkg_ext} #{schema}domain-or-path/pkg.#{pkg_ext} 2> /dev/null", :require => 'File[/opt/elasticsearch/swdl]' ) } end end end it { should contain_package('elasticsearch') .with( :ensure => 'present', :source => "/opt/elasticsearch/swdl/pkg.#{pkg_ext}", :provider => pkg_prov ) } end end context 'using http:// schema with proxy_url' do let(:params) do default_params.merge( :package_url => "http://www.domain.com/package.#{pkg_ext}", :proxy_url => 'http://proxy.example.com:12345/' ) end it { should contain_exec('download_package_elasticsearch') .with( :environment => [ 'use_proxy=yes', 'http_proxy=http://proxy.example.com:12345/', 'https_proxy=http://proxy.example.com:12345/' ] ) } end end end # package context 'when setting the module to absent' do let(:params) do default_params.merge( :ensure => 'absent' ) end case facts[:os]['family'] when 'Suse' it { should contain_package('elasticsearch') .with(:ensure => 'absent') } else it { should contain_package('elasticsearch') .with(:ensure => 'purged') } end + it { should contain_service('elasticsearch') + .with( + :ensure => 'stopped', + :enable => 'false' + ) } it { should contain_file('/usr/share/elasticsearch/plugins') .with(:ensure => 'absent') } + it { should contain_file("#{defaults_path}/elasticsearch") + .with(:ensure => 'absent') } end context 'When managing the repository' do let(:params) do default_params.merge( :manage_repo => true ) end it { should contain_class('elastic_stack::repo') } end context 'When not managing the repository' do let(:params) do default_params.merge( :manage_repo => false ) end it { should compile.with_all_deps } end end end on_supported_os( :hardwaremodels => ['x86_64'], :supported_os => [ { 'operatingsystem' => 'CentOS', 'operatingsystemrelease' => ['7'] } ] ).each do |os, facts| context "on #{os}" do let(:facts) { facts.merge( :scenario => '', :common => '' ) } - context 'main class tests' do + describe 'main class tests' do # init.pp it { should compile.with_all_deps } it { should contain_class('elasticsearch') } it { should contain_class('elasticsearch::package') } it { should contain_class('elasticsearch::config') .that_requires('Class[elasticsearch::package]') } + it { should contain_class('elasticsearch::service') + .that_requires('Class[elasticsearch::config]') } # Base directories it { should contain_file('/etc/elasticsearch') } - it { should contain_file('/usr/share/elasticsearch/templates_import') } - it { should contain_file('/usr/share/elasticsearch/scripts') } it { should contain_file('/usr/share/elasticsearch') } it { should contain_file('/usr/share/elasticsearch/lib') } + it { should contain_file('/var/lib/elasticsearch') } it { should contain_exec('remove_plugin_dir') } - - # file removal from package - it { should contain_file('/etc/elasticsearch/elasticsearch.yml') - .with(:ensure => 'absent') } - it { should contain_file('/etc/elasticsearch/jvm.options') - .with(:ensure => 'absent') } - it { should contain_file('/etc/elasticsearch/logging.yml') - .with(:ensure => 'absent') } - it { should contain_file('/etc/elasticsearch/log4j2.properties') - .with(:ensure => 'absent') } - it { should contain_file('/etc/elasticsearch/log4j2.properties') - .with(:ensure => 'absent') } end context 'package installation' do - context 'with default package' do + describe 'with default package' do it { should contain_package('elasticsearch') .with(:ensure => 
'present') } it { should_not contain_package('my-elasticsearch') .with(:ensure => 'present') } end - context 'with specified package name' do + describe 'with specified package name' do let(:params) do default_params.merge( :package_name => 'my-elasticsearch' ) end it { should contain_package('elasticsearch') .with(:ensure => 'present', :name => 'my-elasticsearch') } it { should_not contain_package('elasticsearch') .with(:ensure => 'present', :name => 'elasticsearch') } end - context 'with auto upgrade enabled' do + describe 'with auto upgrade enabled' do let(:params) do default_params.merge( :autoupgrade => true ) end it { should contain_package('elasticsearch') .with(:ensure => 'latest') } end end - context 'running a a different user' do + describe 'running a a different user' do let(:params) do default_params.merge( :elasticsearch_user => 'myesuser', :elasticsearch_group => 'myesgroup' ) end it { should contain_file('/etc/elasticsearch') - .with(:owner => 'root', :group => 'myesgroup') } + .with(:owner => 'myesuser', :group => 'myesgroup') } it { should contain_file('/var/log/elasticsearch') .with(:owner => 'myesuser') } it { should contain_file('/usr/share/elasticsearch') .with(:owner => 'myesuser', :group => 'myesgroup') } it { should contain_file('/var/lib/elasticsearch') .with(:owner => 'myesuser', :group => 'myesgroup') } - it { should contain_file('/var/run/elasticsearch') - .with(:owner => 'myesuser') if facts[:os]['family'] == 'RedHat' } + end + + describe 'setting jvm_options' do + jvm_options = [ + '-Xms16g', + '-Xmx16g' + ] + + let(:params) do + default_params.merge( + :jvm_options => jvm_options + ) + end + + jvm_options.each do |jvm_option| + it { should contain_file_line("jvm_option_#{jvm_option}") + .with( + :ensure => 'present', + :path => '/etc/elasticsearch/jvm.options', + :line => jvm_option + )} + end + end + + context 'with restart_on_change => true' do + let(:params) do + default_params.merge( + :restart_on_change => true + ) + end + + describe 'should restart elasticsearch' do + it { should contain_file('/etc/elasticsearch/elasticsearch.yml') + .that_notifies('Service[elasticsearch]')} + end + + describe 'setting jvm_options triggers restart' do + let(:params) do + super().merge( + :jvm_options => ['-Xmx16g'] + ) + end + + it { should contain_file_line('jvm_option_-Xmx16g') + .that_notifies('Service[elasticsearch]')} + end end # This check helps catch dependency cycles. 
context 'create_resource' do # Helper for these tests def singular(s) case s when 'indices' 'index' when 'snapshot_repositories' 'snapshot_repository' else s[0..-2] end end { 'indices' => { 'test-index' => {} }, - 'instances' => { 'es-instance' => {} }, + # 'instances' => { 'es-instance' => {} }, 'pipelines' => { 'testpipeline' => { 'content' => {} } }, 'plugins' => { 'head' => {} }, 'roles' => { 'elastic_role' => {} }, 'scripts' => { 'foo' => { 'source' => 'puppet:///path/to/foo.groovy' } }, 'snapshot_repositories' => { 'backup' => { 'location' => '/backups' } }, 'templates' => { 'foo' => { 'content' => {} } }, 'users' => { 'elastic' => { 'password' => 'foobar' } } }.each_pair do |deftype, params| describe deftype do let(:params) do default_params.merge( - deftype => params, - :security_plugin => 'x-pack' + deftype => params ) end it { should compile } it { should send( "contain_elasticsearch__#{singular(deftype)}", params.keys.first ) } end end end describe 'oss' do let(:params) do default_params.merge(:oss => true) end it do should contain_package('elasticsearch').with( :name => 'elasticsearch-oss' ) end end end end end diff --git a/spec/classes/001_hiera_spec.rb b/spec/classes/001_hiera_spec.rb index 499838d..e7ad80a 100644 --- a/spec/classes/001_hiera_spec.rb +++ b/spec/classes/001_hiera_spec.rb @@ -1,235 +1,213 @@ require 'spec_helper' describe 'elasticsearch', :type => 'class' do default_params = { :config => { 'node.name' => 'foo' } } let(:params) do default_params.merge({}) end on_supported_os( :hardwaremodels => ['x86_64'], :supported_os => [ { 'operatingsystem' => 'CentOS', 'operatingsystemrelease' => ['7'] } ] ).each do |os, facts| context "on #{os}" do context 'hiera' do describe 'indices' do context 'single indices' do let(:facts) { facts.merge(:scenario => 'singleindex') } it { should contain_elasticsearch__index('baz') .with( :ensure => 'present', :settings => { 'index' => { 'number_of_shards' => 1 } } ) } it { should contain_elasticsearch_index('baz') } it { should contain_es_instance_conn_validator( 'baz-index-conn-validator' ) } end context 'no indices' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__index('baz') } end end - describe 'instances' do - context 'single instance' do - let(:facts) { facts.merge(:scenario => 'singleinstance') } - - include_examples 'instance', 'es-hiera-single', :systemd - end - - context 'multiple instances' do - let(:facts) { facts.merge(:scenario => 'multipleinstances') } - - include_examples 'instance', 'es-hiera-multiple-1', :systemd - include_examples 'instance', 'es-hiera-multiple-2', :systemd - end - - context 'no instances' do - let(:facts) { facts.merge(:scenario => '') } - - it { should_not contain_elasticsearch__instance('es-hiera-multiple-1') } - it { should_not contain_elasticsearch__instance('es-hiera-multiple-2') } - end - - context 'multiple instances using lookup_options' do - let(:facts) do - facts.merge( - :common => 'defaultinstance-merged', - :scenario => 'singleinstance' - ) - end - - include_examples 'instance', 'default', :systemd - include_examples 'instance', 'es-hiera-single', :systemd - end - end # of instances + context 'config' do + let(:facts) { facts.merge(:scenario => 'singleinstance') } + + it { should contain_augeas('/etc/sysconfig/elasticsearch') } + it { should contain_file('/etc/elasticsearch/elasticsearch.yml') } + it { should contain_datacat('/etc/elasticsearch/elasticsearch.yml') } + it { should contain_datacat_fragment('main_config') } + it { should 
contain_service('elasticsearch').with( + :ensure => 'running', + :enable => true + ) } + end # of config describe 'pipelines' do context 'single pipeline' do let(:facts) { facts.merge(:scenario => 'singlepipeline') } it { should contain_elasticsearch__pipeline('testpipeline') .with( :ensure => 'present', :content => { 'description' => 'Add the foo field', 'processors' => [ { 'set' => { 'field' => 'foo', 'value' => 'bar' } } ] } ) } it { should contain_elasticsearch_pipeline('testpipeline') } end context 'no pipelines' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__pipeline('testpipeline') } end end describe 'plugins' do context 'single plugin' do let(:facts) { facts.merge(:scenario => 'singleplugin') } it { should contain_elasticsearch__plugin('mobz/elasticsearch-head') .with( - :ensure => 'present', - :module_dir => 'head', - :instances => ['es-hiera-single'] + :ensure => 'present', + :module_dir => 'head' ) } it { should contain_elasticsearch_plugin('mobz/elasticsearch-head') } end context 'no plugins' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__plugin( 'mobz/elasticsearch-head/1.0.0' ) } end end describe 'roles' do context 'single roles' do let(:facts) { facts.merge(:scenario => 'singlerole') } let(:params) do - default_params.merge(:security_plugin => 'x-pack') + default_params end it { should contain_elasticsearch__role('admin') .with( :ensure => 'present', :privileges => { 'cluster' => 'monitor', 'indices' => { '*' => 'all' } }, :mappings => [ 'cn=users,dc=example,dc=com' ] ) } it { should contain_elasticsearch_role('admin') } it { should contain_elasticsearch_role_mapping('admin') } end context 'no roles' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__role('admin') } end end describe 'scripts' do context 'single scripts' do let(:facts) { facts.merge(:scenario => 'singlescript') } it { should contain_elasticsearch__script('myscript') .with( :ensure => 'present', :source => 'puppet:///file/here' ) } it { should contain_file('/usr/share/elasticsearch/scripts/here') } end context 'no roles' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__script('myscript') } end end describe 'templates' do context 'single template' do let(:facts) { facts.merge(:scenario => 'singletemplate') } it { should contain_elasticsearch__template('foo') .with( :ensure => 'present', :content => { 'template' => 'foo-*', 'settings' => { 'index' => { 'number_of_replicas' => 0 } } } ) } it { should contain_elasticsearch_template('foo') } end context 'no templates' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__template('foo') } end end describe 'users' do context 'single users' do let(:facts) { facts.merge(:scenario => 'singleuser') } let(:params) do - default_params.merge(:security_plugin => 'x-pack') + default_params end it { should contain_elasticsearch__user('elastic') .with( :ensure => 'present', :roles => ['admin'], :password => 'password' ) } it { should contain_elasticsearch_user('elastic') } end context 'no users' do let(:facts) { facts.merge(:scenario => '') } it { should_not contain_elasticsearch__user('elastic') } end end end end end end diff --git a/spec/classes/002_elasticsearch_config_security_logging_spec.rb b/spec/classes/002_elasticsearch_config_security_logging_spec.rb deleted file mode 100644 index 7ae896a..0000000 --- a/spec/classes/002_elasticsearch_config_security_logging_spec.rb +++ /dev/null @@ 
-1,83 +0,0 @@ -require 'spec_helper' - -shared_examples 'security plugin logging' do |plugin, logfile, tests| - describe "security logging configuration file for #{plugin}" do - tests.each_pair do |param_type, params| - context "with no security plugin defined for #{param_type}" do - let(:params) do - { "security_logging_#{param_type}" => params[:manifest] } - end - - it { should_not compile.with_all_deps } - end - - context "parameter #{param_type}" do - let(:params) do - { - :security_plugin => plugin, - "security_logging_#{param_type}" => params[:manifest] - } - end - - it { should contain_file("/etc/elasticsearch/#{plugin}") - .with_ensure('directory')} - - case param_type - when 'source' - it 'sets the source for the file resource' do - should contain_file("/etc/elasticsearch/#{plugin}/#{logfile}") - .with_source(params[:value]) - end - when 'content' - it 'sets logging file yaml content' do - should contain_file("/etc/elasticsearch/#{plugin}/#{logfile}") - .with_content(params[:value]) - end - end - end - end - end -end - -describe 'elasticsearch', :type => 'class' do - on_supported_os( - :hardwaremodels => ['x86_64'], - :supported_os => [ - { - 'operatingsystem' => 'CentOS', - 'operatingsystemrelease' => ['6'] - } - ] - ).each do |os, facts| - context "on #{os}" do - let(:facts) { facts.merge( - :scenario => '', - :common => '' - ) } - - include_examples 'security plugin logging', - 'shield', - 'logging.yml', - 'content' => { - :manifest => "one: two\nfoo: bar\n", - :value => "one: two\nfoo: bar\n" - }, - 'source' => { - :manifest => '/foo/bar.yml', - :value => '/foo/bar.yml' - } - - include_examples 'security plugin logging', - 'x-pack', - 'log4j2.properties', - 'content' => { - :manifest => "one = two\nfoo = bar\n", - :value => "one = two\nfoo = bar\n" - }, - 'source' => { - :manifest => '/foo/bar.properties', - :value => '/foo/bar.properties' - } - end - end -end diff --git a/spec/classes/006_elasticsearch_license_spec.rb b/spec/classes/006_elasticsearch_license_spec.rb index 88da031..0c2579d 100644 --- a/spec/classes/006_elasticsearch_license_spec.rb +++ b/spec/classes/006_elasticsearch_license_spec.rb @@ -1,85 +1,85 @@ require 'spec_helper' describe 'elasticsearch::license', :type => 'class' do # First, randomly select one of our supported OSes to run tests that apply # to any distro on_supported_os.to_a.sample(1).to_h.each do |os, facts| context "on #{os}" do let(:facts) do facts.merge('scenario' => '', 'common' => '') end - %w[shield x-pack].each do |plugin| - context "with #{plugin} plugin" do - let(:params) do - { - :security_plugin => plugin, - :content => { - 'license' => { - 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', - 'type' => 'trial', - 'issue_date_in_millis' => 1_519_341_125_550, - 'expiry_date_in_millis' => 1_521_933_125_550, - 'max_nodes' => 1000, - 'issued_to' => 'test', - 'issuer' => 'elasticsearch', - 'signature' => 'secretvalue', - 'start_date_in_millis' => 1_513_814_400_000 - } + context 'when managing x-pack license' do + let(:params) do + { + :content => { + 'license' => { + 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', + 'type' => 'trial', + 'issue_date_in_millis' => 1_519_341_125_550, + 'expiry_date_in_millis' => 1_521_933_125_550, + 'max_nodes' => 1000, + 'issued_to' => 'test', + 'issuer' => 'elasticsearch', + 'signature' => 'secretvalue', + 'start_date_in_millis' => 1_513_814_400_000 } } - end + } + end - let(:pre_condition) do - <<-EOS - class { 'elasticsearch' : - api_protocol => 'https', - api_host => '127.0.0.1', - api_port => 9201, - 
api_timeout => 11, - api_basic_auth_username => 'elastic', - api_basic_auth_password => 'password', - api_ca_file => '/foo/bar.pem', - api_ca_path => '/foo/', - validate_tls => false, - } - EOS - end + let(:pre_condition) do + <<-EOS + class { 'elasticsearch' : + api_protocol => 'https', + api_host => '127.0.0.1', + api_port => 9201, + api_timeout => 11, + api_basic_auth_username => 'elastic', + api_basic_auth_password => 'password', + api_ca_file => '/foo/bar.pem', + api_ca_path => '/foo/', + validate_tls => false, + } + EOS + end - it do - should contain_class('elasticsearch::license') - should contain_es_instance_conn_validator( - 'license-conn-validator' - ).that_comes_before("elasticsearch_license[#{plugin.delete('-')}]") - should contain_elasticsearch_license(plugin.delete('-')).with( - :ensure => 'present', - :content => { - 'license' => { - 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', - 'type' => 'trial', - 'issue_date_in_millis' => 1_519_341_125_550, - 'expiry_date_in_millis' => 1_521_933_125_550, - 'max_nodes' => 1000, - 'issued_to' => 'test', - 'issuer' => 'elasticsearch', - 'signature' => 'secretvalue', - 'start_date_in_millis' => 1_513_814_400_000 - } - }, - :protocol => 'https', - :host => '127.0.0.1', - :port => 9201, - :timeout => 11, - :username => 'elastic', - :password => 'password', - :ca_file => '/foo/bar.pem', - :ca_path => '/foo/', - :validate_tls => false, - :provider => plugin.delete('-') - ) - end + it do + should contain_class('elasticsearch::license') + end + it do + should contain_es_instance_conn_validator( + 'license-conn-validator' + ).that_comes_before('elasticsearch_license[xpack]') + end + it do + should contain_elasticsearch_license('xpack').with( + :ensure => 'present', + :content => { + 'license' => { + 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', + 'type' => 'trial', + 'issue_date_in_millis' => 1_519_341_125_550, + 'expiry_date_in_millis' => 1_521_933_125_550, + 'max_nodes' => 1000, + 'issued_to' => 'test', + 'issuer' => 'elasticsearch', + 'signature' => 'secretvalue', + 'start_date_in_millis' => 1_513_814_400_000 + } + }, + :protocol => 'https', + :host => '127.0.0.1', + :port => 9201, + :timeout => 11, + :username => 'elastic', + :password => 'password', + :ca_file => '/foo/bar.pem', + :ca_path => '/foo/', + :validate_tls => false + ) end end end end end diff --git a/spec/defines/004_elasticsearch_plugin_spec.rb b/spec/defines/004_elasticsearch_plugin_spec.rb index 1ad8eac..f22e321 100644 --- a/spec/defines/004_elasticsearch_plugin_spec.rb +++ b/spec/defines/004_elasticsearch_plugin_spec.rb @@ -1,329 +1,307 @@ require 'spec_helper' describe 'elasticsearch::plugin', :type => 'define' do let(:title) { 'mobz/elasticsearch-head/1.0.0' } on_supported_os( :hardwaremodels => ['x86_64'], :supported_os => [ { 'operatingsystem' => 'CentOS', 'operatingsystemrelease' => ['6'] } ] ).each do |_os, facts| let(:facts) do facts.merge('scenario' => '', 'common' => '') end let(:pre_condition) do <<-EOS class { "elasticsearch": config => { "node" => { "name" => "test" } } } EOS end context 'default values' do context 'present' do let(:params) do { - :ensure => 'present', - :configdir => '/etc/elasticsearch', - :instances => 'es-plugin' + :ensure => 'present', + :configdir => '/etc/elasticsearch' } end it { is_expected.to compile } end context 'absent' do let(:params) do { - :ensure => 'absent', - :instances => 'es-plugin' + :ensure => 'absent' } end it { is_expected.to compile } end context 'configdir' do - let(:params) do { - :instances => 'es-plugin' - } end 
- it { should contain_elasticsearch__plugin( 'mobz/elasticsearch-head/1.0.0' ).with_configdir('/etc/elasticsearch') } it { should contain_elasticsearch_plugin( 'mobz/elasticsearch-head/1.0.0' ).with_configdir('/etc/elasticsearch') } end end context 'with module_dir' do context 'add a plugin' do let(:params) do { :ensure => 'present', - :module_dir => 'head', - :instances => 'es-plugin' + :module_dir => 'head' } end it { should contain_elasticsearch__plugin( 'mobz/elasticsearch-head/1.0.0' ) } it { should contain_elasticsearch_plugin( 'mobz/elasticsearch-head/1.0.0' ) } it { should contain_file( '/usr/share/elasticsearch/plugins/head' ).that_requires( 'Elasticsearch_plugin[mobz/elasticsearch-head/1.0.0]' ) } end context 'remove a plugin' do let(:params) do { :ensure => 'absent', - :module_dir => 'head', - :instances => 'es-plugin' + :module_dir => 'head' } end it { should contain_elasticsearch__plugin( 'mobz/elasticsearch-head/1.0.0' ) } it { should contain_elasticsearch_plugin( 'mobz/elasticsearch-head/1.0.0' ).with( :ensure => 'absent' ) } it { should contain_file( '/usr/share/elasticsearch/plugins/head' ).that_requires( 'Elasticsearch_plugin[mobz/elasticsearch-head/1.0.0]' ) } end end context 'with url' do context 'add a plugin with full name' do let(:params) do { - :ensure => 'present', - :instances => 'es-plugin', - :url => 'https://github.com/mobz/elasticsearch-head/archive/master.zip' + :ensure => 'present', + :url => 'https://github.com/mobz/elasticsearch-head/archive/master.zip' } end it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } it { should contain_elasticsearch_plugin('mobz/elasticsearch-head/1.0.0').with(:ensure => 'present', :url => 'https://github.com/mobz/elasticsearch-head/archive/master.zip') } end end context 'offline plugin install' do let(:title) { 'head' } let(:params) do { - :ensure => 'present', - :instances => 'es-plugin', - :source => 'puppet:///path/to/my/plugin.zip' + :ensure => 'present', + :source => 'puppet:///path/to/my/plugin.zip' } end it { should contain_elasticsearch__plugin('head') } it { should contain_file('/opt/elasticsearch/swdl/plugin.zip').with(:source => 'puppet:///path/to/my/plugin.zip', :before => 'Elasticsearch_plugin[head]') } it { should contain_elasticsearch_plugin('head').with(:ensure => 'present', :source => '/opt/elasticsearch/swdl/plugin.zip') } end describe 'service restarts' do let(:title) { 'head' } let(:params) do { :ensure => 'present', - :instances => 'es-plugin', :module_dir => 'head' } end context 'restart_on_change set to false (default)' do let(:pre_condition) do <<-EOS class { "elasticsearch": } - - elasticsearch::instance { 'es-plugin': } EOS end it { should_not contain_elasticsearch_plugin( 'head' ).that_notifies( - 'Elasticsearch::Service[es-plugin]' + 'Service[elasticsearch]' )} - include_examples 'instance', 'es-plugin', :sysv + include_examples 'class', :sysv end context 'restart_on_change set to true' do let(:pre_condition) do <<-EOS class { "elasticsearch": restart_on_change => true, } - - elasticsearch::instance { 'es-plugin': } EOS end it { should contain_elasticsearch_plugin( 'head' ).that_notifies( - 'Elasticsearch::Service[es-plugin]' + 'Service[elasticsearch]' )} - include_examples 'instance', 'es-plugin', :sysv + include_examples('class') end context 'restart_plugin_change set to false (default)' do let(:pre_condition) do <<-EOS class { "elasticsearch": restart_plugin_change => false, } - - elasticsearch::instance { 'es-plugin': } EOS end it { should_not 
contain_elasticsearch_plugin( 'head' ).that_notifies( - 'Elasticsearch::Service[es-plugin]' + 'Service[elasticsearch]' )} - include_examples 'instance', 'es-plugin', :sysv + include_examples('class') end context 'restart_plugin_change set to true' do let(:pre_condition) do <<-EOS class { "elasticsearch": restart_plugin_change => true, } - - elasticsearch::instance { 'es-plugin': } EOS end it { should contain_elasticsearch_plugin( 'head' ).that_notifies( - 'Elasticsearch::Service[es-plugin]' + 'Service[elasticsearch]' )} - include_examples 'instance', 'es-plugin', :sysv + include_examples('class') end end describe 'proxy arguments' do let(:title) { 'head' } context 'unauthenticated' do context 'on define' do let(:params) do { - :ensure => 'present', - :instances => 'es-plugin', - :proxy_host => 'es.local', - :proxy_port => 8080 + :ensure => 'present', + :proxy_host => 'es.local', + :proxy_port => 8080 } end it { should contain_elasticsearch_plugin( 'head' ).with_proxy( 'http://es.local:8080' )} end context 'on main class' do let(:params) do { - :ensure => 'present', - :instances => 'es-plugin' + :ensure => 'present' } end let(:pre_condition) do <<-EOS class { 'elasticsearch': proxy_url => 'https://es.local:8080', } EOS end it { should contain_elasticsearch_plugin( 'head' ).with_proxy( 'https://es.local:8080' )} end end context 'authenticated' do context 'on define' do let(:params) do { :ensure => 'present', - :instances => 'es-plugin', :proxy_host => 'es.local', :proxy_port => 8080, :proxy_username => 'elastic', :proxy_password => 'password' } end it { should contain_elasticsearch_plugin( 'head' ).with_proxy( 'http://elastic:password@es.local:8080' )} end context 'on main class' do let(:params) do { - :ensure => 'present', - :instances => 'es-plugin' + :ensure => 'present' } end let(:pre_condition) do <<-EOS class { 'elasticsearch': proxy_url => 'http://elastic:password@es.local:8080', } EOS end it { should contain_elasticsearch_plugin( 'head' ).with_proxy( 'http://elastic:password@es.local:8080' )} end end end describe 'collector ordering' do describe 'present' do let(:title) { 'head' } let(:pre_condition) do <<-EOS class { 'elasticsearch': } - elasticsearch::instance { 'es-plugin': } EOS end - let(:params) do { - :instances => 'es-plugin' - } end - it { should contain_elasticsearch__plugin( 'head' + ).that_requires( + 'Class[elasticsearch::config]' + )} + + it { should contain_elasticsearch_plugin( + 'head' ).that_comes_before( - 'Elasticsearch::Instance[es-plugin]' + 'Service[elasticsearch]' )} - include_examples 'instance', 'es-plugin', :sysv + include_examples 'class' end end end end diff --git a/spec/defines/005_elasticsearch_instance_spec.rb b/spec/defines/005_elasticsearch_instance_spec.rb deleted file mode 100644 index 57a1d93..0000000 --- a/spec/defines/005_elasticsearch_instance_spec.rb +++ /dev/null @@ -1,942 +0,0 @@ -require 'spec_helper' - -describe 'elasticsearch::instance', :type => 'define' do - let(:title) { 'es-instance' } - let(:pre_condition) { 'class { "elasticsearch": }' } - - on_supported_os.each do |os, facts| - context "on #{os}" do - shared_examples 'systemd' do - it { should contain_elasticsearch__service__systemd(title) } - it { should contain_elasticsearch_service_file("#{systemd_service_path}/elasticsearch-#{title}.service") } - it { should contain_file("#{systemd_service_path}/elasticsearch-#{title}.service") } - it { should contain_exec("systemd_reload_#{title}") } - end - - shared_examples 'init' do - it { should contain_elasticsearch__service__init(title) 
} - it { should contain_elasticsearch_service_file("/etc/init.d/elasticsearch-#{title}") } - it { should contain_file("/etc/init.d/elasticsearch-#{title}") } - end - - if (facts[:os]['name'] == 'OpenSuSE' and facts[:os]['release']['major'].to_i >= 13) or facts[:os]['name'] == 'SLES' - let(:systemd_service_path) { '/usr/lib/systemd/system' } - else - let(:systemd_service_path) { '/lib/systemd/system' } - end - - case facts[:os]['family'] - when 'Debian' - let(:defaults_path) { '/etc/default' } - let(:pkg_ext) { 'deb' } - let(:pkg_prov) { 'dpkg' } - case facts[:os]['name'] - when 'Debian' - if facts[:os]['release']['major'].to_i >= 8 - let(:initscript) { 'systemd' } - - include_examples 'systemd' - else - let(:initscript) { 'Debian' } - - include_examples 'init' - end - when 'Ubuntu' - if facts[:os]['release']['major'].to_i >= 15 - let(:initscript) { 'systemd' } - - include_examples 'systemd' - else - let(:initscript) { 'Debian' } - - include_examples 'init' - end - end - when 'RedHat' - let(:defaults_path) { '/etc/sysconfig' } - let(:pkg_ext) { 'rpm' } - let(:pkg_prov) { 'rpm' } - if facts[:os]['release']['major'].to_i >= 7 - let(:initscript) { 'systemd' } - - include_examples 'systemd' - else - let(:initscript) { 'RedHat' } - - include_examples 'init' - end - when 'Suse' - let(:defaults_path) { '/etc/sysconfig' } - let(:pkg_ext) { 'rpm' } - let(:pkg_prov) { 'rpm' } - let(:initscript) { 'systemd' } - - include_examples 'systemd' - end - - let(:facts) do - facts.merge('scenario' => '', 'common' => '') - end - - it { should contain_elasticsearch__service( - 'es-instance' - ).with( - :init_template => - "elasticsearch/etc/init.d/elasticsearch.#{initscript}.erb", - :init_defaults => { - 'CONF_DIR' => '/etc/elasticsearch/es-instance', - 'ES_PATH_CONF' => '/etc/elasticsearch/es-instance', - 'DATA_DIR' => '/var/lib/elasticsearch', - 'ES_JVM_OPTIONS' => '/etc/elasticsearch/es-instance/jvm.options', - 'LOG_DIR' => '/var/log/elasticsearch/es-instance', - 'ES_HOME' => '/usr/share/elasticsearch' - } - )} - end # of on os context - end # of on supported OSes loop - - # Test all non OS-specific functionality with just a single distro - on_supported_os( - :hardwaremodels => ['x86_64'], - :supported_os => [ - { - 'operatingsystem' => 'CentOS', - 'operatingsystemrelease' => ['6'] - } - ] - ).each do |os, facts| - context "on #{os}" do - let(:facts) { facts.merge( - :scenario => '', - :common => '' - ) } - - let(:params) do - { :config => { 'node' => { 'name' => 'test' } } } - end - - describe 'config file' do - it { should contain_augeas('defaults_es-instance') } - it { should contain_datacat_fragment('main_config_es-instance') } - it { should contain_datacat('/etc/elasticsearch/es-instance/elasticsearch.yml') } - it { should contain_datacat_collector( - '/etc/elasticsearch/es-instance/elasticsearch.yml' - ) } - it { should contain_file('/etc/elasticsearch/es-instance/elasticsearch.yml') } - end - - describe 'service restarts' do - context 'do not happen when restart_on_change is false (default)' do - it { should_not contain_datacat( - '/etc/elasticsearch/es-instance/elasticsearch.yml' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should_not contain_file( - '/etc/elasticsearch/es-instance/jvm.options' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should_not contain_package( - 'elasticsearch' - ).that_notifies('Elasticsearch::Service[es-instance]') } - end - - context 'happen when restart_on_change is true' do - let(:pre_condition) do - 'class { "elasticsearch": 
restart_on_change => true }' - end - - it { should contain_datacat( - '/etc/elasticsearch/es-instance/elasticsearch.yml' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should contain_file( - '/etc/elasticsearch/es-instance/jvm.options' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should contain_package( - 'elasticsearch' - ).that_notifies('Elasticsearch::Service[es-instance]') } - end - - context 'on package change' do - let(:pre_condition) do - 'class { "elasticsearch": restart_package_change => true }' - end - - it { should_not contain_datacat( - '/etc/elasticsearch/es-instance/elasticsearch.yml' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should contain_package( - 'elasticsearch' - ).that_notifies('Elasticsearch::Service[es-instance]') } - end - - context 'on config change' do - let(:pre_condition) do - 'class { "elasticsearch": restart_config_change => true }' - end - - it { should contain_datacat( - '/etc/elasticsearch/es-instance/elasticsearch.yml' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should contain_file( - '/etc/elasticsearch/es-instance/jvm.options' - ).that_notifies('Elasticsearch::Service[es-instance]') } - it { should_not contain_package( - 'elasticsearch' - ).that_notifies('Elasticsearch::Service[es-instance]') } - end - end - - context 'config dir' do - context 'default' do - it { should contain_exec('mkdir_configdir_elasticsearch_es-instance') } - it { should contain_file('/etc/elasticsearch/es-instance').with(:ensure => 'directory') } - it { should contain_datacat_fragment('main_config_es-instance') } - it { should contain_datacat('/etc/elasticsearch/es-instance/elasticsearch.yml') } - - it { should contain_file('/etc/elasticsearch/es-instance/logging.yml') } - it { should contain_file('/etc/elasticsearch/es-instance/log4j2.properties') } - it { should contain_file('/etc/elasticsearch/es-instance/jvm.options') } - it { should contain_file('/usr/share/elasticsearch/scripts') } - it do - should contain_file('/etc/elasticsearch/es-instance').with( - :source => '/etc/elasticsearch' - ) - end - end - - context 'set in main class' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - configdir => "/etc/elasticsearch-config" - } - EOS - } - - it { should contain_exec('mkdir_configdir_elasticsearch_es-instance') } - it { should contain_file('/etc/elasticsearch-config').with(:ensure => 'directory') } - it { should contain_file('/usr/share/elasticsearch/templates_import').with(:ensure => 'directory') } - it { should contain_file('/etc/elasticsearch-config/es-instance').with(:ensure => 'directory') } - it { should contain_datacat_fragment('main_config_es-instance') } - it { should contain_datacat('/etc/elasticsearch-config/es-instance/elasticsearch.yml') } - - it { should contain_file('/etc/elasticsearch-config/es-instance/jvm.options') } - it { should contain_file('/etc/elasticsearch-config/es-instance/logging.yml') } - it { should contain_file('/etc/elasticsearch-config/es-instance/log4j2.properties') } - it { should contain_file('/usr/share/elasticsearch/scripts') } - it do - should contain_file('/etc/elasticsearch-config/scripts').with( - :source => '/usr/share/elasticsearch/scripts' - ) - end - it do - should contain_file('/etc/elasticsearch-config/es-instance').with( - :source => '/etc/elasticsearch-config' - ) - end - end - - context 'set in instance' do - let(:params) do { - :configdir => '/etc/elasticsearch-config/es-instance' - } end - - it { should 
contain_exec('mkdir_configdir_elasticsearch_es-instance') } - it { should contain_file('/etc/elasticsearch').with(:ensure => 'directory') } - it { should contain_file('/etc/elasticsearch-config/es-instance').with(:ensure => 'directory') } - it { should contain_datacat_fragment('main_config_es-instance') } - it { should contain_datacat('/etc/elasticsearch-config/es-instance/elasticsearch.yml') } - - it { should contain_file('/etc/elasticsearch-config/es-instance/jvm.options') } - it { should contain_file('/etc/elasticsearch-config/es-instance/logging.yml') } - it { should contain_file('/etc/elasticsearch-config/es-instance/log4j2.properties') } - it { should contain_file('/usr/share/elasticsearch/scripts') } - it do - should contain_file('/etc/elasticsearch/scripts').with( - :source => '/usr/share/elasticsearch/scripts' - ) - end - it do - should contain_file('/etc/elasticsearch-config/es-instance').with( - :source => '/etc/elasticsearch' - ) - end - end - end - - context 'data directory' do - shared_examples 'data directories' do |data_dirs| - data_dirs.each do |dir| - it { should contain_exec('mkdir_logdir_elasticsearch_es-instance') } - it { should contain_exec('mkdir_datadir_elasticsearch_es-instance') } - it { should contain_file("/var/lib/#{dir}").with(:ensure => 'directory') } - end - end - - context 'default' do - include_examples 'data directories', ['elasticsearch'] - end - - context 'datadir_instance_directories' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - datadir_instance_directories => false - } - EOS - end - - it { should contain_exec('mkdir_logdir_elasticsearch_es-instance') } - it { should_not contain_exec('mkdir_datadir_elasticsearch_es-instance') } - it { should_not contain_file('/var/lib/elasticsearch/es-instance').with(:ensure => 'directory') } - it { should contain_file('/var/lib/elasticsearch').with(:ensure => 'directory') } - end - - context 'single from main config ' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - datadir => "/var/lib/elasticsearch-data" - } - EOS - } - - include_examples 'data directories', - ['elasticsearch-data', 'elasticsearch-data/es-instance'] - end - - context 'single from instance config' do - let(:params) do { - :datadir => '/var/lib/elasticsearch/data' - } end - - include_examples 'data directories', ['elasticsearch/data'] - end - - context 'multiple from main config' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - datadir => [ - "/var/lib/elasticsearch-data01", - "/var/lib/elasticsearch-data02" - ] - } - EOS - } - - include_examples( - 'data directories', - (1..2).map do |n| - dir = "elasticsearch-data#{n.to_s.rjust(2, '0')}" - [dir, "#{dir}/es-instance"] - end.flatten - ) - end - - context 'multiple from instance config' do - let(:params) do { - :datadir => [ - '/var/lib/elasticsearch-data/01', - '/var/lib/elasticsearch-data/02' - ] - } end - - include_examples( - 'data directories', - (1..2).map { |n| "elasticsearch-data/#{n.to_s.rjust(2, '0')}" } - ) - end - - context 'conflicting setting path.data' do - let(:params) do { - :datadir => '/var/lib/elasticsearch/data', - :config => { 'path.data' => '/var/lib/elasticsearch/otherdata' } - } end - - include_examples 'data directories', ['elasticsearch/data'] - it { should_not contain_file('/var/lib/elasticsearch/otherdata').with(:ensure => 'directory') } - end - - context 'conflicting setting path => data' do - let(:params) do { - :datadir => '/var/lib/elasticsearch/data', - :config => { - 'path' => { 'data' => 
'/var/lib/elasticsearch/otherdata' } - } - } end - - include_examples 'data directories', ['elasticsearch/data'] - it { should_not contain_file('/var/lib/elasticsearch/otherdata').with(:ensure => 'directory') } - end - - context 'with other path options defined' do - let(:params) do { - :datadir => '/var/lib/elasticsearch/data', - :config => { 'path' => { 'home' => '/var/lib/elasticsearch' } } - } end - - include_examples 'data directories', ['elasticsearch/data'] - end - end - - context 'logs directory' do - context 'default' do - it { should contain_file('/var/log/elasticsearch/es-instance') - .with(:ensure => 'directory') } - it { should contain_file('/var/log/elasticsearch') - .with(:ensure => 'directory') } - end - - context 'single from main config ' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - logdir => "/var/log/elasticsearch-logs" - } - EOS - } - - it { should contain_file('/var/log/elasticsearch-logs') - .with(:ensure => 'directory') } - it { should contain_file('/var/log/elasticsearch-logs/es-instance') - .with(:ensure => 'directory') } - end - - context 'single from instance config' do - let(:params) do { - :logdir => '/var/log/elasticsearch/logs-a' - } end - - it { should contain_file('/var/log/elasticsearch/logs-a').with(:ensure => 'directory') } - end - - context 'Conflicting setting path.logs' do - let(:params) do { - :logdir => '/var/log/elasticsearch/logs-a', - :config => { 'path.logs' => '/var/log/elasticsearch/otherlogs' } - } end - - it { should contain_file('/var/log/elasticsearch/logs-a') - .with(:ensure => 'directory') } - it { should_not contain_file('/var/log/elasticsearch/otherlogs') - .with(:ensure => 'directory') } - end - - context 'Conflicting setting path => logs' do - let(:params) do { - :logdir => '/var/log/elasticsearch/logs-a', - :config => { 'path' => { 'logs' => '/var/log/elasticsearch/otherlogs' } } - } end - - it { should contain_file('/var/log/elasticsearch/logs-a') - .with(:ensure => 'directory') } - it { should_not contain_file('/var/log/elasticsearch/otherlogs') - .with(:ensure => 'directory') } - end - - context 'With other path options defined' do - let(:params) do { - :logdir => '/var/log/elasticsearch/logs-a', - :config => { 'path' => { 'home' => '/var/log/elasticsearch' } } - } end - - it { should contain_file('/var/log/elasticsearch/logs-a').with(:ensure => 'directory') } - end - end - - context 'logging' do - context 'default' do - it { should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content( - /^logger.index.search.slowlog: TRACE, index_search_slow_log_file$/, - /type: dailyRollingFile/, - /datePattern: "'.'yyyy-MM-dd"/ - ).with(:source => nil) - } - end - - context 'from main class' do - context 'config' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - logging_config => { - "index.search.slowlog" => "DEBUG, index_search_slow_log_file" - } - } - EOS - } - - it 'writes correct yaml' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content( - /^logger.index.search.slowlog: DEBUG, index_search_slow_log_file$/ - ).with(:source => nil) - end - end - - context 'logging file ' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - logging_file => "puppet:///path/to/logging.yml" - } - EOS - } - - it 'sets the right source' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with( - :source => 'puppet:///path/to/logging.yml', - :content => nil - ) - end - end - end - - context 'from instance' do - context 'config' do - let(:params) 
do { - :logging_config => { - 'index.search.slowlog' => 'INFO, index_search_slow_log_file' - } - } end - - it 'writes correct yaml' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content(/^logger.index.search.slowlog: INFO, index_search_slow_log_file$/) - .with(:source => nil) - end - end - - context 'logging file' do - let(:params) do { - :logging_file => 'puppet:///path/to/logging.yml' - } end - - it 'sets the right source' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with( - :source => 'puppet:///path/to/logging.yml', - :content => nil - ) - end - end - - context 'deprecation logging' do - let(:params) do { - :deprecation_logging => true - } end - - it 'writes correct yaml' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content(/^logger.deprecation: DEBUG, deprecation_log_file$/) - .with(:source => nil) - end - it 'configures the deprecation log' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content( - /deprecation_log_file:$/, - /type: dailyRollingFile$/, - %r(file: ${path.logs}/${cluster.name}_deprecation.log$), - /datePattern: "'.'yyyy-MM-dd"$/, - /layout:$/, - /type: pattern$/, - /conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"$/ - ).with(:source => nil) - end - end - - context 'deprecation logging level' do - let(:params) do { - :deprecation_logging => true, - :deprecation_logging_level => 'INFO' - } end - - it 'writes correct yaml' do - should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content(/^logger.deprecation: INFO, deprecation_log_file$/) - .with(:source => nil) - end - end - end - - describe 'rollingFile apender' do - let(:pre_condition) do - %( - class { 'elasticsearch': - file_rolling_type => 'rollingFile', - rolling_file_max_backup_index => 10, - rolling_file_max_file_size => '100MB', - } - ) - end - - it { should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with_content( - /type: rollingFile/, - /maxBackupIndex: 10/, - /maxBackupIndex: 10/, - /maxFileSize: 100MB/ - ) - } - end - end - - shared_examples 'file permissions' do |owner, group| - it { should contain_file('/var/lib/elasticsearch/es-instance') - .with(:owner => owner) } - it { should contain_file('/etc/elasticsearch/es-instance') - .with( - :owner => owner, - :group => group - ) } - it { should contain_datacat('/etc/elasticsearch/es-instance/elasticsearch.yml') - .with( - :owner => owner, - :group => group, - :mode => '0440' - ) } - it { should contain_file('/etc/elasticsearch/es-instance/elasticsearch.yml') - .with( - :owner => owner, - :group => group, - :mode => '0440' - ) } - it { should contain_file('/etc/elasticsearch/es-instance/logging.yml') - .with( - :owner => owner, - :group => group, - :mode => '0644' - ) } - it { should contain_file('/etc/elasticsearch/es-instance/log4j2.properties') - .with( - :owner => owner, - :group => group, - :mode => '0644' - ) } - - it { should contain_file('/var/lib/elasticsearch/es-instance') - .with( - :owner => owner, - :group => nil, - :mode => '0755' - ) } - it { should contain_file('/var/log/elasticsearch/es-instance') - .with( - :owner => owner, - :group => group, - :mode => '0750' - ) } - end - - describe 'default file permissions' do - let(:pre_condition) { ' class { "elasticsearch":} ' } - - include_examples 'file permissions', 'elasticsearch', 'elasticsearch' - end - - context 'running as an other user' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - elasticsearch_user => 
"myesuser", - elasticsearch_group => "myesgroup" - } - EOS - } - - include_examples 'file permissions', 'myesuser', 'myesgroup' - end - - context 'setting different service status then main class' do - let(:pre_condition) { 'class {"elasticsearch": status => "enabled" }' } - - context 'status option' do - let(:params) do { - :status => 'running' - } end - - it { should contain_service('elasticsearch-instance-es-instance').with(:ensure => 'running', :enable => false) } - end - end - - context 'init_template' do - context 'default' do - it { should contain_elasticsearch__service('es-instance') - .with(:init_template => 'elasticsearch/etc/init.d/elasticsearch.RedHat.erb') } - end - - context 'override in main class' do - let(:pre_condition) { <<-EOS - class { "elasticsearch": - init_template => "elasticsearch/etc/init.d/elasticsearch.systemd.erb" - } - EOS - } - - it { should contain_elasticsearch__service('es-instance') - .with(:init_template => 'elasticsearch/etc/init.d/elasticsearch.systemd.erb') } - end - end - - describe 'security plugins' do - describe 'system_key' do - context 'inherited' do - let(:pre_condition) do - %( - class { 'elasticsearch': - security_plugin => 'shield', - system_key => '/tmp/key' - } - ) - end - - it { should contain_file('/etc/elasticsearch/es-instance/shield') } - it { should contain_file( - '/etc/elasticsearch/es-instance/shield/system_key' - ).with( - :source => '/tmp/key', - :mode => '0400', - :owner => 'elasticsearch' - ) } - end - - context 'from instance' do - let(:pre_condition) { "class { 'elasticsearch': security_plugin => 'x-pack' }" } - - let(:params) do { - :system_key => 'puppet:///test/key' - } end - - it { should contain_file('/etc/elasticsearch/es-instance/x-pack') } - it { should contain_file( - '/etc/elasticsearch/es-instance/x-pack/system_key' - ).with( - :source => 'puppet:///test/key', - :mode => '0400', - :owner => 'elasticsearch' - ) } - end - end - end - - describe 'recursive configuration directory management' do - ['shield', 'x-pack'].each do |plugin| - context 'shield' do - context 'without resource notifications' do - let(:pre_condition) do - %( - class { 'elasticsearch': - security_plugin => '#{plugin}', - } - ) - end - - it "copies the #{plugin} directory from the source" do - should( - contain_file( - "/etc/elasticsearch/es-instance/#{plugin}" - ).with( - :ensure => 'directory', - :mode => '0750', - :source => "/etc/elasticsearch/#{plugin}", - :recurse => 'remote', - :owner => 'root', - :group => 'elasticsearch', - :before => 'Elasticsearch::Service[es-instance]' - ) - ) - end - end - - context 'with resource notifications' do - let(:pre_condition) do - %( - class { 'elasticsearch': - security_plugin => '#{plugin}', - restart_on_change => true, - } - ) - end - - it "copies the #{plugin} directory from the source" do - should( - contain_file( - "/etc/elasticsearch/es-instance/#{plugin}" - ).with( - :ensure => 'directory', - :mode => '0750', - :source => "/etc/elasticsearch/#{plugin}", - :recurse => 'remote', - :owner => 'root', - :group => 'elasticsearch', - :before => 'Elasticsearch::Service[es-instance]', - :notify => 'Elasticsearch::Service[es-instance]' - ) - ) - end - end - end - end - end - - describe 'jvm.options' do - let(:pre_condition) do - %( - class { 'elasticsearch': - jvm_options => [ - '-Xms4g', - '-Xmx4g' - ] - } - ) - end - - context 'from parent class' do - it do - should contain_file('/etc/elasticsearch/es-instance/jvm.options') - .with_content(%r{ - -Dfile.encoding=UTF-8. 
- -Dio.netty.noKeySetOptimization=true. - -Dio.netty.noUnsafe=true. - -Dio.netty.recycler.maxCapacityPerThread=0. - -Djava.awt.headless=true. - -Djna.nosys=true. - -Dlog4j.shutdownHookEnabled=false. - -Dlog4j2.disable.jmx=true. - -XX:\+AlwaysPreTouch. - -XX:\+HeapDumpOnOutOfMemoryError. - -XX:\+PrintGCDateStamps. - -XX:\+PrintGCDetails. - -XX:\+PrintTenuringDistribution. - -XX:\+UseCMSInitiatingOccupancyOnly. - -XX:\+UseConcMarkSweepGC. - -XX:\+UseGCLogFileRotation. - -XX:-OmitStackTraceInFastThrow. - -XX:CMSInitiatingOccupancyFraction=75. - -XX:GCLogFileSize=64m. - -XX:NumberOfGCLogFiles=32. - -Xloggc:\/var\/log\/elasticsearch\/es-instance\/gc.log. - -Xms4g. - -Xmx4g. - -Xss1m. - -server. - }xm) - end - end - - context 'from instance' do - let(:params) do - { - :jvm_options => [ - '-Xms8g', - '-Xmx8g' - ] - } - end - - it do - should contain_file('/etc/elasticsearch/es-instance/jvm.options') - .with_content(%r{ - -Dfile.encoding=UTF-8. - -Dio.netty.noKeySetOptimization=true. - -Dio.netty.noUnsafe=true. - -Dio.netty.recycler.maxCapacityPerThread=0. - -Djava.awt.headless=true. - -Djna.nosys=true. - -Dlog4j.shutdownHookEnabled=false. - -Dlog4j2.disable.jmx=true. - -XX:\+AlwaysPreTouch. - -XX:\+HeapDumpOnOutOfMemoryError. - -XX:\+PrintGCDateStamps. - -XX:\+PrintGCDetails. - -XX:\+PrintTenuringDistribution. - -XX:\+UseCMSInitiatingOccupancyOnly. - -XX:\+UseConcMarkSweepGC. - -XX:\+UseGCLogFileRotation. - -XX:-OmitStackTraceInFastThrow. - -XX:CMSInitiatingOccupancyFraction=75. - -XX:GCLogFileSize=64m. - -XX:NumberOfGCLogFiles=32. - -Xloggc:\/var\/log\/elasticsearch\/es-instance\/gc.log. - -Xms8g. - -Xmx8g. - -Xss1m. - -server. - }xm) - end - end - end - - describe 'keystore' do - let(:settings) do { - 'cloud.aws.access_key' => 'AKIA...', - 'cloud.aws.secret_key' => 'AKIA...' 
- } end - - describe 'secrets' do - context 'inherited' do - let(:pre_condition) do - <<-EOS - class { 'elasticsearch': - secrets => #{settings} - } - EOS - end - - it { should contain_elasticsearch_keystore('es-instance').with_settings(settings) } - end - - context 'from instance' do - let :params do { - :secrets => settings - } end - - it { should contain_elasticsearch_keystore('es-instance').with_settings(settings) } - end - - context 'notify events' do - let(:pre_condition) do - <<-EOS - class { 'elasticsearch': - restart_on_change => true - } - EOS - end - - let :params do { - :secrets => {} - } end - - it { should contain_elasticsearch_keystore('es-instance').that_notifies('Elasticsearch::Service[es-instance]') } - end - end - - describe 'purge_secrets' do - context 'default' do - let :params do { - :secrets => settings - } end - - it { should contain_elasticsearch_keystore('es-instance').with_purge(false) } - end - - context 'inherited' do - let(:pre_condition) do - <<-EOS - class { 'elasticsearch': - purge_secrets => true, - secrets => #{settings} - } - EOS - end - - it { should contain_elasticsearch_keystore('es-instance').with_purge(true) } - end - - context 'from instance' do - let :params do { - :purge_secrets => true, - :secrets => settings - } end - - it { should contain_elasticsearch_keystore('es-instance').with_purge(true) } - end - end - end - end - end -end diff --git a/spec/defines/007_elasticsearch_user_spec.rb b/spec/defines/007_elasticsearch_user_spec.rb index a27aae2..c8b3103 100644 --- a/spec/defines/007_elasticsearch_user_spec.rb +++ b/spec/defines/007_elasticsearch_user_spec.rb @@ -1,148 +1,120 @@ require 'spec_helper' describe 'elasticsearch::user' do let(:title) { 'elastic' } let(:pre_condition) do <<-EOS - class { 'elasticsearch': - security_plugin => 'shield', - } + class { 'elasticsearch': } EOS end on_supported_os( :hardwaremodels => ['x86_64'], :supported_os => [ { 'operatingsystem' => 'CentOS', 'operatingsystemrelease' => ['7'] } ] ).each do |os, facts| context "on #{os}" do let(:facts) { facts.merge( :scenario => '', :common => '' ) } context 'with default parameters' do let(:params) do { :password => 'foobar', :roles => %w[monitor user] } end it { should contain_elasticsearch__user('elastic') } it { should contain_elasticsearch_user('elastic') } it do should contain_elasticsearch_user_roles('elastic').with( 'ensure' => 'present', 'roles' => %w[monitor user] ) end end describe 'collector ordering' do describe 'when present' do let(:pre_condition) do <<-EOS - class { 'elasticsearch': - security_plugin => 'shield', - } - elasticsearch::instance { 'es-security-user': } - elasticsearch::plugin { 'shield': instances => 'es-security-user' } + class { 'elasticsearch': } elasticsearch::template { 'foo': content => {"foo" => "bar"} } elasticsearch::role { 'test_role': privileges => { 'cluster' => 'monitor', 'indices' => { '*' => 'all', }, }, } EOS end let(:params) do { :password => 'foobar', :roles => %w[monitor user] } end it { should contain_elasticsearch__role('test_role') } it { should contain_elasticsearch_role('test_role') } it { should contain_elasticsearch_role_mapping('test_role') } - it { should contain_elasticsearch__plugin('shield') } - it { should contain_elasticsearch_plugin('shield') } - it { should contain_file( - '/usr/share/elasticsearch/plugins/shield' - ) } it { should contain_elasticsearch__user('elastic') .that_comes_before([ 'Elasticsearch::Template[foo]' ]).that_requires([ - 'Elasticsearch::Plugin[shield]', 'Elasticsearch::Role[test_role]' 
])} - include_examples 'instance', 'es-security-user', :systemd - it { should contain_file( - '/etc/elasticsearch/es-security-user/shield' - ) } + include_examples 'class', :systemd end describe 'when absent' do let(:pre_condition) do <<-EOS - class { 'elasticsearch': - security_plugin => 'shield', - } - elasticsearch::instance { 'es-security-user': } - elasticsearch::plugin { 'shield': - ensure => 'absent', - instances => 'es-security-user', - } + class { 'elasticsearch': } elasticsearch::template { 'foo': content => {"foo" => "bar"} } elasticsearch::role { 'test_role': privileges => { 'cluster' => 'monitor', 'indices' => { '*' => 'all', }, }, } EOS end let(:params) do { :password => 'foobar', :roles => %w[monitor user] } end it { should contain_elasticsearch__role('test_role') } it { should contain_elasticsearch_role('test_role') } it { should contain_elasticsearch_role_mapping('test_role') } - it { should contain_elasticsearch__plugin('shield') } - it { should contain_elasticsearch_plugin('shield') } - it { should contain_file( - '/usr/share/elasticsearch/plugins/shield' - ) } it { should contain_elasticsearch__user('elastic') .that_comes_before([ - 'Elasticsearch::Template[foo]', - 'Elasticsearch::Plugin[shield]' + 'Elasticsearch::Template[foo]' ]).that_requires([ 'Elasticsearch::Role[test_role]' ])} - include_examples 'instance', 'es-security-user', :systemd + include_examples 'class', :systemd end end end end end diff --git a/spec/defines/008_elasticsearch_role_spec.rb b/spec/defines/008_elasticsearch_role_spec.rb index 9d5c590..2275d57 100644 --- a/spec/defines/008_elasticsearch_role_spec.rb +++ b/spec/defines/008_elasticsearch_role_spec.rb @@ -1,129 +1,109 @@ require 'spec_helper' describe 'elasticsearch::role' do let(:title) { 'elastic_role' } let(:pre_condition) do <<-EOS - class { 'elasticsearch': - security_plugin => 'shield', - } + class { 'elasticsearch': } EOS end let(:params) do { :privileges => { 'cluster' => '*' }, :mappings => [ 'cn=users,dc=example,dc=com', 'cn=admins,dc=example,dc=com', 'cn=John Doe,cn=other users,dc=example,dc=com' ] } end on_supported_os( :hardwaremodels => ['x86_64'], :supported_os => [ { 'operatingsystem' => 'CentOS', 'operatingsystemrelease' => ['7'] } ] ).each do |os, facts| context "on #{os}" do let(:facts) { facts.merge( :scenario => '', :common => '' ) } context 'with an invalid role name' do context 'too long' do let(:title) { 'A' * 31 } it { should raise_error(Puppet::Error, /expected length/i) } end end context 'with default parameters' do it { should contain_elasticsearch__role('elastic_role') } it { should contain_elasticsearch_role('elastic_role') } it do should contain_elasticsearch_role_mapping('elastic_role').with( 'ensure' => 'present', 'mappings' => [ 'cn=users,dc=example,dc=com', 'cn=admins,dc=example,dc=com', 'cn=John Doe,cn=other users,dc=example,dc=com' ] ) end end describe 'collector ordering' do describe 'when present' do let(:pre_condition) do <<-EOS - class { 'elasticsearch': - security_plugin => 'shield', - } - elasticsearch::instance { 'es-security-role': } - elasticsearch::plugin { 'shield': instances => 'es-security-role' } + class { 'elasticsearch': } elasticsearch::template { 'foo': content => {"foo" => "bar"} } elasticsearch::user { 'elastic': password => 'foobar', roles => ['elastic_role'], } EOS end - it { should contain_elasticsearch__plugin('shield') } it { should contain_elasticsearch__role('elastic_role') .that_comes_before([ 'Elasticsearch::Template[foo]', 'Elasticsearch::User[elastic]' - ]).that_requires([ - 
'Elasticsearch::Plugin[shield]' ])} - include_examples 'instance', 'es-security-role', :systemd - it { should contain_file( - '/etc/elasticsearch/es-security-role/shield' - ) } + include_examples 'class', :systemd end describe 'when absent' do let(:pre_condition) do <<-EOS - class { 'elasticsearch': - security_plugin => 'shield', - } - elasticsearch::instance { 'es-security-role': } - elasticsearch::plugin { 'shield': - ensure => 'absent', - instances => 'es-security-role', - } + class { 'elasticsearch': } elasticsearch::template { 'foo': content => {"foo" => "bar"} } elasticsearch::user { 'elastic': password => 'foobar', roles => ['elastic_role'], } EOS end - it { should contain_elasticsearch__plugin('shield') } - include_examples 'instance', 'es-security-role', :systemd + include_examples 'class', :systemd # TODO: Uncomment once upstream issue is fixed. # https://github.com/rodjek/rspec-puppet/issues/418 # it { should contain_elasticsearch__shield__role('elastic_role') # .that_comes_before([ # 'Elasticsearch::Template[foo]', # 'Elasticsearch::Plugin[shield]', # 'Elasticsearch::Shield::User[elastic]' # ])} end end end end end diff --git a/spec/defines/010_elasticsearch_service_init_spec.rb b/spec/defines/010_elasticsearch_service_init_spec.rb deleted file mode 100644 index 7d7eebe..0000000 --- a/spec/defines/010_elasticsearch_service_init_spec.rb +++ /dev/null @@ -1,301 +0,0 @@ -require 'spec_helper' - -describe 'elasticsearch::service::init', :type => 'define' do - let(:title) { 'es-service-init' } - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }} - } - EOS - end - - on_supported_os( - :hardwaremodels => ['x86_64'], - :supported_os => [ - { - 'operatingsystem' => 'CentOS', - 'operatingsystemrelease' => ['6'] - } - ] - ).each do |os, facts| - context "on #{os}" do - let(:facts) { facts.merge( - :scenario => '', - :common => '' - ) } - - context 'setup service' do - let(:params) do { - :ensure => 'present', - :status => 'enabled' - } end - - it { should contain_elasticsearch__service__init('es-service-init') } - it { should contain_service('elasticsearch-instance-es-service-init') - .with(:ensure => 'running', :enable => true) } - end - - context 'remove service' do - let(:params) do { - :ensure => 'absent' - } end - - it { should contain_elasticsearch__service__init('es-service-init') } - it { should contain_service('elasticsearch-instance-es-service-init') - .with(:ensure => 'stopped', :enable => false) } - end - - context 'unmanaged' do - let(:params) do { - :ensure => 'present', - :status => 'unmanaged' - } end - - it { should contain_elasticsearch__service__init('es-service-init') } - it { should contain_service('elasticsearch-instance-es-service-init') - .with(:enable => false) } - it { should contain_augeas('defaults_es-service-init') } - end - - context 'defaults file' do - context 'set via file' do - let :params do { - :ensure => 'present', - :status => 'enabled', - :init_defaults_file => - 'puppet:///path/to/initdefaultsfile' - } end - - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-service-init' - ).with( - :source => 'puppet:///path/to/initdefaultsfile' - )} - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-service-init' - ).that_comes_before( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - - context 'set via hash' do - let :params do { - :ensure => 'present', - :status => 'enabled', - :init_defaults => { 'ES_HOME' => '/usr/share/elasticsearch' } - } end - - it 'writes the 
defaults file' do - should contain_augeas('defaults_es-service-init').with( - :incl => '/etc/sysconfig/elasticsearch-es-service-init', - :changes => [ - 'rm CONF_FILE', - "set ES_GROUP 'elasticsearch'", - "set ES_HOME '/usr/share/elasticsearch'", - "set ES_USER 'elasticsearch'", - "set MAX_OPEN_FILES '65536'" - ].join("\n") << "\n", - :before => 'Service[elasticsearch-instance-es-service-init]' - ) - end - end - - context 'restarts when "restart_on_change" is true' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - restart_on_change => true - } - EOS - end - - context 'set via file' do - let :params do { - :ensure => 'present', - :status => 'enabled', - :init_defaults_file => 'puppet:///path/to/initdefaultsfile' - } end - - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-service-init' - ).with( - :source => 'puppet:///path/to/initdefaultsfile' - ) } - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-service-init' - ).that_comes_before( - 'Service[elasticsearch-instance-es-service-init]' - ) } - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-service-init' - ).that_notifies( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - - context 'set via hash' do - let :params do { - :ensure => 'present', - :status => 'enabled', - :init_defaults => { - 'ES_HOME' => '/usr/share/elasticsearch' - } - } end - - it { should contain_augeas( - 'defaults_es-service-init' - ).with( - :incl => '/etc/sysconfig/elasticsearch-es-service-init', - :changes => [ - 'rm CONF_FILE', - "set ES_GROUP 'elasticsearch'", - "set ES_HOME '/usr/share/elasticsearch'", - "set ES_USER 'elasticsearch'", - "set MAX_OPEN_FILES '65536'" - ].join("\n") << "\n" - ) } - it { should contain_augeas( - 'defaults_es-service-init' - ).that_comes_before( - 'Service[elasticsearch-instance-es-service-init]' - ) } - it { should contain_augeas( - 'defaults_es-service-init' - ).that_notifies( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - end - - context 'does not restart when "restart_on_change" is false' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - } - EOS - end - - context 'set via file' do - let :params do { - :ensure => 'present', - :status => 'enabled', - :init_defaults_file => 'puppet:///path/to/initdefaultsfile' - } end - - it { should_not contain_file( - '/etc/sysconfig/elasticsearch-es-service-init' - ).that_notifies( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - - context 'set via hash' do - let :params do { - :ensure => 'present', - :status => 'enabled', - :init_defaults => { - 'ES_HOME' => '/usr/share/elasticsearch' - } - } end - - it { should_not contain_augeas( - 'defaults_es-service-init' - ).that_notifies( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - end - end - - context 'init file' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }} - } - EOS - end - - context 'via template' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_template => - 'elasticsearch/etc/init.d/elasticsearch.RedHat.erb' - } end - - it do - should contain_elasticsearch_service_file( - '/etc/init.d/elasticsearch-es-service-init' - ).that_comes_before( - 'File[/etc/init.d/elasticsearch-es-service-init]' - ) - end - - it do - should contain_file( - '/etc/init.d/elasticsearch-es-service-init' - ).that_comes_before( - 
'Service[elasticsearch-instance-es-service-init]' - ) - end - end - - context 'restarts when "restart_on_change" is true' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - restart_on_change => true - } - EOS - end - - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_template => - 'elasticsearch/etc/init.d/elasticsearch.RedHat.erb' - } end - - it { should contain_file( - '/etc/init.d/elasticsearch-es-service-init' - ).that_comes_before( - 'Service[elasticsearch-instance-es-service-init]' - ) } - it { should contain_file( - '/etc/init.d/elasticsearch-es-service-init' - ).that_notifies( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - - context 'does not restart when "restart_on_change" is false' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - } - EOS - end - - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_template => - 'elasticsearch/etc/init.d/elasticsearch.RedHat.erb' - } end - - it { should_not contain_file( - '/etc/init.d/elasticsearch-es-service-init' - ).that_notifies( - 'Service[elasticsearch-instance-es-service-init]' - ) } - end - end - end - end -end diff --git a/spec/defines/011_elasticsearch_service_system_spec.rb b/spec/defines/011_elasticsearch_service_system_spec.rb deleted file mode 100644 index d230777..0000000 --- a/spec/defines/011_elasticsearch_service_system_spec.rb +++ /dev/null @@ -1,294 +0,0 @@ -require 'spec_helper' - -describe 'elasticsearch::service::systemd', :type => 'define' do - on_supported_os( - :hardwaremodels => ['x86_64'], - :supported_os => [ - { - 'operatingsystem' => 'OpenSuSE', - 'operatingsystemrelease' => %w[42] - }, - { - 'operatingsystem' => 'CentOS', - 'operatingsystemrelease' => %w[7] - } - ] - ).each do |os, facts| - context "on #{os}" do - let(:facts) { facts.merge( - :scenario => '', - :common => '' - ) } - let(:title) { 'es-systemd' } - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }} - } - EOS - end - - if facts[:os]['name'] == 'OpenSuSE' and - facts[:os]['release']['major'].to_i >= 13 - let(:systemd_service_path) { '/usr/lib/systemd/system' } - else - let(:systemd_service_path) { '/lib/systemd/system' } - end - - context 'setup service' do - let(:params) do { - :ensure => 'present', - :status => 'enabled' - } end - - it { should contain_elasticsearch__service__systemd('es-systemd') } - it { should contain_exec('systemd_reload_es-systemd') - .with(:command => '/bin/systemctl daemon-reload') } - it { should contain_service('elasticsearch-instance-es-systemd') - .with(:ensure => 'running', :enable => true, :provider => 'systemd') } - end - - context 'remove service' do - let(:params) do { - :ensure => 'absent' - } end - - it { should contain_elasticsearch__service__systemd('es-systemd') } - it { should contain_exec('systemd_reload_es-systemd') - .with(:command => '/bin/systemctl daemon-reload') } - it { should contain_service('elasticsearch-instance-es-systemd') - .with( - :ensure => 'stopped', :enable => false, :provider => 'systemd' - ) } - end - - context 'unmanaged' do - let(:params) do { - :ensure => 'present', - :status => 'unmanaged' - } end - - it { should contain_elasticsearch__service__systemd('es-systemd') } - it { should contain_service('elasticsearch-instance-es-systemd') - .with(:enable => false) } - it { should contain_augeas('defaults_es-systemd') } - end - - context 'defaults 
file' do - context 'set via file' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_defaults_file => 'puppet:///path/to/initdefaultsfile' - } end - - it { should contain_file('/etc/sysconfig/elasticsearch-es-systemd') - .with( - :source => 'puppet:///path/to/initdefaultsfile', - :before => 'Service[elasticsearch-instance-es-systemd]' - ) } - end - - context 'set via hash' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_defaults => { 'ES_HOME' => '/usr/share/elasticsearch' } - } end - - it { should contain_augeas('defaults_es-systemd') - .with( - :incl => '/etc/sysconfig/elasticsearch-es-systemd', - :changes => [ - 'rm CONF_FILE', - "set ES_GROUP 'elasticsearch'", - "set ES_HOME '/usr/share/elasticsearch'", - "set ES_USER 'elasticsearch'", - "set MAX_OPEN_FILES '65536'", - "set MAX_THREADS '4096'" - ].join("\n") << "\n", - :before => 'Service[elasticsearch-instance-es-systemd]' - ) } - end - - context 'restarts when "restart_on_change" is true' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - restart_on_change => true - } - EOS - end - - context 'set via file' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_defaults_file => - 'puppet:///path/to/initdefaultsfile' - } end - - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-systemd' - ).with(:source => 'puppet:///path/to/initdefaultsfile') } - it { should contain_file( - '/etc/sysconfig/elasticsearch-es-systemd' - ).that_notifies([ - 'Service[elasticsearch-instance-es-systemd]' - ]) } - end - - context 'set via hash' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_defaults => { - 'ES_HOME' => '/usr/share/elasticsearch' - } - } end - - it { should contain_augeas( - 'defaults_es-systemd' - ).with( - :incl => '/etc/sysconfig/elasticsearch-es-systemd', - :changes => [ - 'rm CONF_FILE', - "set ES_GROUP 'elasticsearch'", - "set ES_HOME '/usr/share/elasticsearch'", - "set ES_USER 'elasticsearch'", - "set MAX_OPEN_FILES '65536'", - "set MAX_THREADS '4096'" - ].join("\n") << "\n" - )} - it { should contain_augeas( - 'defaults_es-systemd' - ).that_comes_before( - 'Service[elasticsearch-instance-es-systemd]' - ) } - it { should contain_augeas( - 'defaults_es-systemd' - ).that_notifies( - 'Exec[systemd_reload_es-systemd]' - ) } - end - end - - context 'does not restart when "restart_on_change" is false' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - } - EOS - end - - context 'set via file' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_defaults_file => - 'puppet:///path/to/initdefaultsfile' - } end - - it { should_not contain_file( - '/etc/sysconfig/elasticsearch-es-systemd' - ).that_notifies( - 'Service[elasticsearch-instance-es-systemd]' - ) } - end - end - end - - context 'init file' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }} - } - EOS - end - - context 'via template' do - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_template => - 'elasticsearch/etc/init.d/elasticsearch.systemd.erb' - } end - - it do - should contain_elasticsearch_service_file( - "#{systemd_service_path}/elasticsearch-es-systemd.service" - ).with( - :before => [ - "File[#{systemd_service_path}/elasticsearch-es-systemd.service]" - ] - ) - end - - it do - should contain_file( - 
"#{systemd_service_path}/elasticsearch-es-systemd.service" - ).with( - :before => 'Service[elasticsearch-instance-es-systemd]' - ) - end - end - - context 'restarts when "restart_on_change" is true' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - restart_on_change => true - } - EOS - end - - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_template => - 'elasticsearch/etc/init.d/elasticsearch.systemd.erb' - } end - - it { should contain_file( - "#{systemd_service_path}/elasticsearch-es-systemd.service" - ).that_notifies([ - 'Exec[systemd_reload_es-systemd]', - 'Service[elasticsearch-instance-es-systemd]' - ]) } - it { should contain_file( - "#{systemd_service_path}/elasticsearch-es-systemd.service" - ).that_comes_before( - 'Service[elasticsearch-instance-es-systemd]' - ) } - end - - context 'does not restart when "restart_on_change" is false' do - let(:pre_condition) do - <<-EOS - class { "elasticsearch": - config => { "node" => {"name" => "test" }}, - } - EOS - end - - let(:params) do { - :ensure => 'present', - :status => 'enabled', - :init_template => - 'elasticsearch/etc/init.d/elasticsearch.systemd.erb' - } end - - it { should_not contain_file( - "#{systemd_service_path}/elasticsearch-es-systemd.service" - ).that_notifies( - 'Service[elasticsearch-instance-es-systemd]' - ) } - end - end - end # of context on os - end # of on_supported_os -end # of describe elasticsearch::service::systemd diff --git a/spec/fixtures/facts/v5-nodes.json b/spec/fixtures/facts/v5-nodes.json deleted file mode 100644 index 7e6dc16..0000000 --- a/spec/fixtures/facts/v5-nodes.json +++ /dev/null @@ -1,371 +0,0 @@ -{ - "_nodes" : { - "total" : 1, - "successful" : 1, - "failed" : 0 - }, - "cluster_name" : "elasticsearch", - "nodes" : { - "9lRSXfREQnqIgBWP0FBi0Q" : { - "name" : "v5", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.5.0", - "build_hash" : "260387d", - "total_indexing_buffer" : 211261849, - "roles" : [ - "master", - "data", - "ingest" - ], - "settings" : { - "client" : { - "type" : "node" - }, - "cluster" : { - "name" : "elasticsearch" - }, - "http" : { - "type" : { - "default" : "netty4" - }, - "port" : "9202" - }, - "node" : { - "name" : "v5" - }, - "path" : { - "logs" : "/Users/tylerjl/Work/elasticsearch-5.5.0/logs", - "home" : "/Users/tylerjl/Work/elasticsearch-5.5.0" - }, - "transport" : { - "type" : { - "default" : "netty4" - } - } - }, - "os" : { - "refresh_interval_in_millis" : 1000, - "name" : "Mac OS X", - "arch" : "x86_64", - "version" : "10.12.5", - "available_processors" : 4, - "allocated_processors" : 4 - }, - "process" : { - "refresh_interval_in_millis" : 1000, - "id" : 16828, - "mlockall" : false - }, - "jvm" : { - "pid" : 16828, - "version" : "1.8.0_121", - "vm_name" : "Java HotSpot(TM) 64-Bit Server VM", - "vm_version" : "25.121-b13", - "vm_vendor" : "Oracle Corporation", - "start_time_in_millis" : 1502814708699, - "mem" : { - "heap_init_in_bytes" : 2147483648, - "heap_max_in_bytes" : 2112618496, - "non_heap_init_in_bytes" : 2555904, - "non_heap_max_in_bytes" : 0, - "direct_max_in_bytes" : 2112618496 - }, - "gc_collectors" : [ - "ParNew", - "ConcurrentMarkSweep" - ], - "memory_pools" : [ - "Code Cache", - "Metaspace", - "Compressed Class Space", - "Par Eden Space", - "Par Survivor Space", - "CMS Old Gen" - ], - "using_compressed_ordinary_object_pointers" : "true", - "input_arguments" : [ - "-Xms2g", - "-Xmx2g", - "-XX:+UseConcMarkSweepGC", 
- "-XX:CMSInitiatingOccupancyFraction=75", - "-XX:+UseCMSInitiatingOccupancyOnly", - "-XX:+AlwaysPreTouch", - "-Xss1m", - "-Djava.awt.headless=true", - "-Dfile.encoding=UTF-8", - "-Djna.nosys=true", - "-Djdk.io.permissionsUseCanonicalPath=true", - "-Dio.netty.noUnsafe=true", - "-Dio.netty.noKeySetOptimization=true", - "-Dio.netty.recycler.maxCapacityPerThread=0", - "-Dlog4j.shutdownHookEnabled=false", - "-Dlog4j2.disable.jmx=true", - "-Dlog4j.skipJansi=true", - "-XX:+HeapDumpOnOutOfMemoryError", - "-Des.path.home=/Users/tylerjl/Work/elasticsearch-5.5.0" - ] - }, - "thread_pool" : { - "force_merge" : { - "type" : "fixed", - "min" : 1, - "max" : 1, - "queue_size" : -1 - }, - "fetch_shard_started" : { - "type" : "scaling", - "min" : 1, - "max" : 8, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "listener" : { - "type" : "fixed", - "min" : 2, - "max" : 2, - "queue_size" : -1 - }, - "index" : { - "type" : "fixed", - "min" : 4, - "max" : 4, - "queue_size" : 200 - }, - "refresh" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "generic" : { - "type" : "scaling", - "min" : 4, - "max" : 128, - "keep_alive" : "30s", - "queue_size" : -1 - }, - "warmer" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "search" : { - "type" : "fixed", - "min" : 7, - "max" : 7, - "queue_size" : 1000 - }, - "flush" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "fetch_shard_store" : { - "type" : "scaling", - "min" : 1, - "max" : 8, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "management" : { - "type" : "scaling", - "min" : 1, - "max" : 5, - "keep_alive" : "5m", - "queue_size" : -1 - }, - "get" : { - "type" : "fixed", - "min" : 4, - "max" : 4, - "queue_size" : 1000 - }, - "bulk" : { - "type" : "fixed", - "min" : 4, - "max" : 4, - "queue_size" : 200 - }, - "snapshot" : { - "type" : "scaling", - "min" : 1, - "max" : 2, - "keep_alive" : "5m", - "queue_size" : -1 - } - }, - "transport" : { - "bound_address" : [ - "[fe80::1]:9300", - "[::1]:9300", - "127.0.0.1:9300" - ], - "publish_address" : "127.0.0.1:9300", - "profiles" : { } - }, - "http" : { - "bound_address" : [ - "[fe80::1]:9202", - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 - }, - "plugins" : [ ], - "modules" : [ - { - "name" : "aggs-matrix-stats", - "version" : "5.5.0", - "description" : "Adds aggregations whose input are a list of numeric fields and output includes a matrix.", - "classname" : "org.elasticsearch.search.aggregations.matrix.MatrixAggregationPlugin", - "has_native_controller" : false - }, - { - "name" : "ingest-common", - "version" : "5.5.0", - "description" : "Module for ingest processors that do not require additional security permissions or have large dependencies and resources", - "classname" : "org.elasticsearch.ingest.common.IngestCommonPlugin", - "has_native_controller" : false - }, - { - "name" : "lang-expression", - "version" : "5.5.0", - "description" : "Lucene expressions integration for Elasticsearch", - "classname" : "org.elasticsearch.script.expression.ExpressionPlugin", - "has_native_controller" : false - }, - { - "name" : "lang-groovy", - "version" : "5.5.0", - "description" : "Groovy scripting integration for Elasticsearch", - "classname" : "org.elasticsearch.script.groovy.GroovyPlugin", - "has_native_controller" : false - }, - { - "name" : "lang-mustache", - "version" : "5.5.0", - 
"description" : "Mustache scripting integration for Elasticsearch", - "classname" : "org.elasticsearch.script.mustache.MustachePlugin", - "has_native_controller" : false - }, - { - "name" : "lang-painless", - "version" : "5.5.0", - "description" : "An easy, safe and fast scripting language for Elasticsearch", - "classname" : "org.elasticsearch.painless.PainlessPlugin", - "has_native_controller" : false - }, - { - "name" : "parent-join", - "version" : "5.5.0", - "description" : "This module adds the support parent-child queries and aggregations", - "classname" : "org.elasticsearch.join.ParentJoinPlugin", - "has_native_controller" : false - }, - { - "name" : "percolator", - "version" : "5.5.0", - "description" : "Percolator module adds capability to index queries and query these queries by specifying documents", - "classname" : "org.elasticsearch.percolator.PercolatorPlugin", - "has_native_controller" : false - }, - { - "name" : "reindex", - "version" : "5.5.0", - "description" : "The Reindex module adds APIs to reindex from one index to another or update documents in place.", - "classname" : "org.elasticsearch.index.reindex.ReindexPlugin", - "has_native_controller" : false - }, - { - "name" : "transport-netty3", - "version" : "5.5.0", - "description" : "Netty 3 based transport implementation", - "classname" : "org.elasticsearch.transport.Netty3Plugin", - "has_native_controller" : false - }, - { - "name" : "transport-netty4", - "version" : "5.5.0", - "description" : "Netty 4 based transport implementation", - "classname" : "org.elasticsearch.transport.Netty4Plugin", - "has_native_controller" : false - } - ], - "ingest" : { - "processors" : [ - { - "type" : "append" - }, - { - "type" : "convert" - }, - { - "type" : "date" - }, - { - "type" : "date_index_name" - }, - { - "type" : "dot_expander" - }, - { - "type" : "fail" - }, - { - "type" : "foreach" - }, - { - "type" : "grok" - }, - { - "type" : "gsub" - }, - { - "type" : "join" - }, - { - "type" : "json" - }, - { - "type" : "kv" - }, - { - "type" : "lowercase" - }, - { - "type" : "remove" - }, - { - "type" : "rename" - }, - { - "type" : "script" - }, - { - "type" : "set" - }, - { - "type" : "sort" - }, - { - "type" : "split" - }, - { - "type" : "trim" - }, - { - "type" : "uppercase" - } - ] - } - } - } -} diff --git a/spec/fixtures/facts/v5-root.json b/spec/fixtures/facts/v5-root.json deleted file mode 100644 index 62ac5ee..0000000 --- a/spec/fixtures/facts/v5-root.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name" : "v5", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "1ENUnFlUQcmF6qko-PjpHw", - "version" : { - "number" : "5.5.0", - "build_hash" : "260387d", - "build_date" : "2017-06-30T23:16:05.735Z", - "build_snapshot" : false, - "lucene_version" : "6.6.0" - }, - "tagline" : "You Know, for Search" -} diff --git a/spec/fixtures/hiera/hieradata/singleplugin.yaml b/spec/fixtures/hiera/hieradata/singleplugin.yaml index fdc0612..10d1d6a 100644 --- a/spec/fixtures/hiera/hieradata/singleplugin.yaml +++ b/spec/fixtures/hiera/hieradata/singleplugin.yaml @@ -1,12 +1,5 @@ --- -elasticsearch::instances: - es-hiera-single: - config: - node.name: 'es-01' - http.bind_host: '0.0.0.0' elasticsearch::plugins: 'mobz/elasticsearch-head': ensure: 'present' module_dir: 'head' - instances: - - 'es-hiera-single' diff --git a/spec/fixtures/templates/post_6.0.json b/spec/fixtures/templates/6.x.json similarity index 99% rename from spec/fixtures/templates/post_6.0.json rename to spec/fixtures/templates/6.x.json index e118ec1..e1f9ba1 100644 --- 
a/spec/fixtures/templates/post_6.0.json +++ b/spec/fixtures/templates/6.x.json @@ -1,59 +1,59 @@ { "index_patterns": [ "logstash-*" ], "version": 123, "settings": { "index": { "refresh_interval": "5s", "analysis": { "analyzer": { "default": { "type": "standard", "stopwords": "_none_" } } } } }, "mappings": { "_doc": { "dynamic_templates": [ { "string_fields": { "match": "*", "match_mapping_type": "string", "mapping": { "type": "multi_field", "fields": { "{name}": { "type": "text", "index": "analyzed", "omit_norms": true }, "raw": { "type ": "text", "index": "not_analyzed", "ignore_above": 256 } } } } } ], "properties": { "@version": { "type": "text", "index": false }, "geoip": { "type": "object", "dynamic": true, "properties": { "location": { "type": "geo_point" } } } } } } -} +} \ No newline at end of file diff --git a/spec/fixtures/templates/post_8.0.json b/spec/fixtures/templates/7.x.json similarity index 74% copy from spec/fixtures/templates/post_8.0.json copy to spec/fixtures/templates/7.x.json index db8c484..54569e0 100644 --- a/spec/fixtures/templates/post_8.0.json +++ b/spec/fixtures/templates/7.x.json @@ -1,57 +1,42 @@ { "index_patterns": [ "logstash-*" ], "version": 123, "settings": { "index": { "refresh_interval": "5s", "analysis": { "analyzer": { "default": { "type": "standard", "stopwords": "_none_" } } } } }, "mappings": { "dynamic_templates": [ { "string_fields": { "match": "*", "match_mapping_type": "string", "mapping": { "type": "multi_field", "fields": { "{name}": { "type": "text", "index": "analyzed", "omit_norms": true }, "raw": { "type ": "text", "index": "not_analyzed", "ignore_above": 256 } } } } } - ], - "properties": { - "@version": { - "type": "text", - "index": false - }, - "geoip": { - "type": "object", - "dynamic": true, - "properties": { - "location": { - "type": "geo_point" - } - } - } - } + ] } -} +} \ No newline at end of file diff --git a/spec/fixtures/templates/post_8.0.json b/spec/fixtures/templates/post_8.0.json index db8c484..91cade1 100644 --- a/spec/fixtures/templates/post_8.0.json +++ b/spec/fixtures/templates/post_8.0.json @@ -1,57 +1,30 @@ { - "index_patterns": [ "logstash-*" ], - "version": 123, - "settings": { - "index": { - "refresh_interval": "5s", - "analysis": { - "analyzer": { - "default": { - "type": "standard", - "stopwords": "_none_" - } - } - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "string_fields": { - "match": "*", - "match_mapping_type": "string", - "mapping": { - "type": "multi_field", - "fields": { - "{name}": { - "type": "text", - "index": "analyzed", - "omit_norms": true - }, - "raw": { - "type ": "text", - "index": "not_analyzed", - "ignore_above": 256 - } - } - } - } - } - ], - "properties": { - "@version": { - "type": "text", - "index": false + "index_patterns": ["te*", "bar*"], + "template": { + "settings": { + "number_of_shards": 1 + }, + "mappings": { + "_source": { + "enabled": false }, - "geoip": { - "type": "object", - "dynamic": true, - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "host_name": { + "type": "keyword" + }, + "created_at": { + "type": "date", + "format": "EEE MMM dd HH:mm:ss Z yyyy" } } + }, + "aliases": { + "mydata": { } } + }, + "priority": 10, + "version": 3, + "_meta": { + "description": "my custom" } -} +} \ No newline at end of file diff --git a/spec/fixtures/templates/pre_6.0.json b/spec/fixtures/templates/pre_6.0.json deleted file mode 100644 index 38a2379..0000000 --- a/spec/fixtures/templates/pre_6.0.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - 
"template": "logstash-*", - "settings": { - "index": { - "refresh_interval": "5s", - "analysis": { - "analyzer": { - "default": { - "type": "standard", - "stopwords": "_none_" - } - } - } - } - }, - "mappings": { - "_default_": { - "_all": { - "enabled": true - }, - "dynamic_templates": [ - { - "string_fields": { - "match": "*", - "match_mapping_type": "string", - "mapping": { - "type": "multi_field", - "fields": { - "{name}": { - "type": "string", - "index": "analyzed", - "omit_norms": true - }, - "raw": { - "type ": "string", - "index": "not_analyzed", - "ignore_above": 256 - } - } - } - } - } - ], - "properties": { - "@version": { - "type": "string", - "index": "not_analyzed" - }, - "geoip": { - "type": "object", - "dynamic": true, - "properties": { - "location": { - "type": "geo_point" - } - } - } - } - } - } -} diff --git a/spec/helpers/acceptance/tests/bad_manifest_shared_examples.rb b/spec/helpers/acceptance/tests/bad_manifest_shared_examples.rb index aadf579..c6dc2de 100644 --- a/spec/helpers/acceptance/tests/bad_manifest_shared_examples.rb +++ b/spec/helpers/acceptance/tests/bad_manifest_shared_examples.rb @@ -1,33 +1,18 @@ -shared_examples 'invalid manifest application' do |instances| - context "bad #{instances.count}-node manifest" do +shared_examples 'invalid manifest application' do + context 'bad manifest' do let(:applied_manifest) do - instance_manifest = instances.map do |instance, meta| - config = meta.map { |k, v| "'#{k}' => '#{v}'," }.join(' ') - <<-MANIFEST - elasticsearch::instance { '#{instance}': - ensure => #{meta.empty? ? 'absent' : 'present'}, - config => { - #{config} - }, - #{defined?(manifest_instance_parameters) && manifest_instance_parameters} - } - MANIFEST - end.join("\n") - <<-MANIFEST class { 'elasticsearch' : #{manifest} #{defined?(manifest_class_parameters) && manifest_class_parameters} } - #{defined?(skip_instance_manifests) || instance_manifest} - #{defined?(extra_manifest) && extra_manifest} MANIFEST end it 'fails to apply' do - apply_manifest applied_manifest, :expect_failures => true + apply_manifest(applied_manifest, :expect_failures => true, :debug => v[:puppet_debug]) end end end diff --git a/spec/helpers/acceptance/tests/basic_shared_examples.rb b/spec/helpers/acceptance/tests/basic_shared_examples.rb index 500538d..e9a3e39 100644 --- a/spec/helpers/acceptance/tests/basic_shared_examples.rb +++ b/spec/helpers/acceptance/tests/basic_shared_examples.rb @@ -1,72 +1,67 @@ require 'json' require 'helpers/acceptance/tests/manifest_shared_examples' -shared_examples 'basic acceptance tests' do |instances| - include_examples 'manifest application', instances +shared_examples 'basic acceptance tests' do |es_config| + include_examples('manifest application') describe package("elasticsearch#{v[:oss] ? '-oss' : ''}") do - it { should be_installed } + it { should be_installed + .with_version(v[:elasticsearch_full_version]) } end %w[ - /usr/share/elasticsearch/templates_import - /usr/share/elasticsearch/scripts + /etc/elasticsearch + /usr/share/elasticsearch + /var/lib/elasticsearch ].each do |dir| describe file(dir) do it { should be_directory } end end - instances.each do |instance, config| - describe "resources for instance #{instance}" do - describe service("elasticsearch-#{instance}") do - it { send(config.empty? ? :should_not : :should, be_enabled) } - it { send(config.empty? ? :should_not : :should, be_running) } - end + describe 'resources' do + describe service('elasticsearch') do + it { send(es_config.empty? ? 
:should_not : :should, be_enabled) } + it { send(es_config.empty? ? :should_not : :should, be_running) } + end - unless config.empty? - describe file(pid_for(instance)) do - it { should be_file } - its(:content) { should match(/[0-9]+/) } - end + unless es_config.empty? + describe file(pid_file) do + it { should be_file } + its(:content) { should match(/[0-9]+/) } + end - describe file("/etc/elasticsearch/#{instance}/elasticsearch.yml") do - it { should be_file } - it { should contain "name: #{config['config']['node.name']}" } - it { should contain "/var/lib/elasticsearch/#{instance}" } - end + describe file('/etc/elasticsearch/elasticsearch.yml') do + it { should be_file } + it { should contain "name: #{es_config['node.name']}" } end + end - unless config.empty? - describe file("/var/lib/elasticsearch/#{instance}") do - it { should be_directory } + unless es_config.empty? + es_port = es_config['http.port'] + describe port(es_port) do + it 'open', :with_retries do + should be_listening end + end - describe port(config['config']['http.port']) do - it 'open', :with_retries do - should be_listening + describe server :container do + describe http("http://localhost:#{es_port}/_nodes/_local") do + it 'serves requests', :with_retries do + expect(response.status).to eq(200) end - end - - describe server :container do - describe http("http://localhost:#{config['config']['http.port']}/_nodes/_local") do - it 'serves requests', :with_retries do - expect(response.status).to eq(200) - end - it 'uses the default data path', :with_retries do - json = JSON.parse(response.body)['nodes'].values.first - expected = "/var/lib/elasticsearch/#{instance}" - expected = [expected] if v[:elasticsearch_major_version] > 2 - expect( - json['settings']['path'] - ).to include( - 'data' => expected - ) - end + it 'uses the default data path', :with_retries do + json = JSON.parse(response.body)['nodes'].values.first + data_dir = ['/var/lib/elasticsearch'] + expect( + json['settings']['path'] + ).to include( + 'data' => data_dir + ) end end end end end end diff --git a/spec/helpers/acceptance/tests/datadir_shared_examples.rb b/spec/helpers/acceptance/tests/datadir_shared_examples.rb index c4467c6..0ec67db 100644 --- a/spec/helpers/acceptance/tests/datadir_shared_examples.rb +++ b/spec/helpers/acceptance/tests/datadir_shared_examples.rb @@ -1,102 +1,72 @@ require 'json' require 'helpers/acceptance/tests/manifest_shared_examples' -shared_examples 'datadir directory validation' do |instances, datapaths| - include_examples( - 'manifest application', - instances - ) +shared_examples 'datadir directory validation' do |es_config, datapaths| + include_examples('manifest application') - instances.each_pair do |instance, config| - describe file("/etc/elasticsearch/#{instance}/elasticsearch.yml") do - it { should be_file } - datapaths.each do |datapath| - it { should contain datapath } - end + describe file('/etc/elasticsearch/elasticsearch.yml') do + it { should be_file } + datapaths.each do |datapath| + it { should contain datapath } end + end - datapaths.each do |datapath| - describe file(datapath) do - it { should be_directory } - end + datapaths.each do |datapath| + describe file(datapath) do + it { should be_directory } end + end - describe port(config['config']['http.port']) do - it 'open', :with_retries do - should be_listening - end + es_port = es_config['http.port'] + describe port(es_port) do + it 'open', :with_retries do + should be_listening end + end - describe server :container do - describe http( - 
"http://localhost:#{config['config']['http.port']}/_nodes/_local" - ) do - it 'uses a custom data path' do - json = JSON.parse(response.body)['nodes'].values.first - expect( - json['settings']['path']['data'] - ).to((datapaths.one? and v[:elasticsearch_major_version] <= 2) ? eq(datapaths.first) : contain_exactly(*datapaths)) - end + describe server :container do + describe http( + "http://localhost:#{es_port}/_nodes/_local" + ) do + it 'uses a custom data path' do + json = JSON.parse(response.body)['nodes'].values.first + expect( + json['settings']['path']['data'] + ).to((datapaths.one? and v[:elasticsearch_major_version] <= 2) ? eq(datapaths.first) : contain_exactly(*datapaths)) end end end end -shared_examples 'datadir acceptance tests' do +shared_examples 'datadir acceptance tests' do |es_config| describe 'elasticsearch::datadir' do let(:manifest_class_parameters) { 'restart_on_change => true' } - instances = - { - 'es-01' => { - 'config' => { - 'http.port' => 9200 - } - } - } - - context 'single path from class', :with_cleanup do + context 'single path', :with_cleanup do let(:manifest_class_parameters) do <<-MANIFEST - datadir => '/var/lib/elasticsearch-data', + datadir => '/var/lib/elasticsearch-data', restart_on_change => true, MANIFEST end - include_examples 'datadir directory validation', instances, ['/var/lib/elasticsearch-data/es-01'] + include_examples('datadir directory validation', + es_config, + ['/var/lib/elasticsearch-data']) end - context 'single path from instance', :with_cleanup do - let(:manifest_instance_parameters) { "datadir => '/var/lib/elasticsearch-data/1'" } - include_examples 'datadir directory validation', instances, ['/var/lib/elasticsearch-data/1'] - end - - context 'multiple paths from class', :with_cleanup do + context 'multiple paths', :with_cleanup do let(:manifest_class_parameters) do <<-MANIFEST datadir => [ '/var/lib/elasticsearch-01', '/var/lib/elasticsearch-02' ], restart_on_change => true, MANIFEST end - include_examples 'datadir directory validation', - instances, - ['/var/lib/elasticsearch-01/es-01', '/var/lib/elasticsearch-02/es-01'] - end - - context 'multiple paths from instance', :with_cleanup do - let(:manifest_instance_parameters) do - <<-MANIFEST - datadir => [ - '/var/lib/elasticsearch-data/2', - '/var/lib/elasticsearch-data/3' - ] - MANIFEST - end - include_examples 'datadir directory validation', - instances, - ['/var/lib/elasticsearch-data/2', '/var/lib/elasticsearch-data/3'] + include_examples('datadir directory validation', + es_config, + ['/var/lib/elasticsearch-01', '/var/lib/elasticsearch-02']) end end end diff --git a/spec/helpers/acceptance/tests/hiera_shared_examples.rb b/spec/helpers/acceptance/tests/hiera_shared_examples.rb index 40b43ea..609d1de 100644 --- a/spec/helpers/acceptance/tests/hiera_shared_examples.rb +++ b/spec/helpers/acceptance/tests/hiera_shared_examples.rb @@ -1,124 +1,87 @@ require 'tempfile' require 'helpers/acceptance/tests/basic_shared_examples' require 'helpers/acceptance/tests/plugin_shared_examples' -shared_examples 'hiera tests with' do |yamlname, instances, additional_yaml = {}| - before :all do - Tempfile.create([yamlname, '.yaml']) do |temp| - temp << { - 'elasticsearch::instances' => instances - }.merge(additional_yaml).to_yaml - temp.flush +agents = only_host_with_role(hosts, 'agent') - File.basename(temp.path).tap do |config| - scp_to( - default, - temp.path, - File.join(hiera_datadir(default), config) - ) - write_hiera_config([config]) - end - end +shared_examples 'hiera tests with' do 
|es_config, additional_yaml = {}| + hieradata = { + 'elasticsearch::config' => es_config + }.merge(additional_yaml).to_yaml + + before :all do + write_hieradata_to(agents, hieradata) end - include_examples( - 'basic acceptance tests', - instances - ) + include_examples('basic acceptance tests', es_config) end -shared_examples 'hiera acceptance tests' do |plugins| +shared_examples 'hiera acceptance tests' do |es_config, plugins| describe 'hiera', :then_purge do - before :all do - shell "mkdir -p #{hiera_datadir(default)}" + let(:manifest) do + package = if not v[:is_snapshot] + <<-MANIFEST + # Hard version set here due to plugin incompatibilities. + version => '#{v[:elasticsearch_full_version]}', + MANIFEST + else + <<-MANIFEST + manage_repo => false, + package_url => '#{v[:snapshot_package]}', + MANIFEST + end + + <<-MANIFEST + api_timeout => 60, + jvm_options => [ + '-Xms128m', + '-Xmx128m', + ], + oss => #{v[:oss]}, + #{package} + MANIFEST end - let(:skip_instance_manifests) { true } let(:manifest_class_parameters) { 'restart_on_change => true' } - describe 'with one instance' do + describe 'with hieradata' do + nodename = SecureRandom.hex(10) include_examples( 'hiera tests with', - 'singleinstance', - 'es-hiera-single' => { - 'config' => { - 'node.name' => 'es-hiera-single', - 'http.port' => 9200 - } - } + es_config.merge('node.name' => nodename) ) end plugins.each_pair do |plugin, _meta| describe "with plugin #{plugin}" do + nodename = SecureRandom.hex(10) include_examples( 'hiera tests with', - 'singleplugin', - { - 'es-hiera-single' => { - 'config' => { - 'node.name' => 'es-hiera-single', - 'http.port' => 9200 - } - } - }, + es_config.merge('node.name' => nodename), 'elasticsearch::plugins' => { plugin => { - 'ensure' => 'present', - 'instances' => [ - 'es-hiera-single' - ] + 'ensure' => 'present' } } ) include_examples( 'plugin API response', - { - 'es-hiera-single' => { - 'config' => { - 'node.name' => 'es-hiera-single', - 'http.port' => 9200 - } - } - }, - 'installs the plugin', + es_config.merge('node.name' => nodename), + 'reports the plugin as installed', 'name' => plugin ) end end - describe 'with two instances' do - include_examples( - 'hiera tests with', - 'multipleinstances', - 'es-hiera-multiple-1' => { - 'config' => { - 'node.name' => 'es-hiera-multiple-1', - 'http.bind_host' => '0.0.0.0', - 'http.port' => 9201 - } - }, - 'es-hiera-multiple-2' => { - 'config' => { - 'node.name' => 'es-hiera-multiple-2', - 'http.bind_host' => '0.0.0.0', - 'http.port' => 9202 - } - } - ) - end - after :all do - write_hiera_config([]) + write_hieradata_to(agents, {}) - apply_manifest <<-EOS + # Ensure that elasticsearch is cleaned up before any other tests + cleanup_manifest = <<-EOS class { 'elasticsearch': ensure => 'absent', oss => #{v[:oss]} } - Elasticsearch::Instance { ensure => 'absent' } - elasticsearch::instance { 'es-hiera-single': } - elasticsearch::instance { 'es-hiera-multiple-1': } - elasticsearch::instance { 'es-hiera-multiple-2': } EOS + apply_manifest(cleanup_manifest, :debug => v[:puppet_debug]) end end end diff --git a/spec/helpers/acceptance/tests/manifest_shared_examples.rb b/spec/helpers/acceptance/tests/manifest_shared_examples.rb index 8c01416..eba3863 100644 --- a/spec/helpers/acceptance/tests/manifest_shared_examples.rb +++ b/spec/helpers/acceptance/tests/manifest_shared_examples.rb @@ -1,49 +1,38 @@ -shared_examples 'manifest application' do |instances, idempotency_check = true| - context "#{instances.count}-node manifest" do +shared_examples 'manifest application' 
do |idempotency_check = true| + context 'manifest' do let(:applied_manifest) do - instance_manifest = instances.map do |instance, parameters| - <<-MANIFEST - elasticsearch::instance { '#{instance}': - ensure => #{parameters.empty? ? 'absent' : 'present'}, - #{parameters.map { |k, v| "#{k} => #{v}," }.join("\n")} - #{defined?(manifest_instance_parameters) && manifest_instance_parameters} - } - MANIFEST - end.join("\n") - repo = if elastic_repo <<-MANIFEST class { 'elastic_stack::repo': oss => #{v[:oss]}, version => #{v[:elasticsearch_major_version]}, } MANIFEST else '' end <<-MANIFEST #{repo} class { 'elasticsearch' : #{manifest} #{defined?(manifest_class_parameters) && manifest_class_parameters} } - #{defined?(skip_instance_manifests) || instance_manifest} - #{defined?(extra_manifest) && extra_manifest} MANIFEST end it 'applies cleanly' do - apply_manifest applied_manifest, :catch_failures => true + apply_manifest(applied_manifest, :catch_failures => true, :debug => v[:puppet_debug]) end + # binding.pry if idempotency_check it 'is idempotent', :logs_on_failure do - apply_manifest applied_manifest, :catch_changes => true + apply_manifest(applied_manifest, :catch_changes => true, :debug => v[:puppet_debug]) end end end end diff --git a/spec/helpers/acceptance/tests/package_url_shared_examples.rb b/spec/helpers/acceptance/tests/package_url_shared_examples.rb index 536b09a..a250943 100644 --- a/spec/helpers/acceptance/tests/package_url_shared_examples.rb +++ b/spec/helpers/acceptance/tests/package_url_shared_examples.rb @@ -1,75 +1,71 @@ require 'json' require 'helpers/acceptance/tests/basic_shared_examples' -shared_examples 'package_url acceptance tests' do +shared_examples 'package_url acceptance tests' do |es_config| describe 'elasticsearch::package_url' do - instances = - { - 'es-01' => { - 'config' => { - 'http.port' => 9200 - } - } - } + # Override default manifest to remove `package` + let(:manifest) do + <<-MANIFEST + api_timeout => 60, + config => { + 'cluster.name' => '#{v[:cluster_name]}', + 'http.bind_host' => '0.0.0.0', + #{es_config.map { |k, v| " '#{k}' => '#{v}'," }.join("\n")} + }, + jvm_options => [ + '-Xms128m', + '-Xmx128m', + ], + oss => #{v[:oss]}, + MANIFEST + end - context 'via http', :with_cleanup do - let(:manifest) do + # context 'via http', :with_cleanup do + context 'via http' do + let(:manifest_class_parameters) do <<-MANIFEST - config => { - 'cluster.name' => '#{v[:cluster_name]}', - 'http.bind_host' => '0.0.0.0', - }, manage_repo => false, package_url => '#{v[:elasticsearch_package][:url]}' MANIFEST end - include_examples 'basic acceptance tests', instances + include_examples('basic acceptance tests', es_config) end context 'via local filesystem', :with_cleanup do before :all do scp_to default, v[:elasticsearch_package][:path], "/tmp/#{v[:elasticsearch_package][:filename]}" end - let(:manifest) do + let(:manifest_class_parameters) do <<-MANIFEST - config => { - 'cluster.name' => '#{v[:cluster_name]}', - 'http.bind_host' => '0.0.0.0', - }, manage_repo => false, package_url => 'file:/tmp/#{v[:elasticsearch_package][:filename]}' MANIFEST end - include_examples 'basic acceptance tests', instances + include_examples('basic acceptance tests', es_config) end context 'via puppet paths', :with_cleanup do before :all do shell "mkdir -p #{default['distmoduledir']}/another/files" scp_to default, v[:elasticsearch_package][:path], "#{default['distmoduledir']}/another/files/#{v[:elasticsearch_package][:filename]}" end - let(:manifest) do + let(:manifest_class_parameters) do 
<<-MANIFEST - config => { - 'cluster.name' => '#{v[:cluster_name]}', - 'http.bind_host' => '0.0.0.0', - }, manage_repo => false, - package_url => - 'puppet:///modules/another/#{v[:elasticsearch_package][:filename]}', + package_url => 'puppet:///modules/another/#{v[:elasticsearch_package][:filename]}', MANIFEST end - include_examples 'basic acceptance tests', instances + include_examples('basic acceptance tests', es_config) end end end diff --git a/spec/helpers/acceptance/tests/pipeline_shared_examples.rb b/spec/helpers/acceptance/tests/pipeline_shared_examples.rb index 191e811..181eaf0 100644 --- a/spec/helpers/acceptance/tests/pipeline_shared_examples.rb +++ b/spec/helpers/acceptance/tests/pipeline_shared_examples.rb @@ -1,39 +1,56 @@ require 'json' require 'helpers/acceptance/tests/manifest_shared_examples' require 'helpers/acceptance/tests/bad_manifest_shared_examples' -shared_examples 'pipeline operations' do |instances, pipeline| +shared_examples 'pipeline operations' do |es_config, pipeline| describe 'pipeline resources' do let(:pipeline_name) { 'foo' } context 'present' do let(:extra_manifest) do <<-MANIFEST elasticsearch::pipeline { '#{pipeline_name}': ensure => 'present', content => #{pipeline} } MANIFEST end - include_examples( - 'manifest application', - instances - ) + include_examples('manifest application') - context 'absent' do - let(:extra_manifest) do - <<-MANIFEST - elasticsearch::template { '#{pipeline_name}': - ensure => absent, - } - MANIFEST - end + include_examples('pipeline content', es_config, pipeline) + end + + context 'absent' do + let(:extra_manifest) do + <<-MANIFEST + elasticsearch::pipeline { '#{pipeline_name}': + ensure => absent, + } + MANIFEST + end + + include_examples('manifest application') + end + end +end + +# Verifies the content of a loaded ingest pipeline.
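+# (It checks that the configured HTTP port is listening and that the expected
+# pipeline body appears in the node's GET /_ingest/pipeline response.)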
+shared_examples 'pipeline content' do |es_config, pipeline| + elasticsearch_port = es_config['http.port'] + describe port(elasticsearch_port) do + it 'open', :with_retries do + should be_listening + end + end - include_examples( - 'manifest application', - instances - ) + describe server :container do + describe http( + "http://localhost:#{elasticsearch_port}/_ingest/pipeline" + ) do + it 'returns the configured pipelines', :with_retries do + expect(JSON.parse(response.body).values) + .to include(include(pipeline)) end end end end diff --git a/spec/helpers/acceptance/tests/plugin_api_shared_examples.rb b/spec/helpers/acceptance/tests/plugin_api_shared_examples.rb index 91c44f2..d61cc12 100644 --- a/spec/helpers/acceptance/tests/plugin_api_shared_examples.rb +++ b/spec/helpers/acceptance/tests/plugin_api_shared_examples.rb @@ -1,23 +1,21 @@ require 'json' -shared_examples 'plugin API response' do |instances, desc, val| - instances.each_pair do |_instance, i| - describe port(i['config']['http.port']) do - it 'open', :with_retries do - should be_listening - end +shared_examples 'plugin API response' do |es_config, desc, val| + describe port(es_config['http.port']) do + it 'open', :with_retries do + should be_listening end + end - describe server :container do - describe http( - "http://localhost:#{i['config']['http.port']}/_cluster/stats" - ) do - it desc, :with_retries do - expect( - JSON.parse(response.body)['nodes']['plugins'] - ).to include(include(val)) - end + describe server :container do + describe http( + "http://localhost:#{es_config['http.port']}/_cluster/stats" + ) do + it desc, :with_retries do + expect( + JSON.parse(response.body)['nodes']['plugins'] + ).to include(include(val)) end end end end diff --git a/spec/helpers/acceptance/tests/plugin_shared_examples.rb b/spec/helpers/acceptance/tests/plugin_shared_examples.rb index 87c9894..6628998 100644 --- a/spec/helpers/acceptance/tests/plugin_shared_examples.rb +++ b/spec/helpers/acceptance/tests/plugin_shared_examples.rb @@ -1,125 +1,98 @@ require 'json' require 'helpers/acceptance/tests/bad_manifest_shared_examples' require 'helpers/acceptance/tests/manifest_shared_examples' require 'helpers/acceptance/tests/plugin_api_shared_examples' -shared_examples 'plugin acceptance tests' do |plugins| +shared_examples 'plugin acceptance tests' do |es_config, plugins| describe 'elasticsearch::plugin' do - instances = { - 'es-01' => { - 'config' => { - 'http.port' => 9200, - 'node.name' => 'elasticsearch001' - } - } - } - describe 'invalid plugins', :with_cleanup do let(:extra_manifest) do <<-MANIFEST - elasticsearch::plugin { 'elastic/non-existing': - instances => 'es-01', - } + elasticsearch::plugin { 'elastic/non-existing': } MANIFEST end - include_examples( - 'invalid manifest application', - instances - ) + include_examples('invalid manifest application') end before :all do shell "mkdir -p #{default['distmoduledir']}/another/files" end plugins.each_pair do |plugin, meta| describe plugin do # Ensure that instances are restarted to include plugins let(:manifest_class_parameters) { 'restart_on_change => true' } describe 'installation' do describe 'using simple names', :with_cleanup do let(:extra_manifest) do <<-MANIFEST - elasticsearch::plugin { '#{plugin}': - instances => 'es-01', - } + elasticsearch::plugin { '#{plugin}': } MANIFEST end - include_examples( - 'manifest application', - instances - ) + include_examples('manifest application', es_config) describe file("/usr/share/elasticsearch/plugins/#{plugin}/") do it { should 
be_directory } end include_examples( 'plugin API response', - instances, + es_config, 'reports the plugin as installed', 'name' => plugin ) end describe 'offline via puppet://', :with_cleanup do before :all do scp_to( default, meta[:path], "#{default['distmoduledir']}/another/files/#{plugin}.zip" ) end let(:extra_manifest) do <<-MANIFEST elasticsearch::plugin { '#{plugin}': - instances => 'es-01', - source => 'puppet:///modules/another/#{plugin}.zip', + source => 'puppet:///modules/another/#{plugin}.zip', } MANIFEST end - include_examples( - 'manifest application', - instances - ) + include_examples('manifest application', es_config) include_examples( 'plugin API response', - instances, + es_config, 'reports the plugin as installed', 'name' => plugin ) end describe 'via url', :with_cleanup do let(:extra_manifest) do <<-MANIFEST elasticsearch::plugin { '#{plugin}': - instances => 'es-01', - url => '#{meta[:url]}', + url => '#{meta[:url]}', } MANIFEST end - include_examples( - 'manifest application', - instances - ) + include_examples('manifest application', es_config) include_examples( 'plugin API response', - instances, + es_config, 'reports the plugin as installed', 'name' => plugin ) end end end end end end diff --git a/spec/helpers/acceptance/tests/removal_shared_examples.rb b/spec/helpers/acceptance/tests/removal_shared_examples.rb index a173931..518d691 100644 --- a/spec/helpers/acceptance/tests/removal_shared_examples.rb +++ b/spec/helpers/acceptance/tests/removal_shared_examples.rb @@ -1,35 +1,30 @@ -shared_examples 'module removal' do |instances| +shared_examples 'module removal' do |es_config| describe 'uninstalling' do let(:manifest) do - instance_resource = <<-RESOURCE - elasticsearch::instance { '%s' : - ensure => 'absent' - } - RESOURCE - <<-MANIFEST class { 'elasticsearch': ensure => 'absent', oss => #{v[:oss]} } - #{instances.map { |i| instance_resource % i }.join("\n")} MANIFEST end it 'should run successfully' do - apply_manifest manifest, :catch_failures => true + apply_manifest(manifest, :catch_failures => true, :debug => v[:puppet_debug]) end - it 'is idempotent' do - apply_manifest manifest, :catch_changes => true + describe package("elasticsearch#{v[:oss] ? '-oss' : ''}") do + it { should_not be_installed } end - instances.each do |instance| - describe file("/etc/elasticsearch/#{instance}") do - it { should_not be_directory } - end + describe service('elasticsearch') do + it { should_not be_enabled } + it { should_not be_running } + end - describe service(instance) do - it { should_not be_enabled } - it { should_not be_running } + unless es_config.empty? + describe port(es_config['http.port']) do + it 'closed' do + should_not be_listening + end end end end end diff --git a/spec/helpers/acceptance/tests/security_shared_examples.rb b/spec/helpers/acceptance/tests/security_shared_examples.rb index a291410..1c0f2ea 100644 --- a/spec/helpers/acceptance/tests/security_shared_examples.rb +++ b/spec/helpers/acceptance/tests/security_shared_examples.rb @@ -1,272 +1,180 @@ require 'json' require 'spec_utilities' require 'helpers/acceptance/tests/manifest_shared_examples' -shared_examples 'security plugin manifest' do |instances, credentials| +shared_examples 'security plugin manifest' do |credentials| let(:extra_manifest) do - instance_plugins = - <<-MANIFEST - Elasticsearch::Plugin { instances => #{instances.keys} } - MANIFEST - users = credentials.map do |username, meta| <<-USER #{meta[:changed] ? 
"notify { 'password change for #{username}' : } ~>" : ''} elasticsearch::user { '#{username}': password => '#{meta[:hash] ? meta[:hash] : meta[:plaintext]}', roles => #{meta[:roles].reduce({}) { |a, e| a.merge(e) }.keys}, } USER end.join("\n") roles = credentials.values.reduce({}) do |sum, user_metadata| # Collect all roles across users sum.merge user_metadata end[:roles].reduce({}) do |all_roles, role| all_roles.merge role end.reject do |_role, permissions| permissions.empty? end.map do |role, rights| <<-ROLE elasticsearch::role { '#{role}': privileges => #{rights} } ROLE end.join("\n") <<-MANIFEST - #{security_plugins} - - #{instance_plugins} - #{users} #{roles} - - #{ssl_params} MANIFEST end include_examples( 'manifest application', - instances, not(credentials.values.map { |p| p[:changed] }.any?) ) end -shared_examples 'secured request' do |test_desc, instances, path, http_test, expected, user = nil, pass = nil| - instances.each_value do |i| - describe port(i['config']['http.port']) do - it 'open', :with_generous_retries do - should be_listening - end +shared_examples 'secured request' do |test_desc, es_config, path, http_test, expected, user = nil, pass = nil| + es_port = es_config['http.port'] + describe port(es_port) do + it 'open', :with_retries do + should be_listening end + end - describe server :container do - describe http( - "https://localhost:#{i['config']['http.port']}#{path}", - { - :ssl => { :verify => false } - }.merge((user and pass) ? { :basic_auth => [user, pass] } : {}) - ) do - it test_desc, :with_retries do - expect(http_test.call(response)).to eq(expected) - end + describe server :container do + describe http( + "https://localhost:#{es_port}#{path}", + { + :ssl => { :verify => false } + }.merge((user and pass) ? { :basic_auth => [user, pass] } : {}) + ) do + it test_desc, :with_retries do + expect(http_test.call(response)).to eq(expected) end end end end -shared_examples 'security acceptance tests' do |default_instances| +shared_examples 'security acceptance tests' do |es_config| describe 'security plugin operations', :if => vault_available?, :then_purge => true, :with_license => true, :with_certificates => true do - superuser_role = v[:elasticsearch_major_version] > 2 ? 'superuser' : 'admin' rand_string = lambda { [*('a'..'z')].sample(8).join } admin_user = rand_string.call admin_password = rand_string.call - admin = { admin_user => { :plaintext => admin_password, :roles => [{ superuser_role => [] }] } } + admin = { admin_user => { :plaintext => admin_password, :roles => [{ 'superuser' => [] }] } } let(:manifest_class_parameters) do <<-MANIFEST - license => file('#{v[:elasticsearch_license_path]}'), - restart_on_change => true, - security_plugin => '#{v[:elasticsearch_major_version] > 2 ? 
'x-pack' : 'shield'}', api_basic_auth_password => '#{admin_password}', api_basic_auth_username => '#{admin_user}', api_ca_file => '#{@tls[:ca][:cert][:path]}', api_protocol => 'https', + ca_certificate => '#{@tls[:ca][:cert][:path]}', + certificate => '#{@tls[:clients].first[:cert][:path]}', + keystore_password => '#{@keystore_password}', + license => file('#{v[:elasticsearch_license_path]}'), + private_key => '#{@tls[:clients].first[:key][:path]}', + restart_on_change => true, + ssl => true, validate_tls => true, MANIFEST end - let(:security_plugins) do - if v[:elasticsearch_major_version] <= 2 - <<-MANIFEST - elasticsearch::plugin { 'elasticsearch/license/latest' : } - elasticsearch::plugin { 'elasticsearch/shield/latest' : } - MANIFEST - elsif semver(v[:elasticsearch_full_version].split('-').first) < semver('6.3.0') - <<-MANIFEST - elasticsearch::plugin { 'x-pack' : } - MANIFEST - else - '' - end - end - describe 'over tls' do user_one = rand_string.call user_two = rand_string.call user_one_pw = rand_string.call user_two_pw = rand_string.call - context "instance #{default_instances.first.first}" do - instance_name = default_instances.keys.first - instance = { instance_name => default_instances[instance_name].merge('ssl' => true) } - - let(:ssl_params) do - <<-MANIFEST - Elasticsearch::Instance['#{instance_name}'] { - ca_certificate => '#{@tls[:ca][:cert][:path]}', - certificate => '#{@tls[:clients].first[:cert][:path]}', - private_key => '#{@tls[:clients].first[:key][:path]}', - keystore_password => '#{@keystore_password}', - } - MANIFEST - end - - describe 'user authentication' do - username_passwords = { - user_one => { :plaintext => user_one_pw, :roles => [{ superuser_role => [] }] }, - user_two => { :plaintext => user_two_pw, :roles => [{ superuser_role => [] }] } - }.merge(admin) - username_passwords[user_two][:hash] = bcrypt(username_passwords[user_two][:plaintext]) - - include_examples('security plugin manifest', instance, username_passwords) - include_examples( - 'secured request', 'denies unauthorized access', - instance, '/_cluster/health', - lambda { |r| r.status }, 401 - ) - include_examples( - 'secured request', "permits user #{user_one} access", - instance, '/_cluster/health', - lambda { |r| r.status }, 200, - user_one, user_one_pw - ) - include_examples( - 'secured request', "permits user #{user_two} access", - instance, '/_cluster/health', - lambda { |r| r.status }, 200, - user_two, user_two_pw - ) - end + describe 'user authentication' do + username_passwords = { + user_one => { :plaintext => user_one_pw, :roles => [{ 'superuser' => [] }] }, + user_two => { :plaintext => user_two_pw, :roles => [{ 'superuser' => [] }] } + }.merge(admin) + username_passwords[user_two][:hash] = bcrypt(username_passwords[user_two][:plaintext]) - describe 'changing passwords' do - new_password = rand_string.call - username_passwords = { - user_one => { - :plaintext => new_password, - :changed => true, - :roles => [{ superuser_role => [] }] - } - } - - include_examples('security plugin manifest', instance, username_passwords) - include_examples( - 'secured request', 'denies unauthorized access', instance, '/_cluster/health', - lambda { |r| r.status }, 401 - ) - include_examples( - 'secured request', "permits user #{user_two} access with new password", - instance, '/_cluster/health', - lambda { |r| r.status }, 200, - user_one, new_password - ) - end + include_examples('security plugin manifest', username_passwords) + include_examples( + 'secured request', 'denies unauthorized access', + 
es_config, '/_cluster/health', + lambda { |r| r.status }, 401 + ) + include_examples( + 'secured request', "permits user #{user_one} access", + es_config, '/_cluster/health', + lambda { |r| r.status }, 200, + user_one, user_one_pw + ) + include_examples( + 'secured request', "permits user #{user_two} access", + es_config, '/_cluster/health', + lambda { |r| r.status }, 200, + user_two, user_two_pw + ) + end - describe 'roles' do - password = rand_string.call - username = rand_string.call - user = { - username => { - :plaintext => password, - :roles => [{ - rand_string.call => { - 'cluster' => [ - 'cluster:monitor/health' - ] - } - }] - } + describe 'changing passwords' do + new_password = rand_string.call + username_passwords = { + user_one => { + :plaintext => new_password, + :changed => true, + :roles => [{ 'superuser' => [] }] } + } - include_examples('security plugin manifest', instance, user) - include_examples( - 'secured request', 'denies unauthorized access', - instance, '/_snapshot', - lambda { |r| r.status }, 403, - username, password - ) - include_examples( - 'secured request', 'permits authorized access', - instance, '/_cluster/health', - lambda { |r| r.status }, 200, - username, password - ) - end + include_examples('security plugin manifest', username_passwords) + include_examples( + 'secured request', 'denies unauthorized access', es_config, '/_cluster/health', + lambda { |r| r.status }, 401 + ) + include_examples( + 'secured request', "permits user #{user_two} access with new password", + es_config, '/_cluster/health', + lambda { |r| r.status }, 200, + user_one, new_password + ) end - describe 'with two instances' do - let(:ssl_params) do - @tls[:clients].each_with_index.map do |cert, i| - format(%( - Elasticsearch::Instance['es-%02d'] { - ca_certificate => '#{@tls[:ca][:cert][:path]}', - certificate => '#{cert[:cert][:path]}', - private_key => '#{cert[:key][:path]}', - keystore_password => '#{@keystore_password}', - } - ), i + 1) - end.join("\n") - end - - ssl_instances = default_instances.map do |instance, meta| - new_config = if v[:elasticsearch_major_version] > 2 - { 'xpack.ssl.verification_mode' => 'none' } - else - { 'shield.ssl.hostname_verification' => false } - end - [ - instance, - { - 'config' => meta['config'].merge(new_config).merge( - 'discovery.zen.minimum_master_nodes' => default_instances.keys.size - ), - 'ssl' => true - } - ] - end.to_h - - username = rand_string.call + describe 'roles' do password = rand_string.call - - include_examples( - 'security plugin manifest', - ssl_instances, + username = rand_string.call + user = { username => { :plaintext => password, - :roles => [{ superuser_role => [] }] + :roles => [{ + rand_string.call => { + 'cluster' => [ + 'cluster:monitor/health' + ] + } + }] } - ) + } + include_examples('security plugin manifest', user) + include_examples( + 'secured request', 'denies unauthorized access', + es_config, '/_snapshot', + lambda { |r| r.status }, 403, + username, password + ) include_examples( - 'secured request', 'clusters between two nodes', - ssl_instances, '/_nodes', - lambda { |r| JSON.parse(r.body)['nodes'].size }, 2, + 'secured request', 'permits authorized access', + es_config, '/_cluster/health', + lambda { |r| r.status }, 200, username, password ) end end end end diff --git a/spec/helpers/acceptance/tests/snapshot_repository_shared_examples.rb b/spec/helpers/acceptance/tests/snapshot_repository_shared_examples.rb index 3eed58d..abd329f 100644 --- 
a/spec/helpers/acceptance/tests/snapshot_repository_shared_examples.rb +++ b/spec/helpers/acceptance/tests/snapshot_repository_shared_examples.rb @@ -1,55 +1,81 @@ require 'json' require 'helpers/acceptance/tests/manifest_shared_examples' # Main entrypoint for snapshot tests shared_examples 'snapshot repository acceptance tests' do describe 'elasticsearch::snapshot_repository', :with_cleanup do + es_config = { + 'http.port' => 9200, + 'node.name' => 'elasticsearchSnapshot01', + 'path.repo' => '/var/lib/elasticsearch' + } + + # Override the manifest in order to populate 'path.repo' + let(:manifest) do + package = if not v[:is_snapshot] + <<-MANIFEST + # Hard version set here due to plugin incompatibilities. + version => '#{v[:elasticsearch_full_version]}', + MANIFEST + else + <<-MANIFEST + manage_repo => false, + package_url => '#{v[:snapshot_package]}', + MANIFEST + end + + <<-MANIFEST + api_timeout => 60, + config => { + 'cluster.name' => '#{v[:cluster_name]}', + 'http.bind_host' => '0.0.0.0', + #{es_config.map { |k, v| " '#{k}' => '#{v}'," }.join("\n")} + }, + jvm_options => [ + '-Xms128m', + '-Xmx128m', + ], + oss => #{v[:oss]}, + #{package} + MANIFEST + end + let(:manifest_class_parameters) { 'restart_on_change => true' } let(:extra_manifest) do <<-MANIFEST elasticsearch::snapshot_repository { 'backup': ensure => 'present', api_timeout => 60, location => '/var/lib/elasticsearch/backup', max_restore_rate => '20mb', max_snapshot_rate => '80mb', - require => Elasticsearch::Instance['es-01'] } MANIFEST end - instance = { - 'es-01' => { - 'config' => { - 'http.port' => 9200, - 'path.repo' => '/var/lib/elasticsearch' - } - } - } - instance['es-01']['config']['path.repo'] = [instance['es-01']['config']['path.repo']] if v[:elasticsearch_major_version] > 2 - - include_examples('manifest application', instance) + include_examples('manifest application', es_config) - describe port(9200) do + es_port = es_config['http.port'] + describe port(es_port) do it 'open', :with_retries do should be_listening end end describe server :container do describe http( - 'http://localhost:9200/_snapshot/backup' + "http://localhost:#{es_port}/_snapshot/backup" ) do it 'returns the snapshot repository', :with_retries do expect(JSON.parse(response.body)['backup']) .to include('settings' => a_hash_including( - 'location' => '/var/lib/elasticsearch/backup', - 'max_restore_rate' => '20mb', - 'max_snapshot_rate' => '80mb' + 'location' => '/var/lib/elasticsearch/backup', + 'max_restore_rate' => '20mb', + 'max_snapshot_rate' => '80mb' )) end end end end end diff --git a/spec/helpers/acceptance/tests/template_shared_examples.rb b/spec/helpers/acceptance/tests/template_shared_examples.rb index fd9782d..11044ad 100644 --- a/spec/helpers/acceptance/tests/template_shared_examples.rb +++ b/spec/helpers/acceptance/tests/template_shared_examples.rb @@ -1,121 +1,111 @@ require 'json' require 'helpers/acceptance/tests/manifest_shared_examples' require 'helpers/acceptance/tests/bad_manifest_shared_examples' # Describes how to apply a manifest with a template, verify it, and clean it up -shared_examples 'template application' do |instances, name, template, param| +shared_examples 'template application' do |es_config, name, template, param| context 'present' do let(:extra_manifest) do <<-MANIFEST elasticsearch::template { '#{name}': ensure => 'present', #{param} } MANIFEST end - include_examples( - 'manifest application', - instances - ) + include_examples('manifest application') - include_examples 'template content', instances, 
template + include_examples('template content', es_config, template) end context 'absent' do let(:extra_manifest) do <<-MANIFEST elasticsearch::template { '#{name}': ensure => absent, } MANIFEST end - include_examples( - 'manifest application', - instances - ) + include_examples('manifest application') end end # Verifies the content of a loaded index template. -shared_examples 'template content' do |instances, template| - instances.each_value do |i| - describe port(i['config']['http.port']) do - it 'open', :with_retries do - should be_listening - end +shared_examples 'template content' do |es_config, template| + elasticsearch_port = es_config['http.port'] + describe port(elasticsearch_port) do + it 'open', :with_retries do + should be_listening end + end - describe server :container do - describe http( - "http://localhost:#{i['config']['http.port']}/_template", - :params => { 'flat_settings' => 'false' } - ) do - it 'returns the installed template', :with_retries do - expect(JSON.parse(response.body).values) - .to include(include(template)) - end + describe server :container do + describe http( + "http://localhost:#{elasticsearch_port}/_template", + :params => { 'flat_settings' => 'false' } + ) do + it 'returns the installed template', :with_retries do + expect(JSON.parse(response.body).values) + .to include(include(template)) end end end end # Main entrypoint for template tests -shared_examples 'template operations' do |instances, template| +shared_examples 'template operations' do |es_config, template| describe 'template resources' do before :all do shell "mkdir -p #{default['distmoduledir']}/another/files" create_remote_file( default, "#{default['distmoduledir']}/another/files/good.json", JSON.dump(template) ) create_remote_file( default, "#{default['distmoduledir']}/another/files/bad.json", JSON.dump(template)[0..-5] ) end context 'configured through' do context '`source`' do include_examples( 'template application', - instances, + es_config, SecureRandom.hex(8), template, "source => 'puppet:///modules/another/good.json'" ) end context '`content`' do include_examples( 'template application', - instances, + es_config, SecureRandom.hex(8), template, "content => '#{JSON.dump(template)}'" ) end context 'bad json' do let(:extra_manifest) do <<-MANIFEST elasticsearch::template { '#{SecureRandom.hex(8)}': ensure => 'present', file => 'puppet:///modules/another/bad.json' } MANIFEST end - include_examples( - 'invalid manifest application', - instances - ) + include_examples('invalid manifest application') end end end end diff --git a/spec/helpers/class_shared_examples.rb b/spec/helpers/class_shared_examples.rb new file mode 100644 index 0000000..c7d631a --- /dev/null +++ b/spec/helpers/class_shared_examples.rb @@ -0,0 +1,8 @@ +shared_examples 'class' do + it { should compile.with_all_deps } + it { should contain_augeas('/etc/sysconfig/elasticsearch') } + it { should contain_file('/etc/elasticsearch/elasticsearch.yml') } + it { should contain_datacat('/etc/elasticsearch/elasticsearch.yml') } + it { should contain_datacat_fragment('main_config') } + it { should contain_service('elasticsearch') } +end diff --git a/spec/helpers/instance_shared_examples.rb b/spec/helpers/instance_shared_examples.rb deleted file mode 100644 index b89dc4e..0000000 --- a/spec/helpers/instance_shared_examples.rb +++ /dev/null @@ -1,35 +0,0 @@ -shared_examples 'instance' do |name, init| - it { should contain_elasticsearch__instance(name) } - it { should contain_augeas("defaults_#{name}") } - it { should 
contain_datacat("/etc/elasticsearch/#{name}/elasticsearch.yml") } - it { should contain_datacat_fragment("main_config_#{name}") } - it { should contain_elasticsearch__instance(name) } - it { should contain_elasticsearch__service(name) } - it { should contain_exec("mkdir_configdir_elasticsearch_#{name}") } - it { should contain_exec("mkdir_datadir_elasticsearch_#{name}") - .with(:command => "mkdir -p /var/lib/elasticsearch/#{name}") } - it { should contain_exec("mkdir_logdir_elasticsearch_#{name}") - .with(:command => "mkdir -p /var/log/elasticsearch/#{name}") } - it { should contain_elasticsearch__service(name) } - it { should contain_service("elasticsearch-instance-#{name}") } - - %w[/var/log/elasticsearch /var/lib/elasticsearch /etc/elasticsearch].each do |dir| - it { should contain_file("#{dir}/#{name}").with(:ensure => 'directory') } - end - - %w[elasticsearch.yml jvm.options logging.yml log4j2.properties].each do |file| - it { should contain_file("/etc/elasticsearch/#{name}/#{file}") } - end - - case init - when :sysv - it { should contain_elasticsearch__service__init(name) } - it { should contain_elasticsearch_service_file("/etc/init.d/elasticsearch-#{name}") } - it { should contain_file("/etc/init.d/elasticsearch-#{name}") } - when :systemd - it { should contain_elasticsearch__service__systemd(name) } - it { should contain_elasticsearch_service_file("/lib/systemd/system/elasticsearch-#{name}.service") } - it { should contain_file("/lib/systemd/system/elasticsearch-#{name}.service") } - it { should contain_exec("systemd_reload_#{name}") } - end -end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb index 744c3ff..cd90602 100644 --- a/spec/spec_helper.rb +++ b/spec/spec_helper.rb @@ -1,17 +1,21 @@ -require 'puppetlabs_spec_helper/module_spec_helper' -require_relative 'helpers/instance_shared_examples' +require_relative 'helpers/class_shared_examples' require 'rspec-puppet-utils' require 'rspec-puppet-facts' include RspecPuppetFacts def fixture_path File.expand_path(File.join(__FILE__, '..', 'fixtures')) end $LOAD_PATH.unshift(File.expand_path(File.dirname(__FILE__) + '/../')) +RSpec.configure do |c| + c.mock_with :rspec +end +require 'puppetlabs_spec_helper/module_spec_helper' + RSpec.configure do |c| c.add_setting :fixture_path, :default => fixture_path - c.mock_with(:rspec) + # c.mock_with(:rspec) c.hiera_config = File.join(fixture_path, '/hiera/hiera.yaml') end diff --git a/spec/spec_helper_acceptance.rb b/spec/spec_helper_acceptance.rb index 63d60b6..8431fcc 100644 --- a/spec/spec_helper_acceptance.rb +++ b/spec/spec_helper_acceptance.rb @@ -1,283 +1,260 @@ require 'beaker-rspec' +require 'beaker/puppet_install_helper' require 'securerandom' require 'thread' require 'infrataster/rspec' require 'rspec/retry' require 'vault' +require 'simp/beaker_helpers' +include Simp::BeakerHelpers + require_relative 'spec_helper_tls' require_relative 'spec_utilities' require_relative '../lib/puppet_x/elastic/deep_to_i' require_relative '../lib/puppet_x/elastic/deep_to_s' -def f - RSpec.configuration.fact -end +# def f +# RSpec.configuration.fact +# end + +run_puppet_install_helper('agent') unless ENV['BEAKER_provision'] == 'no' RSpec.configure do |c| # General-purpose spec-global variables c.add_setting :v, :default => {} + # Puppet debug logging + v[:puppet_debug] = ENV['BEAKER_debug'] ? true : false + unless ENV['snapshot_version'].nil? v[:snapshot_version] = ENV['snapshot_version'] v[:is_snapshot] = ENV['SNAPSHOT_TEST'] == 'true' end unless ENV['ELASTICSEARCH_VERSION'].nil? 
and v[:snapshot_version].nil? v[:elasticsearch_full_version] = ENV['ELASTICSEARCH_VERSION'] || v[:snapshot_version] v[:elasticsearch_major_version] = v[:elasticsearch_full_version].split('.').first.to_i v[:elasticsearch_package] = {} - v[:template] = if v[:elasticsearch_major_version] < 6 - JSON.load(File.new('spec/fixtures/templates/pre_6.0.json')) + v[:template] = if v[:elasticsearch_major_version] == 6 + JSON.load(File.new('spec/fixtures/templates/6.x.json')) elsif v[:elasticsearch_major_version] >= 8 JSON.load(File.new('spec/fixtures/templates/post_8.0.json')) else - JSON.load(File.new('spec/fixtures/templates/post_6.0.json')) + JSON.load(File.new('spec/fixtures/templates/7.x.json')) end v[:template] = Puppet_X::Elastic.deep_to_i(Puppet_X::Elastic.deep_to_s(v[:template])) v[:pipeline] = JSON.load(File.new('spec/fixtures/pipelines/example.json')) v[:elasticsearch_plugins] = Dir[ artifact("*#{v[:elasticsearch_full_version]}.zip", ['plugins']) ].map do |plugin| plugin_filename = File.basename(plugin) plugin_name = plugin_filename.match(/^(?.+)-#{v[:elasticsearch_full_version]}.zip/)[:name] [ plugin_name, { :path => plugin, :url => derive_plugin_urls_for(v[:elasticsearch_full_version], [plugin_name]).keys.first } ] end.to_h end v[:oss] = (not ENV['OSS_PACKAGE'].nil?) and ENV['OSS_PACKAGE'] == 'true' v[:cluster_name] = SecureRandom.hex(10) # rspec-retry c.display_try_failure_messages = true - c.default_sleep_interval = 5 + c.default_sleep_interval = 10 # General-case retry keyword for unstable tests c.around :each, :with_retries do |example| - example.run_with_retry retry: 4 - end - # More forgiving retry config for really flaky tests - c.around :each, :with_generous_retries do |example| example.run_with_retry retry: 10 end # Helper hook for module cleanup c.after :context, :with_cleanup do apply_manifest <<-EOS class { 'elasticsearch': ensure => 'absent', manage_repo => true, oss => #{v[:oss]}, } - elasticsearch::instance { 'es-01': ensure => 'absent' } file { '/usr/share/elasticsearch/plugin': ensure => 'absent', force => true, recurse => true, require => Class['elasticsearch'], } EOS end c.before :context, :with_certificates do @keystore_password = SecureRandom.hex @role = [*('a'..'z')].sample(8).join # Setup TLS cert placement @tls = gen_certs(2, '/tmp') create_remote_file hosts, @tls[:ca][:cert][:path], @tls[:ca][:cert][:pem] @tls[:clients].each do |node| node.each do |_type, params| create_remote_file hosts, params[:path], params[:pem] end end end c.before :context, :with_license do Vault.address = ENV['VAULT_ADDR'] - Vault.auth.approle ENV['VAULT_APPROLE_ROLE_ID'], ENV['VAULT_APPROLE_SECRET_ID'] + if ENV['CI'] + Vault.auth.approle(ENV['VAULT_APPROLE_ROLE_ID'], ENV['VAULT_APPROLE_SECRET_ID']) + else + Vault.auth.token(ENV['VAULT_TOKEN']) + end licenses = Vault.with_retries(Vault::HTTPConnectionError) do Vault.logical.read(ENV['VAULT_PATH']) end.data raise 'No license found!' 
unless licenses - license = case v[:elasticsearch_major_version] - when 2 - licenses[:v2] - else - licenses[:v5] - end + # license = case v[:elasticsearch_major_version] + # when 6 + # licenses[:v5] + # else + # licenses[:v7] + # end + license = licenses[:v7] create_remote_file hosts, '/tmp/license.json', license v[:elasticsearch_license_path] = '/tmp/license.json' end c.after :context, :then_purge do shell 'rm -rf {/usr/share,/etc,/var/lib}/elasticsearch*' end c.before :context, :first_purge do shell 'rm -rf {/usr/share,/etc,/var/lib}/elasticsearch*' end # Provide a hook filter to spit out some ES logs if the example fails. c.after(:example, :logs_on_failure) do |example| if example.exception hosts.each do |host| on host, "find / -name '#{v[:cluster_name]}.log' | xargs cat || true" do |result| puts result.formatted_output end end end end end files_dir = ENV['files_dir'] || './spec/fixtures/artifacts' # General bootstrapping steps for each host hosts.each do |host| - # Set the host to 'aio' in order to adopt the puppet-agent style of - # installation, and configure paths/etc. - host[:type] = 'aio' - configure_defaults_on host, 'aio' - - # Install Puppet - # - # We spawn a thread to print dots periodically while installing puppet to - # avoid inactivity timeouts in Travis. Don't judge me. - progress = Thread.new do - print 'Installing puppet..' - print '.' while sleep 5 - end + # # Set the host to 'aio' in order to adopt the puppet-agent style of + # # installation, and configure paths/etc. + # host[:type] = 'aio' + # configure_defaults_on host, 'aio' - case host.name - when /debian-9/ - # A few special cases need to be installed from gems (if the distro is - # very new and has no puppet repo package or has no upstream packages). - install_puppet_from_gem( - host, - version: Gem.loaded_specs['puppet'].version - ) - else - # Otherwise, just use the all-in-one agent package. - install_puppet_agent_on( - host, - puppet_agent_version: to_agent_version(Gem.loaded_specs['puppet'].version) - ) - end - # Quit the print thread and include some debugging. - progress.exit - puts "done. 
Installed version #{shell('puppet --version').output}" - - RSpec.configure do |c| - c.add_setting :fact, :default => JSON.parse(fact('', '-j')) - end - - if f['os']['family'] == 'Suse' + if fact('os.family') == 'Suse' install_package host, '--force-resolution augeas-devel libxml2-devel ruby-devel' on host, 'gem install ruby-augeas --no-ri --no-rdoc' end - v[:ext] = case f['os']['family'] + v[:ext] = case fact('os.family') when 'Debian' 'deb' else 'rpm' end if v[:elasticsearch_package] v[:elasticsearch_package].merge!( derive_full_package_url( v[:elasticsearch_full_version], [v[:ext]] ).flat_map do |url, filename| [[:url, url], [:filename, filename], [:path, artifact(filename)]] end.to_h ) end Infrataster::Server.define(:docker) do |server| server.address = host[:ip] server.ssh = host[:ssh].tap { |s| s.delete :forward_agent } end Infrataster::Server.define(:container) do |server| server.address = host[:vm_ip] # this gets ignored anyway server.from = :docker end end RSpec.configure do |c| if v[:is_snapshot] c.before :suite do scp_to default, "#{files_dir}/elasticsearch-snapshot.#{v[:ext]}", "/tmp/elasticsearch-snapshot.#{v[:ext]}" v[:snapshot_package] = "file:/tmp/elasticsearch-snapshot.#{v[:ext]}" end end c.before :suite do # Install module and dependencies install_dev_puppet_module :ignore_list => [ 'junit' ] + Beaker::DSL::InstallUtils::ModuleUtils::PUPPET_MODULE_INSTALL_IGNORE hosts.each do |host| modules = %w[archive augeas_core datacat java java_ks stdlib elastic_stack] dist_module = { 'Debian' => ['apt'], 'Suse' => ['zypprepo'], 'RedHat' => %w[concat yumrepo_core] - }[f['os']['family']] + }[fact('os.family')] modules += dist_module unless dist_module.nil? modules.each do |mod| copy_module_to( host, :module_name => mod, :source => "spec/fixtures/modules/#{mod}" ) end on(host, 'mkdir -p etc/puppet/modules/another/files/') # Apt doesn't update package caches sometimes, ensure we're caught up. - shell 'apt-get update' if f['os']['family'] == 'Debian' + shell 'apt-get update' if fact('os.family') == 'Debian' end # Use the Java class once before the suite of tests unless shell('command -v java', :accept_all_exit_codes => true).exit_code.zero? - java = case f['os']['name'] + java = case fact('os.name') when 'OpenSuSE' 'package => "java-1_8_0-openjdk-headless",' else '' end apply_manifest <<-MANIFEST class { "java" : - distribution => "jre", + distribution => "jdk", #{java} } MANIFEST end end end -# Java 8 is only easy to manage on recent distros -def v5x_capable? - (f['os']['family'] == 'RedHat' and \ - not (f['os']['name'] == 'OracleLinux' and \ - f['os']['release']['major'] == '6')) or \ - f.dig 'os', 'distro', 'codename' == 'xenial' -end +# # Java 8 is only easy to manage on recent distros +# def v5x_capable? 
+# (fact('os.family') == 'RedHat' and \ +# not (fact('os.name') == 'OracleLinux' and \ +# f['os']['release']['major'] == '6')) or \ +# f.dig 'os', 'distro', 'codename' == 'xenial' +# end diff --git a/spec/spec_helper_tls.rb b/spec/spec_helper_tls.rb index bd97562..c3a1250 100644 --- a/spec/spec_helper_tls.rb +++ b/spec/spec_helper_tls.rb @@ -1,90 +1,102 @@ require 'openssl' def gen_certs(num_certs, path) ret = { :clients => [] } serial = 1_000_000 ca_key = OpenSSL::PKey::RSA.new 2048 # CA Cert - ca_name = OpenSSL::X509::Name.parse 'CN=ca/DC=example' + ca_name = OpenSSL::X509::Name.parse 'CN=ca/DC=example/DC=com' ca_cert = OpenSSL::X509::Certificate.new ca_cert.serial = serial serial += 1 ca_cert.version = 2 ca_cert.not_before = Time.now ca_cert.not_after = Time.now + 86_400 ca_cert.public_key = ca_key.public_key ca_cert.subject = ca_name ca_cert.issuer = ca_name extension_factory = OpenSSL::X509::ExtensionFactory.new extension_factory.subject_certificate = ca_cert extension_factory.issuer_certificate = ca_cert - ca_cert.add_extension extension_factory.create_extension( - 'subjectAltName', ['localhost', '127.0.0.1'].map { |d| "DNS: #{d}" }.join(',') - ) + # ca_cert.add_extension extension_factory.create_extension( + # 'subjectAltName', ['localhost', '127.0.0.1'].map { |d| "DNS: #{d}" }.join(',') + # ) ca_cert.add_extension extension_factory.create_extension( 'subjectKeyIdentifier', 'hash' ) ca_cert.add_extension extension_factory.create_extension( 'basicConstraints', 'CA:TRUE', true ) - ca_cert.sign ca_key, OpenSSL::Digest::SHA1.new + ca_cert.sign ca_key, OpenSSL::Digest::SHA256.new ret[:ca] = { :cert => { :pem => ca_cert.to_pem, :path => path + '/ca_cert.pem' } } num_certs.times do |i| key, cert, serial = gen_cert_pair serial, ca_cert - cert.sign ca_key, OpenSSL::Digest::SHA1.new + cert.sign ca_key, OpenSSL::Digest::SHA256.new ret[:clients] << { :key => { :pem => key.to_pem, :path => path + '/' + i.to_s + '_key.pem' }, :cert => { :pem => cert.to_pem, :path => path + '/' + i.to_s + '_cert.pem' } } end ret end def gen_cert_pair(serial, ca_cert) serial += 1 # Node Key key = OpenSSL::PKey::RSA.new 2048 - node_name = OpenSSL::X509::Name.parse 'CN=localhost/DC=example' + node_name = OpenSSL::X509::Name.parse 'CN=localhost/DC=example/DC=com' + + # prepare SANS list + sans = ['localhost.localdomain', 'localhost', 'localhost.example.com'] + sans_list = sans.map { |domain| "DNS:#{domain}" } # Node Cert cert = OpenSSL::X509::Certificate.new cert.serial = serial cert.version = 2 cert.not_before = Time.now cert.not_after = Time.now + 6000 cert.subject = node_name cert.public_key = key.public_key cert.issuer = ca_cert.subject csr_extension_factory = OpenSSL::X509::ExtensionFactory.new csr_extension_factory.subject_certificate = cert csr_extension_factory.issuer_certificate = ca_cert + cert.add_extension csr_extension_factory.create_extension( + 'subjectAltName', + sans_list.join(',') + ) cert.add_extension csr_extension_factory.create_extension( 'basicConstraints', 'CA:FALSE' ) cert.add_extension csr_extension_factory.create_extension( 'keyUsage', 'keyEncipherment,dataEncipherment,digitalSignature' ) + cert.add_extension csr_extension_factory.create_extension( + 'extendedKeyUsage', + 'serverAuth,clientAuth' + ) cert.add_extension csr_extension_factory.create_extension( 'subjectKeyIdentifier', 'hash' ) [key, cert, serial] end diff --git a/spec/spec_utilities.rb b/spec/spec_utilities.rb index 5ca112e..489bd6a 100644 --- a/spec/spec_utilities.rb +++ b/spec/spec_utilities.rb @@ -1,136 +1,134 @@ require 
'bcrypt' require 'open-uri' def to_agent_version(puppet_version) # REF: https://docs.puppet.com/puppet/latest/reference/about_agent.html { # Puppet => Agent '4.10.4' => '1.10.4', '4.10.3' => '1.10.3', '4.10.2' => '1.10.2', '4.10.1' => '1.10.1', '4.10.0' => '1.10.0', '4.9.4' => '1.9.3', '4.8.2' => '1.8.3', '4.7.1' => '1.7.2', '4.7.0' => '1.7.1', '4.6.2' => '1.6.2', '4.5.3' => '1.5.3', '4.4.2' => '1.4.2', '4.4.1' => '1.4.1', '4.4.0' => '1.4.0', '4.3.2' => '1.3.6', '4.3.1' => '1.3.2', '4.3.0' => '1.3.0', '4.2.3' => '1.2.7', '4.2.2' => '1.2.6', '4.2.1' => '1.2.2', '4.2.0' => '1.2.1', '4.1.0' => '1.1.1', '4.0.0' => '1.0.1' }[puppet_version] end def derive_artifact_urls_for(full_version, plugins = ['analysis-icu']) derive_full_package_url(full_version).merge( derive_plugin_urls_for(full_version, plugins) ) end def derive_full_package_url(full_version, extensions = %w[deb rpm]) extensions.map do |ext| - url = if full_version.start_with? '2' - "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-#{full_version}.#{ext}" - else + url = if full_version.start_with? '6' "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-#{full_version}.#{ext}" + elsif ext == 'deb' + "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-#{full_version}-amd64.#{ext}" + else + "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-#{full_version}-x86_64.#{ext}" end [url, File.basename(url)] end.to_h end def derive_plugin_urls_for(full_version, plugins = ['analysis-icu']) plugins.map do |plugin| - url = if full_version.start_with? '2' - "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/plugin/#{plugin}/#{full_version}/#{plugin}-#{full_version}.zip" - else - "https://artifacts.elastic.co/downloads/elasticsearch-plugins/#{plugin}/#{plugin}-#{full_version}.zip" - end + url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/#{plugin}/#{plugin}-#{full_version}.zip" [url, File.join('plugins', File.basename(url))] end.to_h end def artifact(file, fixture_path = []) File.join(%w[spec fixtures artifacts] + fixture_path + [File.basename(file)]) end def get(url, file_path) puts "Fetching #{url}..." found = false until found uri = URI.parse(url) conn = Net::HTTP.new(uri.host, uri.port) conn.use_ssl = true res = conn.get(uri.path) if res.header['location'] url = res.header['location'] else found = true end end File.open(file_path, 'w+') { |fh| fh.write res.body } end def fetch_archives(archives) archives.each do |url, orig_fp| fp = "spec/fixtures/artifacts/#{orig_fp}" if File.exist? fp if fp.end_with? 'tar.gz' and !system("tar -tzf #{fp} &>/dev/null") puts "Archive #{fp} corrupt, re-fetching..." File.delete fp else puts "Already retrieved intact archive #{fp}..." next end end get url, fp end end -def pid_for(instance) - if fact('operatingsystem') == 'Ubuntu' \ - and Gem::Version.new(fact('operatingsystemrelease')) \ - < Gem::Version.new('15.04') - "/var/run/elasticsearch-#{instance}.pid" - elsif fact('operatingsystem') == 'Debian' \ +def pid_file + if fact('operatingsystem') == 'Debian' \ and fact('lsbmajdistrelease').to_i <= 7 - "/var/run/elasticsearch-#{instance}.pid" + '/var/run/elasticsearch.pid' else - "/var/run/elasticsearch/elasticsearch-#{instance}.pid" + '/var/run/elasticsearch/elasticsearch.pid' end end def vault_available? - %w[VAULT_ADDR VAULT_APPROLE_ROLE_ID VAULT_APPROLE_SECRET_ID VAULT_PATH].select do |var| - ENV[var].nil? - end.empty? 
+ if ENV['CI'] + %w[VAULT_ADDR VAULT_APPROLE_ROLE_ID VAULT_APPROLE_SECRET_ID VAULT_PATH].select do |var| + ENV[var].nil? + end.empty? + else + true + end end def http_retry(url) retries ||= 0 open(url).read rescue retry if (retries += 1) < 3 end # Helper to store arbitrary testing setting values def v RSpec.configuration.v end def semver(version) Gem::Version.new version end def bcrypt(value) BCrypt::Password.create(value) end diff --git a/spec/templates/002_jvm.options.erb_spec.rb b/spec/templates/002_jvm.options.erb_spec.rb deleted file mode 100644 index e5b28cf..0000000 --- a/spec/templates/002_jvm.options.erb_spec.rb +++ /dev/null @@ -1,24 +0,0 @@ -require 'spec_helper' -require 'yaml' - -describe 'jvm.options.erb' do - let :harness do - TemplateHarness.new( - 'templates/etc/elasticsearch/jvm.options.erb' - ) - end - - it 'render the same string each time' do - harness.set( - '@jvm_options', [ - '-Xms2g', - '-Xmx2g' - ] - ) - - first_render = harness.run - second_render = harness.run - - expect(first_render).to eq(second_render) - end -end diff --git a/spec/unit/facter/es_facts_spec.rb b/spec/unit/facter/es_facts_spec.rb index 4aae002..d6bfe9e 100644 --- a/spec/unit/facter/es_facts_spec.rb +++ b/spec/unit/facter/es_facts_spec.rb @@ -1,137 +1,107 @@ require 'spec_helper' require 'webmock/rspec' describe 'elasticsearch facts' do before(:each) do - Dir[File.join(RSpec.configuration.fixture_path, 'facts', '*.json')].map do |json| - File.basename(json).split('.').first.split('-').first - end.uniq.sort.each_with_index do |instance, n| - stub_request(:get, "http://localhost:920#{n}/") - .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) - .to_return( - :status => 200, - :body => File.read( - File.join( - fixture_path, - "facts/#{instance}-root.json" - ) + stub_request(:get, 'http://localhost:9200/') + .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) + .to_return( + :status => 200, + :body => File.read( + File.join( + fixture_path, + 'facts/Warlock-root.json' ) ) + ) - stub_request(:get, "http://localhost:920#{n}/_nodes/#{instance}") - .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) - .to_return( - :status => 200, - :body => File.read( - File.join( - fixture_path, - "facts/#{instance}-nodes.json" - ) + stub_request(:get, 'http://localhost:9200/_nodes/Warlock') + .with(:headers => { 'Accept' => '*/*', 'User-Agent' => 'Ruby' }) + .to_return( + :status => 200, + :body => File.read( + File.join( + fixture_path, + 'facts/Warlock-nodes.json' ) ) - end + ) allow(File) .to receive(:directory?) .and_return(true) - allow(Dir) - .to receive(:foreach) - .and_yield('es01').and_yield('es02').and_yield('es03').and_yield('es-ssl') - - %w[es01 es02 es03 es-ssl].each do |instance| - allow(File) - .to receive(:readable?) - .with("/etc/elasticsearch/#{instance}/elasticsearch.yml") - .and_return(true) - end + allow(File) + .to receive(:readable?) 
+ .and_return(true) allow(YAML) .to receive(:load_file) - .with('/etc/elasticsearch/es01/elasticsearch.yml', any_args) + .with('/etc/elasticsearch/elasticsearch.yml', any_args) .and_return({}) - allow(YAML) - .to receive(:load_file) - .with('/etc/elasticsearch/es02/elasticsearch.yml', any_args) - .and_return('http.port' => '9201') - - allow(YAML) - .to receive(:load_file) - .with('/etc/elasticsearch/es03/elasticsearch.yml', any_args) - .and_return('http.port' => '9202') - - allow(YAML) - .to receive(:load_file) - .with('/etc/elasticsearch/es-ssl/elasticsearch.yml', any_args) - .and_return( - 'xpack.security.http.ssl.enabled' => true, - 'shield.http.ssl' => true, - 'http.port' => '9443' - ) - require 'lib/facter/es_facts' end - describe 'elasticsearch_ports' do - it 'finds listening ports' do - expect(Facter.fact(:elasticsearch_ports).value.split(',')) - .to contain_exactly('9200', '9201', '9202', '9443') + describe 'elasticsearch_port' do + it 'finds listening port' do + expect(Facter.fact(:elasticsearch_port).value) + .to eq('9200') end end describe 'instance' do it 'returns the node name' do - expect(Facter.fact(:elasticsearch_9200_name).value).to eq('Warlock') + expect(Facter.fact(:elasticsearch_name).value).to eq('Warlock') end it 'returns the node version' do - expect(Facter.fact(:elasticsearch_9200_version).value).to eq('1.4.2') + expect(Facter.fact(:elasticsearch_version).value).to eq('1.4.2') end it 'returns the cluster name' do - expect(Facter.fact(:elasticsearch_9200_cluster_name).value) + expect(Facter.fact(:elasticsearch_cluster_name).value) .to eq('elasticsearch') end it 'returns the node ID' do - expect(Facter.fact(:elasticsearch_9200_node_id).value) + expect(Facter.fact(:elasticsearch_node_id).value) .to eq('yQAWBO3FS8CupZnSvAVziQ') end it 'returns the mlockall boolean' do - expect(Facter.fact(:elasticsearch_9200_mlockall).value).to be_falsy + expect(Facter.fact(:elasticsearch_mlockall).value).to be_falsy end it 'returns installed plugins' do - expect(Facter.fact(:elasticsearch_9200_plugins).value).to eq('kopf') + expect(Facter.fact(:elasticsearch_plugins).value).to eq('kopf') end describe 'kopf plugin' do it 'returns the correct version' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_version).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_version).value) .to eq('1.4.3') end it 'returns the correct description' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_description).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_description).value) .to eq('kopf - simple web administration tool for ElasticSearch') end it 'returns the plugin URL' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_url).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_url).value) .to eq('/_plugin/kopf/') end it 'returns the plugin JVM boolean' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_jvm).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_jvm).value) .to be_falsy end it 'returns the plugin _site boolean' do - expect(Facter.fact(:elasticsearch_9200_plugin_kopf_site).value) + expect(Facter.fact(:elasticsearch_plugin_kopf_site).value) .to be_truthy end end # of describe plugin end # of describe instance end # of describe elasticsearch facts diff --git a/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb b/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb index 341d79e..a679b50 100644 --- a/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb +++ 
b/spec/unit/provider/elasticsearch_keystore/elasticsearch_keystore_spec.rb @@ -1,157 +1,161 @@ require 'spec_helper_rspec' shared_examples 'keystore instance' do |instance| describe "instance #{instance}" do subject { described_class.instances.find { |x| x.name == instance } } it { expect(subject.exists?).to be_truthy } it { expect(subject.name).to eq(instance) } it { expect(subject.settings) .to eq(['node.name', 'cloud.aws.access_key']) } end end describe Puppet::Type.type(:elasticsearch_keystore).provider(:elasticsearch_keystore) do let(:executable) { '/usr/share/elasticsearch/bin/elasticsearch-keystore' } let(:instances) { [] } before do Facter.clear Facter.add('osfamily') { setcode { 'Debian' } } allow(described_class) .to receive(:command) .with(:keystore) .and_return(executable) allow(File).to receive(:exist?) .with('/etc/elasticsearch/scripts/elasticsearch.keystore') .and_return(false) end describe 'instances' do before do allow(Dir).to receive(:[]) .with('/etc/elasticsearch/*') .and_return((['scripts'] + instances).map do |directory| "/etc/elasticsearch/#{directory}" end) instances.each do |instance| instance_dir = "/etc/elasticsearch/#{instance}" defaults_file = "/etc/default/elasticsearch-#{instance}" allow(File).to receive(:exist?) .with("#{instance_dir}/elasticsearch.keystore") .and_return(true) expect(described_class) .to receive(:execute) .with( [executable, 'list'], :custom_environment => { 'ES_INCLUDE' => defaults_file, 'ES_PATH_CONF' => "/etc/elasticsearch/#{instance}" }, - :uid => 'elasticsearch', :gid => 'elasticsearch' + :uid => 'elasticsearch', + :gid => 'elasticsearch', + :failonfail => true ) .and_return( Puppet::Util::Execution::ProcessOutput.new( "node.name\ncloud.aws.access_key\n", 0 ) ) end end it 'should have an instance method' do expect(described_class).to respond_to(:instances) end context 'without any keystores' do it 'should return no resources' do expect(described_class.instances.size).to eq(0) end end context 'with one instance' do let(:instances) { ['es-01'] } it { expect(described_class.instances.length).to eq(instances.length) } include_examples 'keystore instance', 'es-01' end context 'with multiple instances' do let(:instances) { ['es-01', 'es-02'] } it { expect(described_class.instances.length).to eq(instances.length) } include_examples 'keystore instance', 'es-01' include_examples 'keystore instance', 'es-02' end end # of describe instances describe 'prefetch' do it 'should have a prefetch method' do expect(described_class).to respond_to :prefetch end end describe 'flush' do let(:provider) { described_class.new(:name => 'es-03') } let(:resource) do Puppet::Type.type(:elasticsearch_keystore).new( :name => 'es-03', :provider => provider ) end it 'creates the keystore' do expect(described_class).to( receive(:execute) .with( [executable, 'create'], :custom_environment => { 'ES_INCLUDE' => '/etc/default/elasticsearch-es-03', 'ES_PATH_CONF' => '/etc/elasticsearch/es-03' }, - :uid => 'elasticsearch', :gid => 'elasticsearch' + :uid => 'elasticsearch', + :gid => 'elasticsearch', + :failonfail => true ) .and_return(Puppet::Util::Execution::ProcessOutput.new('', 0)) ) resource[:ensure] = :present provider.create provider.flush end it 'deletes the keystore' do expect(File).to( receive(:delete) .with(File.join(%w[/ etc elasticsearch es-03 elasticsearch.keystore])) ) resource[:ensure] = :absent provider.destroy provider.flush end it 'updates settings' do settings = { 'cloud.aws.access_key' => 'AKIAFOOBARFOOBAR', 'cloud.aws.secret_key' => 'AKIAFOOBARFOOBAR' } 
settings.each do |setting, value| expect(provider.class).to( receive(:run_keystore) .with(['add', '--force', '--stdin', setting], 'es-03', '/etc/elasticsearch', value) .and_return(Puppet::Util::Execution::ProcessOutput.new('', 0)) ) end # Note that the settings hash is passed in wrapped in an array to mimic # the behavior in real-world puppet runs. resource[:ensure] = :present resource[:settings] = [settings] provider.settings = [settings] provider.flush end end # of describe flush end # of describe Puppet::Type elasticsearch_keystore diff --git a/spec/unit/provider/elasticsearch_license/all_spec.rb b/spec/unit/provider/elasticsearch_license/all_spec.rb deleted file mode 100644 index 8018274..0000000 --- a/spec/unit/provider/elasticsearch_license/all_spec.rb +++ /dev/null @@ -1,66 +0,0 @@ -require_relative '../../../helpers/unit/provider/elasticsearch_rest_shared_examples' - -{ - 'shield' => 'license', - 'xpack' => 'xpack/license' -}.each_pair do |plugin, endpoint| - describe Puppet::Type.type(:elasticsearch_license).provider(plugin.to_sym) do - let(:name) { plugin } - - let(:example_1) do - { - :name => plugin, - :ensure => :present, - :provider => plugin.to_sym, - :content => { - 'license' => { - 'status' => 'active', - 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', - 'type' => 'trial', - 'issue_date' => '2018-02-22T23:12:05.550Z', - 'issue_date_in_millis' => 1_519_341_125_550, - 'expiry_date' => '2018-03-24T23:12:05.550Z', - 'expiry_date_in_millis' => 1_521_933_125_550, - 'max_nodes' => 1_000, - 'issued_to' => 'test', - 'issuer' => 'elasticsearch', - 'start_date_in_millis' => 1_513_814_400_000 - } - } - } - end - - let(:json_1) do - { - 'license' => { - 'status' => 'active', - 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', - 'type' => 'trial', - 'issue_date' => '2018-02-22T23:12:05.550Z', - 'issue_date_in_millis' => '1519341125550', - 'expiry_date' => '2018-03-24T23:12:05.550Z', - 'expiry_date_in_millis' => '1521933125550', - 'max_nodes' => '1000', - 'issued_to' => 'test', - 'issuer' => 'elasticsearch', - 'start_date_in_millis' => '1513814400000' - } - } - end - - let(:resource) { Puppet::Type::Elasticsearch_index.new props } - let(:provider) { described_class.new resource } - let(:props) do - { - :name => name, - :settings => { - 'index' => { - 'number_of_replicas' => 0 - } - } - } - end - - include_examples 'REST API', endpoint, nil, true - end -end diff --git a/spec/unit/provider/elasticsearch_license/xpack_spec.rb b/spec/unit/provider/elasticsearch_license/xpack_spec.rb new file mode 100644 index 0000000..978b3bd --- /dev/null +++ b/spec/unit/provider/elasticsearch_license/xpack_spec.rb @@ -0,0 +1,61 @@ +require_relative '../../../helpers/unit/provider/elasticsearch_rest_shared_examples' + +describe Puppet::Type.type(:elasticsearch_license).provider(:xpack) do + let(:name) { 'xpack' } + + let(:example_1) do + { + :name => 'xpack', + :ensure => :present, + :provider => :xpack, + :content => { + 'license' => { + 'status' => 'active', + 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', + 'type' => 'trial', + 'issue_date' => '2018-02-22T23:12:05.550Z', + 'issue_date_in_millis' => 1_519_341_125_550, + 'expiry_date' => '2018-03-24T23:12:05.550Z', + 'expiry_date_in_millis' => 1_521_933_125_550, + 'max_nodes' => 1_000, + 'issued_to' => 'test', + 'issuer' => 'elasticsearch', + 'start_date_in_millis' => 1_513_814_400_000 + } + } + } + end + + let(:json_1) do + { + 'license' => { + 'status' => 'active', + 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', + 'type' => 'trial', + 'issue_date' 
=> '2018-02-22T23:12:05.550Z', + 'issue_date_in_millis' => '1519341125550', + 'expiry_date' => '2018-03-24T23:12:05.550Z', + 'expiry_date_in_millis' => '1521933125550', + 'max_nodes' => '1000', + 'issued_to' => 'test', + 'issuer' => 'elasticsearch', + 'start_date_in_millis' => '1513814400000' + } + } + end + + let(:resource) { Puppet::Type::Elasticsearch_index.new props } + let(:provider) { described_class.new resource } + let(:props) do + { + :name => name, + :settings => { + 'index' => { + 'number_of_replicas' => 0 + } + } + } + end + + include_examples 'REST API', 'xpack/license', nil, true +end diff --git a/spec/unit/provider/elasticsearch_plugin/plugin_spec.rb b/spec/unit/provider/elasticsearch_plugin/plugin_spec.rb deleted file mode 100644 index 08365ef..0000000 --- a/spec/unit/provider/elasticsearch_plugin/plugin_spec.rb +++ /dev/null @@ -1,23 +0,0 @@ -require_relative 'shared_examples' - -provider_class = Puppet::Type.type(:elasticsearch_plugin).provider(:plugin) - -describe provider_class do - let(:resource_name) { 'lmenezes/elasticsearch-kopf' } - let(:resource) do - Puppet::Type.type(:elasticsearch_plugin).new( - :name => resource_name, - :ensure => :present, - :provider => 'plugin' - ) - end - let(:provider) do - provider = provider_class.new - provider.resource = resource - provider - end - let(:klass) { provider_class } - - include_examples 'plugin provider', '1.7.0' - include_examples 'plugin provider', '2.0.0' -end diff --git a/spec/unit/provider/elasticsearch_plugin/elasticsearch_plugin_spec.rb b/spec/unit/provider/elasticsearch_plugin/ruby_spec.rb similarity index 92% rename from spec/unit/provider/elasticsearch_plugin/elasticsearch_plugin_spec.rb rename to spec/unit/provider/elasticsearch_plugin/ruby_spec.rb index 090b709..d3e3796 100644 --- a/spec/unit/provider/elasticsearch_plugin/elasticsearch_plugin_spec.rb +++ b/spec/unit/provider/elasticsearch_plugin/ruby_spec.rb @@ -1,23 +1,23 @@ require_relative 'shared_examples' provider_class = Puppet::Type.type(:elasticsearch_plugin).provider(:elasticsearch_plugin) describe provider_class do let(:resource_name) { 'lmenezes/elasticsearch-kopf' } let(:resource) do Puppet::Type.type(:elasticsearch_plugin).new( :name => resource_name, :ensure => :present, :provider => 'elasticsearch_plugin' ) end let(:provider) do provider = provider_class.new provider.resource = resource provider end let(:shortname) { provider.plugin_name(resource_name) } let(:klass) { provider_class } - include_examples 'plugin provider', '5.0.1' + include_examples 'plugin provider', '7.0.0' end diff --git a/spec/unit/provider/elasticsearch_plugin/shared_examples.rb b/spec/unit/provider/elasticsearch_plugin/shared_examples.rb index 250ba0f..094390a 100644 --- a/spec/unit/provider/elasticsearch_plugin/shared_examples.rb +++ b/spec/unit/provider/elasticsearch_plugin/shared_examples.rb @@ -1,171 +1,147 @@ require 'spec_helper_rspec' shared_examples 'plugin provider' do |version| describe "elasticsearch #{version}" do before(:each) do allow(File).to receive(:open) allow(provider).to receive(:es_version).and_return version end describe 'setup' do it 'installs with default parameters' do expect(provider).to receive(:plugin).with( ['install', resource_name].tap do |args| if Puppet::Util::Package.versioncmp(version, '2.2.0') >= 0 args.insert 1, '--batch' end end ) provider.create end it 'installs via URLs' do resource[:url] = 'http://url/to/my/plugin.zip' expect(provider).to receive(:plugin).with( ['install'] + ['http://url/to/my/plugin.zip'].tap do |args| 
args.unshift('kopf', '--url') if version.start_with? '1' if Puppet::Util::Package.versioncmp(version, '2.2.0') >= 0 args.unshift '--batch' end args end ) provider.create end it 'installs with a local file' do resource[:source] = '/tmp/plugin.zip' expect(provider).to receive(:plugin).with( ['install'] + ['file:///tmp/plugin.zip'].tap do |args| args.unshift('kopf', '--url') if version.start_with? '1' if Puppet::Util::Package.versioncmp(version, '2.2.0') >= 0 args.unshift '--batch' end args end ) provider.create end describe 'proxying' do it 'installs behind a proxy' do resource[:proxy] = 'http://localhost:3128' - if version.start_with? '2' - expect(provider) - .to receive(:plugin) - .with([ - '-Dhttp.proxyHost=localhost', - '-Dhttp.proxyPort=3128', - '-Dhttps.proxyHost=localhost', - '-Dhttps.proxyPort=3128', - 'install', - resource_name - ]) - provider.create - else - expect(provider.with_environment do - ENV['ES_JAVA_OPTS'] - end).to eq([ + expect(provider) + .to receive(:plugin) + .with([ '-Dhttp.proxyHost=localhost', '-Dhttp.proxyPort=3128', '-Dhttps.proxyHost=localhost', - '-Dhttps.proxyPort=3128' - ].join(' ')) - end + '-Dhttps.proxyPort=3128', + 'install', + '--batch', + resource_name + ]) + provider.create end it 'uses authentication credentials' do resource[:proxy] = 'http://elastic:password@es.local:8080' - if version.start_with? '2' - expect(provider) - .to receive(:plugin) - .with([ - '-Dhttp.proxyHost=es.local', - '-Dhttp.proxyPort=8080', - '-Dhttp.proxyUser=elastic', - '-Dhttp.proxyPassword=password', - '-Dhttps.proxyHost=es.local', - '-Dhttps.proxyPort=8080', - '-Dhttps.proxyUser=elastic', - '-Dhttps.proxyPassword=password', - 'install', - resource_name - ]) - provider.create - else - expect(provider.with_environment do - ENV['ES_JAVA_OPTS'] - end).to eq([ + expect(provider) + .to receive(:plugin) + .with([ '-Dhttp.proxyHost=es.local', '-Dhttp.proxyPort=8080', '-Dhttp.proxyUser=elastic', '-Dhttp.proxyPassword=password', '-Dhttps.proxyHost=es.local', '-Dhttps.proxyPort=8080', '-Dhttps.proxyUser=elastic', - '-Dhttps.proxyPassword=password' - ].join(' ')) - end + '-Dhttps.proxyPassword=password', + 'install', + '--batch', + resource_name + ]) + provider.create end end describe 'configdir' do it 'sets the ES_PATH_CONF env var' do resource[:configdir] = '/etc/elasticsearch' expect(provider.with_environment do ENV['ES_PATH_CONF'] end).to eq('/etc/elasticsearch') end end end # of setup describe 'java_opts' do it 'uses authentication credentials' do resource[:java_opts] = ['-Des.plugins.staging=4a2ffaf5'] expect(provider.with_environment do ENV['ES_JAVA_OPTS'] end).to eq('-Des.plugins.staging=4a2ffaf5') end end describe 'java_home' do it 'sets the JAVA_HOME env var' do resource[:java_home] = '/opt/foo' expect(provider.with_environment do ENV['JAVA_HOME'] end).to eq('/opt/foo') end end describe 'java_home unset' do - existing_java_home = ENV['JAVA_HOME'] - it 'does not change JAVA_HOME env var' do + elasticsearch_java_home = '/usr/share/elasticsearch/jdk' + it 'defaults to the elasticsearch bundled JDK' do resource[:java_home] = '' expect(provider.with_environment do ENV['JAVA_HOME'] - end).to eq(existing_java_home) + end).to eq(elasticsearch_java_home) end end describe 'plugin_name' do let(:resource_name) { 'appbaseio/dejaVu' } it 'maintains mixed-case names' do expect(provider.plugin_path).to include('dejaVu') end end describe 'removal' do it 'uninstalls the plugin' do expect(provider).to receive(:plugin).with( ['remove', resource_name.split('-').last] ) provider.destroy end end end 
end diff --git a/spec/unit/provider/elasticsearch_role/all_spec.rb b/spec/unit/provider/elasticsearch_role/all_spec.rb deleted file mode 100644 index 7ce018c..0000000 --- a/spec/unit/provider/elasticsearch_role/all_spec.rb +++ /dev/null @@ -1,61 +0,0 @@ -require 'spec_helper_rspec' - -[:oss_xpack, :shield, :xpack].each do |provider| - describe Puppet::Type.type(:elasticsearch_role).provider(provider) do - describe 'instances' do - it 'should have an instance method' do - expect(described_class).to respond_to :instances - end - - context 'with no roles' do - it 'should return no resources' do - expect(described_class.parse("\n")).to eq([]) - end - end - - context 'with one role' do - it 'should return one resource' do - expect(described_class.parse(%( - admin: - cluster: all - indices: - '*': all - ))[0]).to eq( - :ensure => :present, - :name => 'admin', - :privileges => { - 'cluster' => 'all', - 'indices' => { - '*' => 'all' - } - } - ) - end - end - - context 'with multiple roles' do - it 'should return three resources' do - expect(described_class.parse(%( - admin: - cluster: all - indices: - '*': all - user: - indices: - '*': read - power_user: - cluster: monitor - indices: - '*': all - )).length).to eq(3) - end - end - end # of describe instances - - describe 'prefetch' do - it 'should have a prefetch method' do - expect(described_class).to respond_to :prefetch - end - end - end # of describe puppet type -end diff --git a/spec/unit/provider/elasticsearch_role/ruby_spec.rb b/spec/unit/provider/elasticsearch_role/ruby_spec.rb new file mode 100644 index 0000000..bbaafed --- /dev/null +++ b/spec/unit/provider/elasticsearch_role/ruby_spec.rb @@ -0,0 +1,59 @@ +require 'spec_helper_rspec' + +describe Puppet::Type.type(:elasticsearch_role).provider(:ruby) do + describe 'instances' do + it 'should have an instance method' do + expect(described_class).to respond_to :instances + end + + context 'with no roles' do + it 'should return no resources' do + expect(described_class.parse("\n")).to eq([]) + end + end + + context 'with one role' do + it 'should return one resource' do + expect(described_class.parse(%( + admin: + cluster: all + indices: + '*': all + ))[0]).to eq( + :ensure => :present, + :name => 'admin', + :privileges => { + 'cluster' => 'all', + 'indices' => { + '*' => 'all' + } + } + ) + end + end + + context 'with multiple roles' do + it 'should return three resources' do + expect(described_class.parse(%( + admin: + cluster: all + indices: + '*': all + user: + indices: + '*': read + power_user: + cluster: monitor + indices: + '*': all + )).length).to eq(3) + end + end + end # of describe instances + + describe 'prefetch' do + it 'should have a prefetch method' do + expect(described_class).to respond_to :prefetch + end + end +end diff --git a/spec/unit/provider/elasticsearch_role_mapping/all_spec.rb b/spec/unit/provider/elasticsearch_role_mapping/all_spec.rb deleted file mode 100644 index 095f5ef..0000000 --- a/spec/unit/provider/elasticsearch_role_mapping/all_spec.rb +++ /dev/null @@ -1,53 +0,0 @@ -require 'spec_helper_rspec' - -[:oss_xpack, :shield, :xpack].each do |provider| - describe Puppet::Type.type(:elasticsearch_role_mapping).provider(provider) do - describe 'instances' do - it 'should have an instance method' do - expect(described_class).to respond_to :instances - end - - context 'with no roles' do - it 'should return no resources' do - expect(described_class.parse("\n")).to eq([]) - end - end - - context 'with one role' do - it 'should return one resource' do - 
expect(described_class.parse(%( - admin: - - "cn=users,dc=example,dc=com" - ))[0]).to eq( - :ensure => :present, - :name => 'admin', - :mappings => [ - 'cn=users,dc=example,dc=com' - ] - ) - end - end - - context 'with multiple roles' do - it 'should return three resources' do - expect(described_class.parse(%( - admin: - - "cn=users,dc=example,dc=com" - user: - - "cn=users,dc=example,dc=com" - - "cn=admins,dc=example,dc=com" - - "cn=John Doe,cn=other users,dc=example,dc=com" - power_user: - - "cn=admins,dc=example,dc=com" - )).length).to eq(3) - end - end - end # of describe instances - - describe 'prefetch' do - it 'should have a prefetch method' do - expect(described_class).to respond_to :prefetch - end - end - end # of describe puppet type -end diff --git a/spec/unit/provider/elasticsearch_role_mapping/ruby_spec.rb b/spec/unit/provider/elasticsearch_role_mapping/ruby_spec.rb new file mode 100644 index 0000000..f2af032 --- /dev/null +++ b/spec/unit/provider/elasticsearch_role_mapping/ruby_spec.rb @@ -0,0 +1,51 @@ +require 'spec_helper_rspec' + +describe Puppet::Type.type(:elasticsearch_role_mapping).provider(:ruby) do + describe 'instances' do + it 'should have an instance method' do + expect(described_class).to respond_to :instances + end + + context 'with no roles' do + it 'should return no resources' do + expect(described_class.parse("\n")).to eq([]) + end + end + + context 'with one role' do + it 'should return one resource' do + expect(described_class.parse(%( + admin: + - "cn=users,dc=example,dc=com" + ))[0]).to eq( + :ensure => :present, + :name => 'admin', + :mappings => [ + 'cn=users,dc=example,dc=com' + ] + ) + end + end + + context 'with multiple roles' do + it 'should return three resources' do + expect(described_class.parse(%( + admin: + - "cn=users,dc=example,dc=com" + user: + - "cn=users,dc=example,dc=com" + - "cn=admins,dc=example,dc=com" + - "cn=John Doe,cn=other users,dc=example,dc=com" + power_user: + - "cn=admins,dc=example,dc=com" + )).length).to eq(3) + end + end + end # of describe instances + + describe 'prefetch' do + it 'should have a prefetch method' do + expect(described_class).to respond_to :prefetch + end + end +end diff --git a/spec/unit/provider/elasticsearch_user/all_spec.rb b/spec/unit/provider/elasticsearch_user/all_spec.rb deleted file mode 100644 index 3f70bb9..0000000 --- a/spec/unit/provider/elasticsearch_user/all_spec.rb +++ /dev/null @@ -1,65 +0,0 @@ -require 'spec_helper_rspec' - -[:elasticsearch_users, :esusers, :users].each do |provider| - describe Puppet::Type.type(:elasticsearch_user).provider(provider) do - describe 'instances' do - it 'should have an instance method' do - expect(described_class).to respond_to :instances - end - - context 'without users' do - before do - expect(described_class).to receive(:command_with_path).with('list').and_return( - 'No users found' - ) - end - - it 'should return no resources' do - expect(described_class.instances.size).to eq(0) - end - end - - context 'with one user' do - before do - expect(described_class).to receive(:command_with_path).with('list').and_return( - 'elastic : admin*,power_user' - ) - end - - it 'should return one resource' do - expect(described_class.instances[0].instance_variable_get( - '@property_hash' - )).to eq( - :ensure => :present, - :name => 'elastic', - :provider => provider - ) - end - end - - context 'with multiple users' do - before do - expect(described_class).to receive( - :command_with_path - ).with('list').and_return( - <<-EOL - elastic : admin* - logstash : user - kibana 
: kibana - EOL - ) - end - - it 'should return three resources' do - expect(described_class.instances.length).to eq(3) - end - end - end # of describe instances - - describe 'prefetch' do - it 'should have a prefetch method' do - expect(described_class).to respond_to :prefetch - end - end - end # of describe puppet type -end diff --git a/spec/unit/provider/elasticsearch_user/ruby_spec.rb b/spec/unit/provider/elasticsearch_user/ruby_spec.rb new file mode 100644 index 0000000..627c854 --- /dev/null +++ b/spec/unit/provider/elasticsearch_user/ruby_spec.rb @@ -0,0 +1,63 @@ +require 'spec_helper_rspec' + +describe Puppet::Type.type(:elasticsearch_user).provider(:ruby) do + describe 'instances' do + it 'should have an instance method' do + expect(described_class).to respond_to :instances + end + + context 'without users' do + before do + expect(described_class).to receive(:command_with_path).with('list').and_return( + 'No users found' + ) + end + + it 'should return no resources' do + expect(described_class.instances.size).to eq(0) + end + end + + context 'with one user' do + before do + expect(described_class).to receive(:command_with_path).with('list').and_return( + 'elastic : admin*,power_user' + ) + end + + it 'should return one resource' do + expect(described_class.instances[0].instance_variable_get( + '@property_hash' + )).to eq( + :ensure => :present, + :name => 'elastic', + :provider => :ruby + ) + end + end + + context 'with multiple users' do + before do + expect(described_class).to receive( + :command_with_path + ).with('list').and_return( + <<-EOL + elastic : admin* + logstash : user + kibana : kibana + EOL + ) + end + + it 'should return three resources' do + expect(described_class.instances.length).to eq(3) + end + end + end # of describe instances + + describe 'prefetch' do + it 'should have a prefetch method' do + expect(described_class).to respond_to :prefetch + end + end +end diff --git a/spec/unit/provider/elasticsearch_user_file/all_spec.rb b/spec/unit/provider/elasticsearch_user_file/all_spec.rb deleted file mode 100644 index 8bd1ea9..0000000 --- a/spec/unit/provider/elasticsearch_user_file/all_spec.rb +++ /dev/null @@ -1,46 +0,0 @@ -require 'spec_helper_rspec' - -[:oss_xpack, :shield, :xpack].each do |provider| - describe Puppet::Type.type(:elasticsearch_user_file).provider(provider) do - describe 'instances' do - it 'should have an instance method' do - expect(described_class).to respond_to :instances - end - - context 'without users' do - it 'should return no resources' do - expect(described_class.parse("\n")).to eq([]) - end - end - - context 'with one user' do - it 'should return one resource' do - expect(described_class.parse(%( - elastic:$2a$10$DddrTs0PS3qNknUTq0vpa.g.0JpU.jHDdlKp1xox1W5ZHX.w8Cc8C - ).gsub(/^\s+/, ''))[0]).to eq( - :name => 'elastic', - :hashed_password => '$2a$10$DddrTs0PS3qNknUTq0vpa.g.0JpU.jHDdlKp1xox1W5ZHX.w8Cc8C', - :record_type => provider - ) - end - end - - context 'with multiple users' do - it 'should return three resources' do - expect(described_class.parse(%( - - admin:$2a$10$DddrTs0PS3qNknUTq0vpa.g.0JpU.jHDdlKp1xox1W5ZHX.w8Cc8C - user:$2a$10$caYr8GhYeJ2Yo0yEhQhQvOjLSwt8Lm6MKQWx8WSnZ/L/IL5sGdQFu - kibana:$2a$10$daYr8GhYeJ2Yo0yEhQhQvOjLSwt8Lm6MKQWx8WSnZ/L/IL5sGdQFu - ).gsub(/^\s+/, '')).length).to eq(3) - end - end - end # of describe instances - - describe 'prefetch' do - it 'should have a prefetch method' do - expect(described_class).to respond_to :prefetch - end - end - end # of describe puppet type -end diff --git 
a/spec/unit/provider/elasticsearch_user_file/ruby_spec.rb b/spec/unit/provider/elasticsearch_user_file/ruby_spec.rb new file mode 100644 index 0000000..78dfc65 --- /dev/null +++ b/spec/unit/provider/elasticsearch_user_file/ruby_spec.rb @@ -0,0 +1,44 @@ +require 'spec_helper_rspec' + +describe Puppet::Type.type(:elasticsearch_user_file).provider(:ruby) do + describe 'instances' do + it 'should have an instance method' do + expect(described_class).to respond_to :instances + end + + context 'without users' do + it 'should return no resources' do + expect(described_class.parse("\n")).to eq([]) + end + end + + context 'with one user' do + it 'should return one resource' do + expect(described_class.parse(%( + elastic:$2a$10$DddrTs0PS3qNknUTq0vpa.g.0JpU.jHDdlKp1xox1W5ZHX.w8Cc8C + ).gsub(/^\s+/, ''))[0]).to eq( + :name => 'elastic', + :hashed_password => '$2a$10$DddrTs0PS3qNknUTq0vpa.g.0JpU.jHDdlKp1xox1W5ZHX.w8Cc8C', + :record_type => :ruby + ) + end + end + + context 'with multiple users' do + it 'should return three resources' do + expect(described_class.parse(%( + + admin:$2a$10$DddrTs0PS3qNknUTq0vpa.g.0JpU.jHDdlKp1xox1W5ZHX.w8Cc8C + user:$2a$10$caYr8GhYeJ2Yo0yEhQhQvOjLSwt8Lm6MKQWx8WSnZ/L/IL5sGdQFu + kibana:$2a$10$daYr8GhYeJ2Yo0yEhQhQvOjLSwt8Lm6MKQWx8WSnZ/L/IL5sGdQFu + ).gsub(/^\s+/, '')).length).to eq(3) + end + end + end # of describe instances + + describe 'prefetch' do + it 'should have a prefetch method' do + expect(described_class).to respond_to :prefetch + end + end +end diff --git a/spec/unit/provider/elasticsearch_user_roles/all_spec.rb b/spec/unit/provider/elasticsearch_user_roles/all_spec.rb deleted file mode 100644 index e79808a..0000000 --- a/spec/unit/provider/elasticsearch_user_roles/all_spec.rb +++ /dev/null @@ -1,46 +0,0 @@ -require 'spec_helper_rspec' - -[:oss_xpack, :shield, :xpack].each do |provider| - describe Puppet::Type.type(:elasticsearch_user_roles) - .provider(provider) do - describe 'instances' do - it 'should have an instance method' do - expect(described_class).to respond_to :instances - end - - context 'without roles' do - it 'should return no resources' do - expect(described_class.parse("\n")).to eq([]) - end - end - - context 'with one user' do - it 'should return one resource' do - expect(described_class.parse(%( - admin:elastic - power_user:elastic - ))[0]).to eq( - :name => 'elastic', - :roles => %w[admin power_user] - ) - end - end - - context 'with multiple users' do - it 'should return three resources' do - expect(described_class.parse(%( - admin:elastic - logstash:user - kibana:kibana - )).length).to eq(3) - end - end - end # of describe instances - - describe 'prefetch' do - it 'should have a prefetch method' do - expect(described_class).to respond_to :prefetch - end - end - end # of describe puppet type -end diff --git a/spec/unit/provider/elasticsearch_user_roles/ruby_spec.rb b/spec/unit/provider/elasticsearch_user_roles/ruby_spec.rb new file mode 100644 index 0000000..2effbd8 --- /dev/null +++ b/spec/unit/provider/elasticsearch_user_roles/ruby_spec.rb @@ -0,0 +1,44 @@ +require 'spec_helper_rspec' + +describe Puppet::Type.type(:elasticsearch_user_roles) + .provider(:ruby) do + describe 'instances' do + it 'should have an instance method' do + expect(described_class).to respond_to :instances + end + + context 'without roles' do + it 'should return no resources' do + expect(described_class.parse("\n")).to eq([]) + end + end + + context 'with one user' do + it 'should return one resource' do + expect(described_class.parse(%( + admin:elastic + 
power_user:elastic + ))[0]).to eq( + :name => 'elastic', + :roles => %w[admin power_user] + ) + end + end + + context 'with multiple users' do + it 'should return three resources' do + expect(described_class.parse(%( + admin:elastic + logstash:user + kibana:kibana + )).length).to eq(3) + end + end + end # of describe instances + + describe 'prefetch' do + it 'should have a prefetch method' do + expect(described_class).to respond_to :prefetch + end + end +end diff --git a/spec/unit/type/elasticsearch_license_spec.rb b/spec/unit/type/elasticsearch_license_spec.rb index 647cc77..2ddbdd1 100644 --- a/spec/unit/type/elasticsearch_license_spec.rb +++ b/spec/unit/type/elasticsearch_license_spec.rb @@ -1,77 +1,75 @@ require_relative '../../helpers/unit/type/elasticsearch_rest_shared_examples' describe Puppet::Type.type(:elasticsearch_license) do - %w[x-pack shield].each do |plugin| - let(:resource_name) { plugin } + let(:resource_name) { 'license' } - include_examples 'REST API types', 'license', :content + include_examples 'REST API types', 'license', :content - describe "license for #{plugin}" do - let(:resource) do - described_class.new( - :name => resource_name, - :ensure => 'present', - :content => { - 'license' => { - 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', - 'type' => 'trial', - 'issue_date_in_millis' => '1519341125550', - 'expiry_date_in_millis' => '1521933125550', - 'max_nodes' => '1000', - 'issued_to' => 'test', - 'issuer' => 'elasticsearch', - 'signature' => 'secretvalue', - 'start_date_in_millis' => '1513814400000' - } + describe 'license' do + let(:resource) do + described_class.new( + :name => resource_name, + :ensure => 'present', + :content => { + 'license' => { + 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', + 'type' => 'trial', + 'issue_date_in_millis' => '1519341125550', + 'expiry_date_in_millis' => '1521933125550', + 'max_nodes' => '1000', + 'issued_to' => 'test', + 'issuer' => 'elasticsearch', + 'signature' => 'secretvalue', + 'start_date_in_millis' => '1513814400000' } - ) - end + } + ) + end - let(:content) { resource.property(:content) } + let(:content) { resource.property(:content) } - describe 'insync?' do - let(:is_content) do - { - 'license' => { - 'status' => 'active', - 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', - 'type' => 'trial', - 'issue_date' => '2018-02-22T23:12:05.550Z', - 'issue_date_in_millis' => 1_519_341_125_550, - 'expiry_date' => '2018-03-24T23:12:05.550Z', - 'expiry_date_in_millis' => 1_521_933_125_550, - 'max_nodes' => 1_000, - 'issued_to' => 'test', - 'issuer' => 'elasticsearch', - 'start_date_in_millis' => 1_513_814_400_000 - } + describe 'insync?' 
do + let(:is_content) do + { + 'license' => { + 'status' => 'active', + 'uid' => 'cbff45e7-c553-41f7-ae4f-9205eabd80xx', + 'type' => 'trial', + 'issue_date' => '2018-02-22T23:12:05.550Z', + 'issue_date_in_millis' => 1_519_341_125_550, + 'expiry_date' => '2018-03-24T23:12:05.550Z', + 'expiry_date_in_millis' => 1_521_933_125_550, + 'max_nodes' => 1_000, + 'issued_to' => 'test', + 'issuer' => 'elasticsearch', + 'start_date_in_millis' => 1_513_814_400_000 } - end + } + end - describe 'synced properties' do - it 'only enforces defined content' do - expect(content.insync?(is_content)).to be_truthy - end + describe 'synced properties' do + it 'only enforces defined content' do + expect(content.insync?(is_content)).to be_truthy end + end - describe 'out-of-sync property' do - { - 'uid' => 'cbff45e7-c553-41f7-ae4f-xxxxxxxxxxxx', - 'issue_date_in_millis' => '1513814400000', - 'expiry_date_in_millis' => '1533167999999', - 'start_date_in_millis' => '-1' - }.each_pair do |field, value| - let(:changed_content) do - is_content['license'][field] = value - is_content - end + describe 'out-of-sync property' do + { + 'uid' => 'cbff45e7-c553-41f7-ae4f-xxxxxxxxxxxx', + 'issue_date_in_millis' => '1513814400000', + 'expiry_date_in_millis' => '1533167999999', + 'start_date_in_millis' => '-1' + }.each_pair do |field, value| + let(:changed_content) do + is_content['license'][field] = value + is_content + end - it "detection for #{field}" do - expect(content.insync?(changed_content)).to be_falsy - end + it "detection for #{field}" do + expect(content.insync?(changed_content)).to be_falsy end end end end end end diff --git a/spec/unit/type/elasticsearch_plugin_spec.rb b/spec/unit/type/elasticsearch_plugin_spec.rb index 33c58db..4523db7 100644 --- a/spec/unit/type/elasticsearch_plugin_spec.rb +++ b/spec/unit/type/elasticsearch_plugin_spec.rb @@ -1,36 +1,19 @@ require 'spec_helper_rspec' describe Puppet::Type.type(:elasticsearch_plugin) do let(:resource_name) { 'lmenezes/elasticsearch-kopf' } describe 'input validation' do describe 'when validating attributes' do [:configdir, :java_opts, :java_home, :name, :source, :url, :proxy].each do |param| it "should have a #{param} parameter" do expect(described_class.attrtype(param)).to eq(:param) end end it 'should have an ensure property' do expect(described_class.attrtype(:ensure)).to eq(:property) end end end end - -describe Puppet::Type.type(:elasticsearch_plugin).provider(:plugin) do - it 'should install a plugin' do - resource = Puppet::Type.type(:elasticsearch_plugin).new( - :name => 'lmenezes/elasticsearch-kopf', - :ensure => :present - ) - allow(File).to receive(:open) - provider = described_class.new(resource) - allow(provider).to receive(:es_version).and_return '1.7.3' - expect(provider).to receive(:plugin).with([ - 'install', - 'lmenezes/elasticsearch-kopf' - ]) - provider.create - end -end diff --git a/spec/unit/type/elasticsearch_service_file_spec.rb b/spec/unit/type/elasticsearch_service_file_spec.rb deleted file mode 100644 index 930d9c9..0000000 --- a/spec/unit/type/elasticsearch_service_file_spec.rb +++ /dev/null @@ -1,73 +0,0 @@ -require 'spec_helper_rspec' - -describe Puppet::Type.type(:elasticsearch_service_file) do - let(:resource_name) { '/usr/lib/systemd/system/elasticsearch-es-01.service' } - - describe 'attribute validation' do - [ - :name, - :defaults_location, - :group, - :instance, - :homedir, - :memlock, - :nofile, - :nproc, - :package_name, - :pid_dir, - :user - ].each do |param| - it "should have a #{param} parameter" do - 
expect(described_class.attrtype(param)).to eq(:param) - end - end - - [:content, :ensure].each do |prop| - it "should have a #{prop} property" do - expect(described_class.attrtype(prop)).to eq(:property) - end - end - - describe 'namevar validation' do - it 'should have :name as its namevar' do - expect(described_class.key_attributes).to eq([:name]) - end - end - - describe 'content' do - it 'should accept simple strings' do - expect(described_class.new( - :name => resource_name, - :content => "Test\n" - )[:content]).to eq( - "Test\n" - ) - end - end - - describe 'ensure' do - it 'should support present as a value for ensure' do - expect { described_class.new( - :name => resource_name, - :ensure => :present, - :content => {} - ) }.to_not raise_error - end - - it 'should support absent as a value for ensure' do - expect { described_class.new( - :name => resource_name, - :ensure => :absent - ) }.to_not raise_error - end - - it 'should not support other values' do - expect { described_class.new( - :name => resource_name, - :ensure => :foo, - :content => {} - ) }.to raise_error(Puppet::Error, /Invalid value/) - end - end - end # of describing when validing values -end # of describe Puppet::Type diff --git a/spec/unit/type/elasticsearch_template_spec.rb b/spec/unit/type/elasticsearch_template_spec.rb index 0d50347..abeabf0 100644 --- a/spec/unit/type/elasticsearch_template_spec.rb +++ b/spec/unit/type/elasticsearch_template_spec.rb @@ -1,134 +1,134 @@ require_relative '../../helpers/unit/type/elasticsearch_rest_shared_examples' describe Puppet::Type.type(:elasticsearch_template) do let(:resource_name) { 'test_template' } include_examples 'REST API types', 'template', :content describe 'template attribute validation' do it 'should have a source parameter' do expect(described_class.attrtype(:source)).to eq(:param) end describe 'content and source validation' do it 'should require either "content" or "source"' do expect do described_class.new( :name => resource_name, :ensure => :present ) end.to raise_error(Puppet::Error, /content.*or.*source.*required/) end it 'should fail with both defined' do expect do described_class.new( :name => resource_name, :content => {}, :source => 'puppet:///example.json' ) end.to raise_error(Puppet::Error, /simultaneous/) end it 'should parse source paths into the content property' do file_stub = 'foo' [ Puppet::FileServing::Metadata, Puppet::FileServing::Content ].each do |klass| allow(klass).to receive(:indirection) .and_return(Object) end allow(Object).to receive(:find) .and_return(file_stub) allow(file_stub).to receive(:content) .and_return('{"template":"foobar-*", "order": 1}') expect(described_class.new( :name => resource_name, :source => '/example.json' )[:content]).to include( 'template' => 'foobar-*', 'order' => 1 ) end it 'should qualify settings' do expect(described_class.new( :name => resource_name, :content => { 'settings' => { 'number_of_replicas' => '2', 'index' => { 'number_of_shards' => '3' } } } )[:content]).to eq( 'order' => 0, 'aliases' => {}, 'mappings' => {}, 'settings' => { 'index' => { 'number_of_replicas' => 2, 'number_of_shards' => 3 } } ) end it 'detects flat qualified index settings' do expect(described_class.new( :name => resource_name, :content => { 'settings' => { 'number_of_replicas' => '2', 'index.number_of_shards' => '3' } } )[:content]).to eq( 'order' => 0, 'aliases' => {}, 'mappings' => {}, 'settings' => { 'index' => { 'number_of_replicas' => 2, 'number_of_shards' => 3 } } ) end end end # of describing when validing values describe 
'insync?' do # Although users can pass the type a hash structure with any sort of values # - string, integer, or other native datatype - the Elasticsearch API # normalizes all values to strings. In order to verify that the type does # not incorrectly detect changes when values may be in string form, we take # an example template and force all values to strings to mimic what # Elasticsearch does. it 'is idempotent' do def deep_stringify(obj) if obj.is_a? Array obj.map { |element| deep_stringify(element) } elsif obj.is_a? Hash obj.merge(obj) { |_key, val| deep_stringify(val) } elsif [true, false].include? obj obj else obj.to_s end end - json = JSON.parse(File.read('spec/fixtures/templates/post_6.0.json')) + json = JSON.parse(File.read('spec/fixtures/templates/6.x.json')) is_template = described_class.new( :name => resource_name, :ensure => 'present', :content => json ).property(:content) should_template = described_class.new( :name => resource_name, :ensure => 'present', :content => deep_stringify(json) ).property(:content).should expect(is_template.insync?(should_template)).to be_truthy end end end # of describe Puppet::Type diff --git a/spec/unit/type/es_instance_conn_validator_spec.rb b/spec/unit/type/es_instance_conn_validator_spec.rb new file mode 100644 index 0000000..8ef7849 --- /dev/null +++ b/spec/unit/type/es_instance_conn_validator_spec.rb @@ -0,0 +1,88 @@ +require_relative '../../helpers/unit/type/elasticsearch_rest_shared_examples' + +describe Puppet::Type.type(:es_instance_conn_validator) do + let(:resource_name) { 'conn-validator' } + let(:conn_validator) do + Puppet::Type.type(:es_instance_conn_validator) + .new(name: resource_name) + end + + describe 'when validating attributes' do + [:name, :server, :port, :timeout, :sleep_interval].each do |param| + it 'should have a #{param} parameter' do + expect(described_class.attrtype(param)).to eq(:param) + end + end + + [:ensure].each do |prop| + it 'should have a #{prop} property' do + expect(described_class.attrtype(prop)).to eq(:property) + end + end + + describe 'namevar validation' do + it 'should have :name as its namevar' do + expect(described_class.key_attributes).to eq([:name]) + end + end + end # describe when validating attributes + + describe 'when validating values' do + describe 'ensure' do + it 'should support present as a value for ensure' do + expect { described_class.new( + :name => resource_name, + :ensure => :present + ) }.to_not raise_error + end + + it 'should support absent as a value for ensure' do + expect { described_class.new( + :name => resource_name, + :ensure => :absent + ) }.to_not raise_error + end + + it 'should not support other values' do + expect { described_class.new( + :name => resource_name, + :ensure => :foo + ) }.to raise_error(Puppet::Error, /Invalid value/) + end + end # describe 'ensure' + + describe 'timeout' do + it 'should support a numerical value' do + conn_validator[:timeout] = 120 + expect(conn_validator[:timeout]).to eq(120) + end + + it 'should have a default value of 60' do + expect(conn_validator[:timeout]).to eq(60) + end + + it 'should not support a non-numeric value' do + expect do + conn_validator[:timeout] = 'string' + end.to raise_error(Puppet::Error, /invalid value/) + end + end # describe 'timeout' + + describe 'sleep_interval' do + it 'should support a numerical value' do + conn_validator[:sleep_interval] = 120 + expect(conn_validator[:sleep_interval]).to eq(120) + end + + it 'should have a default value of 10' do + expect(conn_validator[:sleep_interval]).to eq(10) + 
end + + it 'should not support a non-numeric value' do + expect do + conn_validator[:sleep_interval] = 'string' + end.to raise_error(Puppet::Error, /invalid value/) + end + end # describe 'sleep_interval + end # describe 'when valdating values' +end # of describe Puppet::Type diff --git a/templates/etc/elasticsearch/jvm.options.erb b/templates/etc/elasticsearch/jvm.options.erb deleted file mode 100644 index c9b0dde..0000000 --- a/templates/etc/elasticsearch/jvm.options.erb +++ /dev/null @@ -1,42 +0,0 @@ -# This file is managed by Puppet -- <%= @name %> -# -# Set the 'jvm_options' parameter on the elasticsearch class to change this file. -<% -def set_default(options, match_string, default) - options.detect {|o| o.include?(match_string)} || options.push(default) -end - -defaults = { - '-Xms' => '-Xms2g', - '-Xmx' => '-Xmx2g', - 'UseConcMarkSweepGC' => '-XX:+UseConcMarkSweepGC', - 'CMSInitiatingOccupancyFraction=' => '-XX:CMSInitiatingOccupancyFraction=75', - 'UseCMSInitiatingOccupancyOnly' => '-XX:+UseCMSInitiatingOccupancyOnly', - 'AlwaysPreTouch' => '-XX:+AlwaysPreTouch', - 'server' => '-server', - '-Xss' => '-Xss1m', - '-Djava.awt.headless=' => '-Djava.awt.headless=true', - '-Dfile.encoding=' => '-Dfile.encoding=UTF-8', - '-Djna.nosys=' => '-Djna.nosys=true', - 'OmitStackTraceInFastThrow' => '-XX:-OmitStackTraceInFastThrow', - '-Dio.netty.noUnsafe' => '-Dio.netty.noUnsafe=true', - '-Dio.netty.noKeySetOptimization' => '-Dio.netty.noKeySetOptimization=true', - '-Dio.netty.recycler.maxCapacityPerThread' => '-Dio.netty.recycler.maxCapacityPerThread=0', - '-Dlog4j.shutdownHookEnabled' => '-Dlog4j.shutdownHookEnabled=false', - '-Dlog4j2.disable.jmx' => '-Dlog4j2.disable.jmx=true', - 'HeapDumpOnOutOfMemoryError' => '-XX:+HeapDumpOnOutOfMemoryError', - 'PrintGCDetails' => '-XX:+PrintGCDetails', - 'PrintGCDateStamps' => '-XX:+PrintGCDateStamps', - 'PrintTenuringDistribution' => '-XX:+PrintTenuringDistribution', - 'Xloggc' => "-Xloggc:#{@logdir}/gc.log", - 'UseGCLogFileRotation' => '-XX:+UseGCLogFileRotation', - 'NumberOfGCLogFiles' => '-XX:NumberOfGCLogFiles=32', - 'GCLogFileSize' => '-XX:GCLogFileSize=64m', -} -defaults.each {|k,v| set_default(@jvm_options, k, v)} - --%> - -<% @jvm_options.sort.each do |line| -%> -<%= line %> -<% end -%> diff --git a/templates/etc/elasticsearch/log4j2.properties.erb b/templates/etc/elasticsearch/log4j2.properties.erb index 4e4d831..89ba143 100644 --- a/templates/etc/elasticsearch/log4j2.properties.erb +++ b/templates/etc/elasticsearch/log4j2.properties.erb @@ -1,99 +1,99 @@ -status = <%= @logging_level.downcase %> +status = <%= scope['elasticsearch::logging_level'].downcase %> # log action execution errors for easier debugging logger.action.name = org.elasticsearch.action logger.action.level = debug appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n -<%- if @file_rolling_type == 'file' -%> +<%- if scope['elasticsearch::file_rolling_type'] == 'file' -%> appender.fixed.type = File appender.fixed.name = fixed appender.fixed.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.fixed.layout.type = PatternLayout appender.fixed.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n <%- else -%> appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log 
appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n -<%- if @file_rolling_type == 'dailyRollingFile' -%> +<%- if scope['elasticsearch::file_rolling_type'] == 'dailyRollingFile' -%> appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 appender.rolling.policies.time.modulate = true -<%- elsif @file_rolling_type == 'rollingFile' -%> +<%- elsif scope['elasticsearch::file_rolling_type'] == 'rollingFile' -%> appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%i.log.gz appender.rolling.policies.type = Policies appender.rolling.policies.size.type = SizeBasedTriggeringPolicy -appender.rolling.policies.size.size = <%= @rolling_file_max_file_size %> +appender.rolling.policies.size.size = <%= scope['elasticsearch::rolling_file_max_file_size'] %> appender.rolling.strategy.type = DefaultRolloverStrategy -appender.rolling.strategy.max = <%= @rolling_file_max_backup_index %> +appender.rolling.strategy.max = <%= scope['elasticsearch::rolling_file_max_backup_index'] %> <%- end -%> <%- end -%> -rootLogger.level = <%= @logging_level.downcase %> +rootLogger.level = <%= scope['elasticsearch::logging_level'].downcase %> rootLogger.appenderRef.console.ref = console -<%- if @file_rolling_type == 'file' -%> +<%- if scope['elasticsearch::file_rolling_type'] == 'file' -%> rootLogger.appenderRef.fixed.ref = fixed <%- else -%> rootLogger.appenderRef.rolling.ref = rolling <%- end -%> appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log appender.deprecation_rolling.layout.type = PatternLayout appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz appender.deprecation_rolling.policies.type = Policies appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling.policies.size.size = 1GB appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy appender.deprecation_rolling.strategy.max = 4 logger.deprecation.name = org.elasticsearch.deprecation -<%- if @deprecation_logging -%> -logger.deprecation.level = <%= @deprecation_logging_level.downcase %> +<%- if scope['elasticsearch::deprecation_logging'] -%> +logger.deprecation.level = <%= scope['elasticsearch::deprecation_logging_level'].downcase %> <%- else -%> logger.deprecation.level = warn <%- end -%> logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling logger.deprecation.additivity = false appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log appender.index_search_slowlog_rolling.layout.type = PatternLayout appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n appender.index_search_slowlog_rolling.filePattern = 
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false
diff --git a/templates/etc/elasticsearch/logging.yml.erb b/templates/etc/elasticsearch/logging.yml.erb
deleted file mode 100644
index 795fa78..0000000
--- a/templates/etc/elasticsearch/logging.yml.erb
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file is managed by Puppet, do not edit manually, your changes *will* be overwritten!
-#
-# Please see the source file for context and more information:
-#
-# https://github.com/elasticsearch/elasticsearch/blob/master/config/logging.yml
-#
-
-es.logger.level: <%= @logging_level %>
-rootLogger: <%= @logging_level %>, console, file
-
-# ----- Configuration set by Puppet ---------------------------------------------
-
-<% @logging_hash.sort.each do |key,value| %>
-logger.<%= key %>: <%= value %>
-<% end %>
-
-<% if @deprecation_logging -%>
-logger.deprecation: <%= @deprecation_logging_level %>, deprecation_log_file
-<% end -%>
-
-# -------------------------------------------------------------------------------
-
-additivity:
-  index.search.slowlog: false
-  index.indexing.slowlog: false
-
-appender:
-  console:
-    type: console
-    layout:
-      type: consolePattern
-      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
-  file:
-    type: <%= @file_rolling_type %>
-    file: ${path.logs}/${cluster.name}.log
-    <%- if @file_rolling_type == 'dailyRollingFile' -%>
-    datePattern: <%= @daily_rolling_date_pattern %>
-    <%- elsif @file_rolling_type == 'rollingFile' -%>
-    maxBackupIndex: <%= @rolling_file_max_backup_index %>
-    maxFileSize: <%= @rolling_file_max_file_size %>
-    <%- end -%>
-    layout:
-      type: pattern
-      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
-  index_search_slow_log_file:
-    type: dailyRollingFile
-    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
-    datePattern: "'.'yyyy-MM-dd"
-    layout:
-      type: pattern
-      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
-  index_indexing_slow_log_file:
-    type: dailyRollingFile
-    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
-    datePattern: "'.'yyyy-MM-dd"
-    layout:
-      type: pattern
-      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
-<% if @deprecation_logging -%>
-  deprecation_log_file:
-    type: dailyRollingFile
-    file: ${path.logs}/${cluster.name}_deprecation.log
-    datePattern: "'.'yyyy-MM-dd"
-    layout:
-      type: pattern
-      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-<% end %>
diff --git a/templates/etc/sysconfig/defaults.erb b/templates/etc/sysconfig/defaults.erb
index 0f16a88..29cd20c 100644
--- a/templates/etc/sysconfig/defaults.erb
+++ b/templates/etc/sysconfig/defaults.erb
@@ -1,6 +1,6 @@
-<%- unless @new_init_defaults.key? 'CONF_FILE' -%>
+<%- unless scope['elasticsearch::config::init_defaults'].key? 'CONF_FILE' -%>
rm CONF_FILE
<%- end -%>
-<% @new_init_defaults.sort.map do |key, value| -%>
+<% scope['elasticsearch::config::init_defaults'].sort.map do |key, value| -%>
set <%= key %> '<%= value %>'
<% end -%>