diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..d77700e --- /dev/null +++ b/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +# MANAGED BY MODULESYNC + +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 2 +tab_width = 2 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/.fixtures.yml b/.fixtures.yml index d9de351..cd920aa 100644 --- a/.fixtures.yml +++ b/.fixtures.yml @@ -1,20 +1,16 @@ fixtures: repositories: - apt: - repo: "git://github.com/puppetlabs/puppetlabs-apt.git" - ref: 2.3.0 - stdlib: - repo: "git://github.com/puppetlabs/puppetlabs-stdlib.git" - ref: 4.11.0 - epel: - repo: 'https://github.com/stahnma/puppet-module-epel.git' - ref: 1.2.2 - augeasproviders_sysctl: - repo: "git://github.com/hercules-team/augeasproviders_sysctl.git" - tag: v2.1.0 - augeasproviders_core: - repo: "git://github.com/hercules-team/augeasproviders_core.git" - tag: v2.1.0 - symlinks: - redis: "#{source_dir}" - + apt: 'https://github.com/puppetlabs/puppetlabs-apt.git' + stdlib: 'https://github.com/puppetlabs/puppetlabs-stdlib.git' + epel: 'https://github.com/voxpupuli/puppet-epel.git' + augeasproviders_sysctl: 'https://github.com/hercules-team/augeasproviders_sysctl.git' + augeasproviders_core: 'https://github.com/hercules-team/augeasproviders_core.git' + augeas_core: + repo: "https://github.com/puppetlabs/puppetlabs-augeas_core" + puppet_version: ">= 6.0.0" + yumrepo_core: + repo: "https://github.com/puppetlabs/puppetlabs-yumrepo_core" + puppet_version: ">= 6.0.0" + systemd: + repo: 'https://github.com/camptocamp/puppet-systemd.git' + puppet_version: "< 6.1.0" diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000..2240a97 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,281 @@ +# Contribution guidelines + +## Table of contents + +* [Contributing](#contributing) +* [Writing proper commits - short version](#writing-proper-commits-short-version) +* [Writing proper commits - long version](#writing-proper-commits-long-version) +* [Dependencies](#dependencies) + * [Note for OS X users](#note-for-os-x-users) +* [The test matrix](#the-test-matrix) +* [Syntax and style](#syntax-and-style) +* [Running the unit tests](#running-the-unit-tests) +* [Unit tests in docker](#unit-tests-in-docker) +* [Integration tests](#integration-tests) + +This module has grown over time based on a range of contributions from +people using it. If you follow these contributing guidelines your patch +will likely make it into a release a little more quickly. + +## Contributing + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +[Contributor Code of Conduct](https://voxpupuli.org/coc/). + +* Fork the repo. +* Create a separate branch for your change. +* We only take pull requests with passing tests, and documentation. [travis-ci](http://travis-ci.org) runs the tests for us. You can also execute them locally. This is explained [in a later section](#the-test-matrix). +* Check out [our docs](https://voxpupuli.org/docs/reviewing_pr/) we use to review a module and the [official styleguide](https://puppet.com/docs/puppet/6.0/style_guide.html). They provide some guidance for new code that might help you before you submit a pull request. +* Add a test for your change. Only refactoring and documentation changes require no new tests. If you are adding functionality or fixing a bug, please add a test.
+* Squash your commits down into logical components. Make sure to rebase against our current master. +* Push the branch to your fork and submit a pull request. + +Please be prepared to repeat some of these steps as our contributors review your code. + +## Writing proper commits - short version + +* Make commits of logical units. +* Check for unnecessary whitespace with "git diff --check" before committing. +* Commit using Unix line endings (check the settings around "crlf" in git-config(1)). +* Do not check in commented out code or unneeded files. +* The first line of the commit message should be a short description (50 characters is the soft limit, excluding ticket number(s)), and should skip the full stop. +* Associate the issue in the message. The first line should include the issue number in the form "(#XXXX) Rest of message". +* The body should provide a meaningful commit message, which: + * uses the imperative, present tense: `change`, not `changed` or `changes`. + * includes motivation for the change, and contrasts its implementation with the previous behavior. + * Make sure that you have tests for the bug you are fixing, or feature you are adding. + * Make sure the test suite passes after your commit: + * When introducing a new feature, make sure it is properly documented in the README.md + +## Writing proper commits - long version + + 1. Make separate commits for logically separate changes. + + Please break your commits down into logically consistent units + which include new or changed tests relevant to the rest of the + change. The goal of doing this is to make the diff easier to + read for whoever is reviewing your code. In general, the easier + your diff is to read, the more likely someone will be happy to + review it and get it into the code base. + + If you are going to refactor a piece of code, please do so as a + separate commit from your feature or bug fix changes. + + We also really appreciate changes that include tests to make + sure the bug is not re-introduced, and that the feature is not + accidentally broken. + + Describe the technical detail of the change(s). If your + description starts to get too long, that is a good sign that you + probably need to split up your commit into more finely grained + pieces. + + Commits which plainly describe the things which help + reviewers check the patch and future developers understand the + code are much more likely to be merged in with a minimum of + bike-shedding or requested changes. Ideally, the commit message + would include information, and be in a form suitable for + inclusion in the release notes for the version of Puppet that + includes them. + + Please also check that you are not introducing any trailing + whitespace or other "whitespace errors". You can do this by + running "git diff --check" on your changes before you commit. + + 2. Sending your patches + + To submit your changes via a GitHub pull request, we _highly_ + recommend that you have them on a topic branch, instead of + directly on `master`. + It makes things much easier to keep track of, especially if + you decide to work on another thing before your first change + is merged in. + + GitHub has some pretty good + [general documentation](http://help.github.com/) on using + their site. They also have documentation on + [creating pull requests](http://help.github.com/send-pull-requests/).
+ + In general, after pushing your topic branch up to your + repository on GitHub, you can switch to the branch in the + GitHub UI and click "Pull Request" towards the top of the page + in order to open a pull request. + + + 3. Update the related GitHub issue. + + If there is a GitHub issue associated with the change you + submitted, then you should update the ticket to include the + location of your branch, along with any other commentary you + may wish to make. + +## Dependencies + +The testing and development tools have a bunch of dependencies, +all managed by [bundler](http://bundler.io/) according to the +[Puppet support matrix](http://docs.puppetlabs.com/guides/platforms.html#ruby-versions). + +By default the tests use a baseline version of Puppet. + +If you have Ruby 2.x or want a specific version of Puppet, +you must set an environment variable such as: + +```sh +export PUPPET_VERSION="~> 5.5.6" +``` + +You can install all needed gems for spec tests into the modules directory by +running: + +```sh +bundle install --path .vendor/ --without development system_tests release --jobs "$(nproc)" +``` + +If you also want to run acceptance tests: + +```sh +bundle install --path .vendor/ --with system_tests --without development release --jobs "$(nproc)" +``` + +Our all-in-one solution if you don't know if you need to install or update gems: + +```sh +bundle install --path .vendor/ --with system_tests --without development release --jobs "$(nproc)"; bundle update; bundle clean +``` + +As an alternative to the `--jobs "$(nproc)"` parameter, you can set an +environment variable: + +```sh +BUNDLE_JOBS="$(nproc)" +``` + +### Note for OS X users + +`nproc` isn't a valid command under OS X. As an alternative, you can do: + +```sh +--jobs "$(sysctl -n hw.ncpu)" +``` + +## The test matrix + +### Syntax and style + +The test suite will run [Puppet Lint](http://puppet-lint.com/) and +[Puppet Syntax](https://github.com/gds-operations/puppet-syntax) to +check various syntax and style things. You can run these locally with: + +```sh +bundle exec rake lint +bundle exec rake validate +``` + +It will also run some [Rubocop](http://batsov.com/rubocop/) tests +against it. You can run those locally ahead of time with: + +```sh +bundle exec rake rubocop +``` + +### Running the unit tests + +The unit test suite covers most of the code; as mentioned above, please +add tests if you're adding new functionality. If you've not used +[rspec-puppet](http://rspec-puppet.com/) before then feel free to ask +about how best to test your new feature. + +To run the linter, the syntax checker and the unit tests: + +```sh +bundle exec rake test +``` + +To run all the unit tests: + +```sh +bundle exec rake spec +``` + +To run a specific spec test set the `SPEC` variable: + +```sh +bundle exec rake spec SPEC=spec/foo_spec.rb +``` + +#### Unit tests in docker + +Some people don't want to run the dependencies locally or don't want to install +ruby. We ship a Dockerfile that enables you to run all unit tests and linting. +You only need to run: + +```sh +docker build . +``` + +Please ensure that a docker daemon is running and that your user has the +permission to talk to it. You can specify a remote docker host by setting the +`DOCKER_HOST` environment variable. It will copy the content of the module into +the docker image, so it will not work if a Gemfile.lock exists. + +### Integration tests + +The unit tests just check the code runs, not that it does exactly what +we want on a real machine.
For that we're using +[beaker](https://github.com/puppetlabs/beaker). + +This fires up a new virtual machine (using vagrant) and runs a series of +simple tests against it after applying the module. You can run this +with: + +```sh +bundle exec rake acceptance +``` + +This will run the tests on the module's default nodeset. You can override the +nodeset used, e.g., + +```sh +BEAKER_set=centos-7-x64 bundle exec rake acceptance +``` + +There are default rake tasks for the various acceptance test modules, e.g., + +```sh +bundle exec rake beaker:centos-7-x64 +bundle exec rake beaker:ssh:centos-7-x64 +``` + +If you don't want to have to recreate the virtual machine every time you can +use `BEAKER_destroy=no` and `BEAKER_provision=no`. On the first run you will at +least need `BEAKER_provision` set to yes (the default). The Vagrantfile for the +created virtual machines will be in `.vagrant/beaker_vagrant_files`. + +Beaker also supports docker containers. We also use that in our automated CI +pipeline at [travis-ci](http://travis-ci.org). To use that instead of Vagrant: + +```sh +PUPPET_INSTALL_TYPE=agent BEAKER_IS_PE=no BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=debian10-64{hypervisor=docker} BEAKER_destroy=yes bundle exec rake beaker +``` + +You can replace the string `debian10` with any common operating system. +The following strings are known to work: + +* ubuntu1604 +* ubuntu1804 +* debian8 +* debian9 +* debian10 +* centos6 +* centos7 +* centos8 + +The easiest way to debug in a docker container is to open a shell: + +```sh +docker exec -it -u root ${container_id_or_name} bash +``` + +The source of this file is in our [modulesync_config](https://github.com/voxpupuli/modulesync_config/blob/master/moduleroot/.github/CONTRIBUTING.md.erb) +repository. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..593e7aa --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## Affected Puppet, Ruby, OS and module versions/distributions + +- Puppet: +- Ruby: +- Distribution: +- Module version: + +## How to reproduce (e.g Puppet code you use) + +## What are you seeing + +## What behaviour did you expect instead + +## Output log + +## Any additional information you'd like to impart diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..342807b --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,20 @@ + +#### Pull Request (PR) description + + +#### This Pull Request (PR) fixes the following issues + diff --git a/.gitignore b/.gitignore index be54f94..e9b3cf4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,41 +1,20 @@ -.vagrant/ -log/ - -# Default .gitignore for Ruby +pkg/ Gemfile.lock +Gemfile.local +vendor/ +.vendor/ +spec/fixtures/manifests/ +spec/fixtures/modules/ +.vagrant/ .bundle/ -*.gem -*.rbc -.bundle -.config -coverage -InstalledFiles -lib/bundler/man -pkg -rdoc -spec/reports -test/tmp -test/version_tmp -tmp - -# YARD artifacts -.yardoc -_yardoc -doc/ - -# Vim -*.swp - -# Eclipse -.project - -# Visual Studio Code -.vscode/ - -# OS X -.DS_Store - -# Puppet +.ruby-version coverage/ -spec/fixtures/manifests/* -spec/fixtures/modules/* +log/ +.idea/ +.dependencies/ +.librarian/ +Puppetfile.lock +*.iml +.*.sw? 
+.yardoc/ +Guardfile diff --git a/.msync.yml b/.msync.yml new file mode 100644 index 0000000..8864fc0 --- /dev/null +++ b/.msync.yml @@ -0,0 +1 @@ +modulesync_config_version: '2.12.0' diff --git a/.overcommit.yml b/.overcommit.yml new file mode 100644 index 0000000..1b03fad --- /dev/null +++ b/.overcommit.yml @@ -0,0 +1,64 @@ +# Managed by https://github.com/voxpupuli/modulesync_configs +# +# Hooks are only enabled if you take action. +# +# To enable the hooks run: +# +# ``` +# bundle exec overcommit --install +# # ensure .overcommit.yml does not harm to you and then +# bundle exec overcommit --sign +# ``` +# +# (it will manage the .git/hooks directory): +# +# Examples howto skip a test for a commit or push: +# +# ``` +# SKIP=RuboCop git commit +# SKIP=PuppetLint git commit +# SKIP=RakeTask git push +# ``` +# +# Don't invoke overcommit at all: +# +# ``` +# OVERCOMMIT_DISABLE=1 git commit +# ``` +# +# Read more about overcommit: https://github.com/brigade/overcommit +# +# To manage this config yourself in your module add +# +# ``` +# .overcommit.yml: +# unmanaged: true +# ``` +# +# to your modules .sync.yml config +--- +PreCommit: + RuboCop: + enabled: true + description: 'Runs rubocop on modified files only' + command: ['bundle', 'exec', 'rubocop'] + PuppetLint: + enabled: true + description: 'Runs puppet-lint on modified files only' + command: ['bundle', 'exec', 'puppet-lint'] + YamlSyntax: + enabled: true + JsonSyntax: + enabled: true + TrailingWhitespace: + enabled: true + +PrePush: + RakeTarget: + enabled: true + description: 'Run rake targets' + targets: + - 'validate' + - 'test' + - 'rubocop' + command: [ 'bundle', 'exec', 'rake' ] diff --git a/.pmtignore b/.pmtignore new file mode 100644 index 0000000..4e6d54b --- /dev/null +++ b/.pmtignore @@ -0,0 +1,21 @@ +docs/ +pkg/ +Gemfile.lock +Gemfile.local +vendor/ +.vendor/ +spec/fixtures/manifests/ +spec/fixtures/modules/ +.vagrant/ +.bundle/ +.ruby-version +coverage/ +log/ +.idea/ +.dependencies/ +.librarian/ +Puppetfile.lock +*.iml +.*.sw? 
+.yardoc/ +Dockerfile diff --git a/.rspec b/.rspec index 3c7915c..8c18f1a 100644 --- a/.rspec +++ b/.rspec @@ -1,4 +1,2 @@ ---format d +--format documentation --color ---tty ---backtrace diff --git a/.rspec_parallel b/.rspec_parallel new file mode 100644 index 0000000..e4d136b --- /dev/null +++ b/.rspec_parallel @@ -0,0 +1 @@ +--format progress diff --git a/.rubocop.yml b/.rubocop.yml new file mode 100644 index 0000000..c2ebc88 --- /dev/null +++ b/.rubocop.yml @@ -0,0 +1,546 @@ +require: rubocop-rspec +AllCops: +# Puppet Server 5 defaults to jruby 1.7 so TargetRubyVersion must stay at 1.9 until we drop support for puppet 5 + TargetRubyVersion: 1.9 + Include: + - ./**/*.rb + Exclude: + - files/**/* + - vendor/**/* + - .vendor/**/* + - pkg/**/* + - spec/fixtures/**/* + - Gemfile + - Rakefile + - Guardfile + - Vagrantfile +Lint/ConditionPosition: + Enabled: True + +Lint/ElseLayout: + Enabled: True + +Lint/UnreachableCode: + Enabled: True + +Lint/UselessComparison: + Enabled: True + +Lint/EnsureReturn: + Enabled: True + +Lint/HandleExceptions: + Enabled: True + +Lint/LiteralInCondition: + Enabled: True + +Lint/ShadowingOuterLocalVariable: + Enabled: True + +Lint/LiteralInInterpolation: + Enabled: True + +Style/HashSyntax: + Enabled: True + +Style/RedundantReturn: + Enabled: True + +Layout/EndOfLine: + Enabled: False + +Lint/AmbiguousOperator: + Enabled: True + +Lint/AssignmentInCondition: + Enabled: True + +Layout/SpaceBeforeComment: + Enabled: True + +Style/AndOr: + Enabled: True + +Style/RedundantSelf: + Enabled: True + +Metrics/BlockLength: + Enabled: False + +# Method length is not necessarily an indicator of code quality +Metrics/MethodLength: + Enabled: False + +# Module length is not necessarily an indicator of code quality +Metrics/ModuleLength: + Enabled: False + +Style/WhileUntilModifier: + Enabled: True + +Lint/AmbiguousRegexpLiteral: + Enabled: True + +Security/Eval: + Enabled: True + +Lint/BlockAlignment: + Enabled: True + +Lint/DefEndAlignment: + Enabled: True + +Lint/EndAlignment: + Enabled: True + +Lint/DeprecatedClassMethods: + Enabled: True + +Lint/Loop: + Enabled: True + +Lint/ParenthesesAsGroupedExpression: + Enabled: True + +Lint/RescueException: + Enabled: True + +Lint/StringConversionInInterpolation: + Enabled: True + +Lint/UnusedBlockArgument: + Enabled: True + +Lint/UnusedMethodArgument: + Enabled: True + +Lint/UselessAccessModifier: + Enabled: True + +Lint/UselessAssignment: + Enabled: True + +Lint/Void: + Enabled: True + +Layout/AccessModifierIndentation: + Enabled: True + +Style/AccessorMethodName: + Enabled: True + +Style/Alias: + Enabled: True + +Layout/AlignArray: + Enabled: True + +Layout/AlignHash: + Enabled: True + +Layout/AlignParameters: + Enabled: True + +Metrics/BlockNesting: + Enabled: True + +Style/AsciiComments: + Enabled: True + +Style/Attr: + Enabled: True + +Style/BracesAroundHashParameters: + Enabled: True + +Style/CaseEquality: + Enabled: True + +Layout/CaseIndentation: + Enabled: True + +Style/CharacterLiteral: + Enabled: True + +Style/ClassAndModuleCamelCase: + Enabled: True + +Style/ClassAndModuleChildren: + Enabled: False + +Style/ClassCheck: + Enabled: True + +# Class length is not necessarily an indicator of code quality +Metrics/ClassLength: + Enabled: False + +Style/ClassMethods: + Enabled: True + +Style/ClassVars: + Enabled: True + +Style/WhenThen: + Enabled: True + +Style/WordArray: + Enabled: True + +Style/UnneededPercentQ: + Enabled: True + +Layout/Tab: + Enabled: True + +Layout/SpaceBeforeSemicolon: + Enabled: True + 
+Layout/TrailingBlankLines: + Enabled: True + +Layout/SpaceInsideBlockBraces: + Enabled: True + +Layout/SpaceInsideBrackets: + Enabled: True + +Layout/SpaceInsideHashLiteralBraces: + Enabled: True + +Layout/SpaceInsideParens: + Enabled: True + +Layout/LeadingCommentSpace: + Enabled: True + +Layout/SpaceBeforeFirstArg: + Enabled: True + +Layout/SpaceAfterColon: + Enabled: True + +Layout/SpaceAfterComma: + Enabled: True + +Layout/SpaceAfterMethodName: + Enabled: True + +Layout/SpaceAfterNot: + Enabled: True + +Layout/SpaceAfterSemicolon: + Enabled: True + +Layout/SpaceAroundEqualsInParameterDefault: + Enabled: True + +Layout/SpaceAroundOperators: + Enabled: True + +Layout/SpaceBeforeBlockBraces: + Enabled: True + +Layout/SpaceBeforeComma: + Enabled: True + +Style/CollectionMethods: + Enabled: True + +Layout/CommentIndentation: + Enabled: True + +Style/ColonMethodCall: + Enabled: True + +Style/CommentAnnotation: + Enabled: True + +# 'Complexity' is very relative +Metrics/CyclomaticComplexity: + Enabled: False + +Style/ConstantName: + Enabled: True + +Style/Documentation: + Enabled: False + +Style/DefWithParentheses: + Enabled: True + +Style/PreferredHashMethods: + Enabled: True + +Layout/DotPosition: + EnforcedStyle: trailing + +Style/DoubleNegation: + Enabled: True + +Style/EachWithObject: + Enabled: True + +Layout/EmptyLineBetweenDefs: + Enabled: True + +Layout/IndentArray: + Enabled: True + +Layout/IndentHash: + Enabled: True + +Layout/IndentationConsistency: + Enabled: True + +Layout/IndentationWidth: + Enabled: True + +Layout/EmptyLines: + Enabled: True + +Layout/EmptyLinesAroundAccessModifier: + Enabled: True + +Style/EmptyLiteral: + Enabled: True + +# Configuration parameters: AllowURI, URISchemes. +Metrics/LineLength: + Enabled: False + +Style/MethodCallWithoutArgsParentheses: + Enabled: True + +Style/MethodDefParentheses: + Enabled: True + +Style/LineEndConcatenation: + Enabled: True + +Layout/TrailingWhitespace: + Enabled: True + +Style/StringLiterals: + Enabled: True + +Style/TrailingCommaInArguments: + Enabled: True + +Style/TrailingCommaInLiteral: + Enabled: True + +Style/GlobalVars: + Enabled: True + +Style/GuardClause: + Enabled: True + +Style/IfUnlessModifier: + Enabled: True + +Style/MultilineIfThen: + Enabled: True + +Style/NegatedIf: + Enabled: True + +Style/NegatedWhile: + Enabled: True + +Style/Next: + Enabled: True + +Style/SingleLineBlockParams: + Enabled: True + +Style/SingleLineMethods: + Enabled: True + +Style/SpecialGlobalVars: + Enabled: True + +Style/TrivialAccessors: + Enabled: True + +Style/UnlessElse: + Enabled: True + +Style/VariableInterpolation: + Enabled: True + +Style/VariableName: + Enabled: True + +Style/WhileUntilDo: + Enabled: True + +Style/EvenOdd: + Enabled: True + +Style/FileName: + Enabled: True + +Style/For: + Enabled: True + +Style/Lambda: + Enabled: True + +Style/MethodName: + Enabled: True + +Style/MultilineTernaryOperator: + Enabled: True + +Style/NestedTernaryOperator: + Enabled: True + +Style/NilComparison: + Enabled: True + +Style/FormatString: + Enabled: True + +Style/MultilineBlockChain: + Enabled: True + +Style/Semicolon: + Enabled: True + +Style/SignalException: + Enabled: True + +Style/NonNilCheck: + Enabled: True + +Style/Not: + Enabled: True + +Style/NumericLiterals: + Enabled: True + +Style/OneLineConditional: + Enabled: True + +Style/OpMethod: + Enabled: True + +Style/ParenthesesAroundCondition: + Enabled: True + +Style/PercentLiteralDelimiters: + Enabled: True + +Style/PerlBackrefs: + Enabled: True + +Style/PredicateName: + 
Enabled: True + +Style/RedundantException: + Enabled: True + +Style/SelfAssignment: + Enabled: True + +Style/Proc: + Enabled: True + +Style/RaiseArgs: + Enabled: True + +Style/RedundantBegin: + Enabled: True + +Style/RescueModifier: + Enabled: True + +# based on https://github.com/voxpupuli/modulesync_config/issues/168 +Style/RegexpLiteral: + EnforcedStyle: percent_r + Enabled: True + +Lint/UnderscorePrefixedVariableName: + Enabled: True + +Metrics/ParameterLists: + Enabled: False + +Lint/RequireParentheses: + Enabled: True + +Style/ModuleFunction: + Enabled: True + +Lint/Debugger: + Enabled: True + +Style/IfWithSemicolon: + Enabled: True + +Style/Encoding: + Enabled: True + +Style/BlockDelimiters: + Enabled: True + +Layout/MultilineBlockLayout: + Enabled: True + +# 'Complexity' is very relative +Metrics/AbcSize: + Enabled: False + +# 'Complexity' is very relative +Metrics/PerceivedComplexity: + Enabled: False + +Lint/UselessAssignment: + Enabled: True + +Layout/ClosingParenthesisIndentation: + Enabled: True + +# RSpec + +RSpec/BeforeAfterAll: + Exclude: + - spec/acceptance/**/* + +# We don't use rspec in this way +RSpec/DescribeClass: + Enabled: False + +# Example length is not necessarily an indicator of code quality +RSpec/ExampleLength: + Enabled: False + +RSpec/NamedSubject: + Enabled: False + +# disabled for now since they cause a lot of issues +# these issues aren't easy to fix +RSpec/RepeatedDescription: + Enabled: False + +RSpec/NestedGroups: + Enabled: False + +# this is broken on ruby1.9 +Layout/IndentHeredoc: + Enabled: False + +# disable Yaml safe_load. This is needed to support ruby2.0.0 development envs +Security/YAMLLoad: + Enabled: false + +# This affects hiera interpolation, as well as some configs that we push. +Style/FormatStringToken: + Enabled: false + +# This is useful, but sometimes a little too picky about where unit tests files +# are located. 
+RSpec/FilePath: + Enabled: false diff --git a/.sync.yml b/.sync.yml new file mode 100644 index 0000000..a3a81ba --- /dev/null +++ b/.sync.yml @@ -0,0 +1,33 @@ +--- +.travis.yml: + secure: "lmmQ5+ueE39cDevK+TjJWgD7b9X6FQDoyL/OTRXzOuCu/96yXQrm/KBq4E2IsFTx91XiqOH6ZnKcgiGwaY54E10ddTvlZhEa8orMmwfkrLAbyh7POB1q3AtQze7RPLvOuHZc6XIRBDiiFKMrzvwdP5OgU9iPEFdSRZll64nw240=" + docker_sets: + - set: ubuntu1604-64m + - set: ubuntu1804-64m + - set: ubuntu2004-64m + - set: centos6-64m + - set: centos7-64m + - set: centos8-64m + - set: debian9-64m + - set: debian10-64m +Gemfile: + optional: + ':test': + - gem: 'redis' + - gem: 'mock_redis' +spec/spec_helper.rb: + unmanaged: true +spec/acceptance/nodesets/ec2/amazonlinux-2016091.yml: + delete: true +spec/acceptance/nodesets/ec2/image_templates.yaml: + delete: true +spec/acceptance/nodesets/ec2/rhel-73-x64.yml: + delete: true +spec/acceptance/nodesets/ec2/sles-12sp2-x64.yml: + delete: true +spec/acceptance/nodesets/ec2/ubuntu-1604-x64.yml: + delete: true +spec/acceptance/nodesets/ec2/windows-2016-base-x64.yml: + delete: true +spec/acceptance/nodesets/archlinux-2-x64.yml: + delete: true diff --git a/.travis.yml b/.travis.yml index 381dfae..6990f2d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,71 +1,107 @@ --- +dist: bionic language: ruby -bundler_args: --without system_tests -script: 'SPEC_OPTS="--format documentation" bundle exec rake validate lint spec' +cache: bundler before_install: - - gem install bundler # -v x.x.x if a specific version is needed + - yes | gem update --system + - bundle --version +script: + - 'bundle exec rake $CHECK' matrix: fast_finish: true include: - - sudo: required - dist: trusty - rvm: 2.1.9 - env: PUPPET_GEM_VERSION="~> 3.0" FUTURE_PARSER="yes" ORDERING="random" - - sudo: required - dist: trusty - rvm: 2.2.2 - env: PUPPET_GEM_VERSION="~> 3.0" STRICT_VARIABLES="yes" ORDERING="random" - - sudo: required - dist: trusty - rvm: 2.1.9 - env: PUPPET_GEM_VERSION="~> 4.0" STRICT_VARIABLES="yes" ORDERING="random" - - sudo: required + - rvm: 2.4.4 + bundler_args: --without system_tests development release + env: PUPPET_VERSION="~> 5.0" CHECK=test + - rvm: 2.5.3 + bundler_args: --without system_tests development release + env: PUPPET_VERSION="~> 6.0" CHECK=test_with_coveralls + - rvm: 2.5.3 + bundler_args: --without system_tests development release + env: PUPPET_VERSION="~> 6.0" CHECK=rubocop + - rvm: 2.4.4 + bundler_args: --without system_tests development release + env: PUPPET_VERSION="~> 5.0" CHECK=build DEPLOY_TO_FORGE=yes + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=ubuntu1604-64m BEAKER_HYPERVISOR=docker CHECK=beaker services: docker - rvm: '2.3.3' - env: PUPPET_INSTALL_VERSION="1.10.8" PUPPET_INSTALL_TYPE=agent BEAKER_set="centos-6-docker" - script: bundle exec rake beaker:suites['default',$BEAKER_set] - bundler_args: --without development - - sudo: required + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=ubuntu1604-64m BEAKER_HYPERVISOR=docker CHECK=beaker services: docker - rvm: '2.3.3' - env: PUPPET_INSTALL_VERSION="1.10.8" PUPPET_INSTALL_TYPE=agent BEAKER_set="centos-7-docker" - script: bundle exec rake beaker:suites['default',$BEAKER_set] - bundler_args: --without development - - sudo: required + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent 
BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=ubuntu1804-64m BEAKER_HYPERVISOR=docker CHECK=beaker services: docker - rvm: '2.3.3' - env: PUPPET_INSTALL_VERSION="1.10.8" PUPPET_INSTALL_TYPE=agent BEAKER_set="debian-8-docker" - script: bundle exec rake beaker:suites['default',$BEAKER_set] - bundler_args: --without development - - sudo: required + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=ubuntu1804-64m BEAKER_HYPERVISOR=docker CHECK=beaker services: docker - rvm: '2.3.3' - env: PUPPET_INSTALL_VERSION="1.10.8" PUPPET_INSTALL_TYPE=agent BEAKER_set="ubuntu-1604-docker" - script: bundle exec rake beaker:suites['default',$BEAKER_set] - bundler_args: --without development - - sudo: required + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=ubuntu2004-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=centos6-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=centos6-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=centos7-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=centos7-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=centos8-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=centos8-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=debian9-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=debian9-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet5 BEAKER_debug=true BEAKER_setfile=debian10-64m BEAKER_HYPERVISOR=docker CHECK=beaker + services: docker + - rvm: 2.5.3 + bundler_args: --without development release + env: PUPPET_INSTALL_TYPE=agent BEAKER_PUPPET_COLLECTION=puppet6 BEAKER_debug=true BEAKER_setfile=debian10-64m BEAKER_HYPERVISOR=docker CHECK=beaker services: docker - rvm: '2.3.3' - env: PUPPET_INSTALL_VERSION="1.5.2" PUPPET_INSTALL_TYPE=agent BEAKER_set="ubuntu-1404-docker" - script: bundle exec rake beaker:suites['default',$BEAKER_set] - bundler_args: --without 
development - - sudo: required - rvm: '2.4.0' - env: PUPPET_VERSION="~> 4.0" CHECK=build DEPLOY_TO_FORGE=yes - script: bundle exec rake build - bundler_args: --without development branches: only: - master - /^v\d/ +notifications: + email: false + webhooks: https://voxpupu.li/incoming/travis + irc: + on_success: always + on_failure: always + channels: + - "chat.freenode.org#voxpupuli-notifications" deploy: provider: puppetforge - user: arioch + user: puppet password: - secure: N5N2gNr0MTHY2bjBnW6FKk3qwWpYEJ+h5jM5E6058hX4f3zFYQVWwO0xXtlgJ61bG89KN4hMNhvZgYjlh+aAFMS2iNoNiBcNDI4dksu2lR4iIVeP5FIp8JnEh7jNDuldaIqHo2GKjRSkl70lcNC6NzYcAAsiyFvkn5XEDLkbKR8= + secure: "lmmQ5+ueE39cDevK+TjJWgD7b9X6FQDoyL/OTRXzOuCu/96yXQrm/KBq4E2IsFTx91XiqOH6ZnKcgiGwaY54E10ddTvlZhEa8orMmwfkrLAbyh7POB1q3AtQze7RPLvOuHZc6XIRBDiiFKMrzvwdP5OgU9iPEFdSRZll64nw240=" on: tags: true # all_branches is required to use tags all_branches: true # Only publish the build marked with "DEPLOY_TO_FORGE" condition: "$DEPLOY_TO_FORGE = yes" diff --git a/.yardopts b/.yardopts new file mode 100644 index 0000000..3687f51 --- /dev/null +++ b/.yardopts @@ -0,0 +1,2 @@ +--markup markdown +--output-dir docs/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 4aa66f3..d6971b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,368 +1,558 @@ -# Change log +# Changelog All notable changes to this project will be documented in this file. +Each new release typically also includes the latest modulesync defaults. +These should not affect the functionality of the module. -## [v3.2.0](https://github.com/arioch/puppet-redis/tree/v3.2.0) (2017-07-11) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/v3.1.1...v3.2.0) +## [v6.0.0](https://github.com/voxpupuli/puppet-redis/tree/v6.0.0) (2020-05-12) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v5.0.0...v6.0.0) + +**Breaking changes:** + +- Make apt and epel soft dependencies [\#358](https://github.com/voxpupuli/puppet-redis/pull/358) ([ekohl](https://github.com/ekohl)) + +**Implemented enhancements:** + +- Add support for RedHat/CentOS 8 [\#350](https://github.com/voxpupuli/puppet-redis/pull/350) ([yakatz](https://github.com/yakatz)) +- Add Debian 10 support [\#344](https://github.com/voxpupuli/puppet-redis/pull/344) ([ekohl](https://github.com/ekohl)) +- Finishing touches for multi-instance support [\#343](https://github.com/voxpupuli/puppet-redis/pull/343) ([fraenki](https://github.com/fraenki)) +- Set RuntimeDirectory in service template [\#342](https://github.com/voxpupuli/puppet-redis/pull/342) ([basti-nis](https://github.com/basti-nis)) + +**Fixed bugs:** + +- change systemd stop command to redis-cli [\#355](https://github.com/voxpupuli/puppet-redis/pull/355) ([alexskr](https://github.com/alexskr)) +- Fix the sentinel pid file location for Ubuntu 18.04 [\#347](https://github.com/voxpupuli/puppet-redis/pull/347) ([ekohl](https://github.com/ekohl)) + +**Closed issues:** + +- Type forking causes service timeout [\#260](https://github.com/voxpupuli/puppet-redis/issues/260) + +**Merged pull requests:** + +- Use more native rspec and serverspec testing [\#348](https://github.com/voxpupuli/puppet-redis/pull/348) ([ekohl](https://github.com/ekohl)) +- Run acceptance tests with manage\_repo =\> false [\#346](https://github.com/voxpupuli/puppet-redis/pull/346) ([ekohl](https://github.com/ekohl)) +- Update bolt testing to use modern versions [\#345](https://github.com/voxpupuli/puppet-redis/pull/345) ([ekohl](https://github.com/ekohl)) + +## 
[v5.0.0](https://github.com/voxpupuli/puppet-redis/tree/v5.0.0) (2019-12-03) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v4.2.1...v5.0.0) + +**Breaking changes:** + +- Closer match parameter to OS defaults [\#336](https://github.com/voxpupuli/puppet-redis/pull/336) ([ekohl](https://github.com/ekohl)) +- Stricter data types [\#328](https://github.com/voxpupuli/puppet-redis/pull/328) ([ekohl](https://github.com/ekohl)) +- Drop support for Redis 2, Debian 8 and Ubuntu 14.04; add Debian 9 and Ubuntu 18.04 [\#326](https://github.com/voxpupuli/puppet-redis/pull/326) ([ekohl](https://github.com/ekohl)) + +**Implemented enhancements:** + +- Instance is not read from hiera file [\#329](https://github.com/voxpupuli/puppet-redis/issues/329) +- Add SCL support [\#334](https://github.com/voxpupuli/puppet-redis/pull/334) ([ekohl](https://github.com/ekohl)) +- Support instances inside Hiera [\#330](https://github.com/voxpupuli/puppet-redis/pull/330) ([lordbink](https://github.com/lordbink)) + +**Closed issues:** + +- Can't disable unixsocket after [\#331](https://github.com/voxpupuli/puppet-redis/issues/331) +- 2.8 version of Redis on Debian 8 errors out [\#246](https://github.com/voxpupuli/puppet-redis/issues/246) +- Unable to unset unixsocket config parameter [\#228](https://github.com/voxpupuli/puppet-redis/issues/228) +- bind $ipaddress [\#94](https://github.com/voxpupuli/puppet-redis/issues/94) +- Create nodes.conf file? [\#76](https://github.com/voxpupuli/puppet-redis/issues/76) + +**Merged pull requests:** + +- Clean up preinstall handling and use modern facts [\#335](https://github.com/voxpupuli/puppet-redis/pull/335) ([ekohl](https://github.com/ekohl)) +- Allow empty unixsocket\(perm\) [\#333](https://github.com/voxpupuli/puppet-redis/pull/333) ([ekohl](https://github.com/ekohl)) +- Allow privileged ports in data types [\#332](https://github.com/voxpupuli/puppet-redis/pull/332) ([ekohl](https://github.com/ekohl)) +- Fix typo in type [\#327](https://github.com/voxpupuli/puppet-redis/pull/327) ([ekohl](https://github.com/ekohl)) +- Complete the transition to puppet-strings [\#325](https://github.com/voxpupuli/puppet-redis/pull/325) ([ekohl](https://github.com/ekohl)) + +## [v4.2.1](https://github.com/voxpupuli/puppet-redis/tree/v4.2.1) (2019-09-19) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v4.2.0...v4.2.1) + +**Fixed bugs:** + +- Fixing repl\_ping\_slave\_period not being managed with params [\#318](https://github.com/voxpupuli/puppet-redis/pull/318) ([dom-nie](https://github.com/dom-nie)) + +**Merged pull requests:** + +- Spelling fix [\#323](https://github.com/voxpupuli/puppet-redis/pull/323) ([tetsuo13](https://github.com/tetsuo13)) + +## [v4.2.0](https://github.com/voxpupuli/puppet-redis/tree/v4.2.0) (2019-07-22) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v4.1.0...v4.2.0) + +**Implemented enhancements:** + +- Adding support for more redis cluster params [\#316](https://github.com/voxpupuli/puppet-redis/pull/316) ([dom-nie](https://github.com/dom-nie)) + +**Closed issues:** + +- Fix systemd service filename [\#310](https://github.com/voxpupuli/puppet-redis/issues/310) +- Fix manage\_service\_file variable [\#308](https://github.com/voxpupuli/puppet-redis/issues/308) + +**Merged pull requests:** + +- Allow `puppetlabs/stdlib` 6.x [\#314](https://github.com/voxpupuli/puppet-redis/pull/314) ([alexjfisher](https://github.com/alexjfisher)) +- Fix manage\_service\_file variable 
[\#309](https://github.com/voxpupuli/puppet-redis/pull/309) ([CallumBanbery](https://github.com/CallumBanbery)) + +## [v4.1.0](https://github.com/voxpupuli/puppet-redis/tree/v4.1.0) (2019-05-02) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v4.0.0...v4.1.0) + +**Implemented enhancements:** + +- Add service\_enable parameter to sentinel.pp [\#307](https://github.com/voxpupuli/puppet-redis/pull/307) ([rschemm](https://github.com/rschemm)) + +**Closed issues:** + +- Please release v3.3.2 due to Ubuntu 18.04 / systemd fix. [\#283](https://github.com/voxpupuli/puppet-redis/issues/283) + +**Merged pull requests:** + +- Allow puppetlabs/apt 7.x [\#312](https://github.com/voxpupuli/puppet-redis/pull/312) ([dhoppe](https://github.com/dhoppe)) + +## [v4.0.0](https://github.com/voxpupuli/puppet-redis/tree/v4.0.0) (2019-03-12) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v3.3.1...v4.0.0) + +This is the first release since the module was migrated to the Vox Pupuli `puppet` namespace. + +**Breaking changes:** + +- Support binding to all interfaces [\#302](https://github.com/voxpupuli/puppet-redis/pull/302) ([alexjfisher](https://github.com/alexjfisher)) +- Drop support for puppet 3 and 4 [\#297](https://github.com/voxpupuli/puppet-redis/pull/297) ([alexjfisher](https://github.com/alexjfisher)) +- Convert `redisget` to `redis::get` API v4 function [\#293](https://github.com/voxpupuli/puppet-redis/pull/293) ([alexjfisher](https://github.com/alexjfisher)) + +**Implemented enhancements:** + +- Bind the service on all available interface [\#60](https://github.com/voxpupuli/puppet-redis/issues/60) +- Add some parameter validation using data types [\#303](https://github.com/voxpupuli/puppet-redis/pull/303) ([alexjfisher](https://github.com/alexjfisher)) +- Initial rhel 8 support [\#284](https://github.com/voxpupuli/puppet-redis/pull/284) ([mbaldessari](https://github.com/mbaldessari)) + +**Closed issues:** + +- Transparent Huge Pages \(THP\) Not Disabled on RHEL [\#278](https://github.com/voxpupuli/puppet-redis/issues/278) +- Looking for maintainer \[Help needed\] [\#277](https://github.com/voxpupuli/puppet-redis/issues/277) +- Travis Credential issues... still :\( [\#267](https://github.com/voxpupuli/puppet-redis/issues/267) +- Outdated dependency puppetlabs-apt \< 3.0.0 [\#264](https://github.com/voxpupuli/puppet-redis/issues/264) +- cannot bind ipv4 and ipv6 [\#257](https://github.com/voxpupuli/puppet-redis/issues/257) +- Deprecate Puppet 3.X Support [\#152](https://github.com/voxpupuli/puppet-redis/issues/152) +- Get acceptance tests running again [\#292](https://github.com/voxpupuli/puppet-redis/issues/292) +- Convert function to API v4 ruby function [\#291](https://github.com/voxpupuli/puppet-redis/issues/291) + +**Merged pull requests:** + +- Re-enable and fix acceptance tests. 
\(Don't manage `/var/run/redis` on Debian systems\) [\#299](https://github.com/voxpupuli/puppet-redis/pull/299) ([alexjfisher](https://github.com/alexjfisher)) +- Update metadata.json for Vox Pupuli migration [\#298](https://github.com/voxpupuli/puppet-redis/pull/298) ([alexjfisher](https://github.com/alexjfisher)) +- Update `apt` and `stdlib` dependencies [\#296](https://github.com/voxpupuli/puppet-redis/pull/296) ([alexjfisher](https://github.com/alexjfisher)) +- Fix github license detection [\#295](https://github.com/voxpupuli/puppet-redis/pull/295) ([alexjfisher](https://github.com/alexjfisher)) +- Add badges to README [\#294](https://github.com/voxpupuli/puppet-redis/pull/294) ([alexjfisher](https://github.com/alexjfisher)) +- Fix tests and initial Voxpupuli modulesync [\#290](https://github.com/voxpupuli/puppet-redis/pull/290) ([alexjfisher](https://github.com/alexjfisher)) +- Lint and rubocop \(autofixes only\) [\#289](https://github.com/voxpupuli/puppet-redis/pull/289) ([alexjfisher](https://github.com/alexjfisher)) + +## [v3.3.1](https://github.com/voxpupuli/puppet-redis/tree/v3.3.1) (2018-09-13) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v3.3.0...v3.3.1) + +**Closed issues:** + +- Ulimit configuration broken for systemd [\#268](https://github.com/voxpupuli/puppet-redis/issues/268) + +## [v3.3.0](https://github.com/voxpupuli/puppet-redis/tree/v3.3.0) (2018-06-19) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v3.2.0...v3.3.0) + +**Closed issues:** + +- output\_buffer\_limit\_pubsub and output\_buffer\_limit\_slave [\#263](https://github.com/voxpupuli/puppet-redis/issues/263) +- Can't release - Require Forge Credential Refresh [\#262](https://github.com/voxpupuli/puppet-redis/issues/262) +- Module v3.2.0 not published to forge [\#255](https://github.com/voxpupuli/puppet-redis/issues/255) +- Inconsistent sentinel\_package\_name handling on Debian [\#253](https://github.com/voxpupuli/puppet-redis/issues/253) +- No pidfile for sentinel [\#238](https://github.com/voxpupuli/puppet-redis/issues/238) +- puppet-redis requires outdated puppetlabs-apt module [\#232](https://github.com/voxpupuli/puppet-redis/issues/232) +- Can't use as slave bound to localhost [\#229](https://github.com/voxpupuli/puppet-redis/issues/229) +- \[Feature Request\] Redis::Instance - Set default unixsocket [\#226](https://github.com/voxpupuli/puppet-redis/issues/226) +- Travis Forge Password Changed [\#216](https://github.com/voxpupuli/puppet-redis/issues/216) + +**Merged pull requests:** + +- Adds logic for installing redid-sentinel package [\#254](https://github.com/voxpupuli/puppet-redis/pull/254) ([petems](https://github.com/petems)) +- Update redis mode on wheezy [\#252](https://github.com/voxpupuli/puppet-redis/pull/252) ([petems](https://github.com/petems)) +- Fix spec for sentinel [\#251](https://github.com/voxpupuli/puppet-redis/pull/251) ([petems](https://github.com/petems)) +- get rid of getvar\_emptystring function [\#249](https://github.com/voxpupuli/puppet-redis/pull/249) ([vicinus](https://github.com/vicinus)) +- Added log\_level to sentinel. 
[\#248](https://github.com/voxpupuli/puppet-redis/pull/248) ([hp197](https://github.com/hp197)) +- Adds redis\_cli task [\#245](https://github.com/voxpupuli/puppet-redis/pull/245) ([petems](https://github.com/petems)) +- Bump Puppet version for acceptance to 4.10.8 [\#244](https://github.com/voxpupuli/puppet-redis/pull/244) ([petems](https://github.com/petems)) +- protected-mode configuration option \(Redis 3.2+\) [\#243](https://github.com/voxpupuli/puppet-redis/pull/243) ([Dan70402](https://github.com/Dan70402)) +- Switch to using simp-beaker suites [\#241](https://github.com/voxpupuli/puppet-redis/pull/241) ([petems](https://github.com/petems)) +- Bumped apt version dependency to version \< 5.0.0 [\#237](https://github.com/voxpupuli/puppet-redis/pull/237) ([c4m4](https://github.com/c4m4)) +- Updates for EL6 [\#236](https://github.com/voxpupuli/puppet-redis/pull/236) ([petems](https://github.com/petems)) +- Pin version of redis gem [\#235](https://github.com/voxpupuli/puppet-redis/pull/235) ([petems](https://github.com/petems)) +- Added configuration options for client-output-buffer-limit [\#233](https://github.com/voxpupuli/puppet-redis/pull/233) ([Mike-Petersen](https://github.com/Mike-Petersen)) +- Allow `slaveof` when binding to localhost [\#231](https://github.com/voxpupuli/puppet-redis/pull/231) ([joshuaspence](https://github.com/joshuaspence)) +- Fix issues with missing locale for Debian box [\#224](https://github.com/voxpupuli/puppet-redis/pull/224) ([petems](https://github.com/petems)) +- Instance service improvements [\#222](https://github.com/voxpupuli/puppet-redis/pull/222) ([kwevers](https://github.com/kwevers)) +- Make sure the service is en/disabled per user request [\#221](https://github.com/voxpupuli/puppet-redis/pull/221) ([kwevers](https://github.com/kwevers)) +- Split Redis instance socket files [\#220](https://github.com/voxpupuli/puppet-redis/pull/220) ([kwevers](https://github.com/kwevers)) +- Split Redis workdir [\#219](https://github.com/voxpupuli/puppet-redis/pull/219) ([kwevers](https://github.com/kwevers)) +- fix package\_ensure version on ubuntu when it is in the 3:3.2.1 format [\#218](https://github.com/voxpupuli/puppet-redis/pull/218) ([sp-joseluis-ledesma](https://github.com/sp-joseluis-ledesma)) +- Split the redis instance logfiles by default [\#217](https://github.com/voxpupuli/puppet-redis/pull/217) ([kwevers](https://github.com/kwevers)) +- Use package\_ensure if it specifies a version instead of the minimum\_version [\#215](https://github.com/voxpupuli/puppet-redis/pull/215) ([sp-joseluis-ledesma](https://github.com/sp-joseluis-ledesma)) + +## [v3.2.0](https://github.com/voxpupuli/puppet-redis/tree/v3.2.0) (2017-07-11) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v3.1.1...v3.2.0) **Implemented enhancements:** -- Cluster Support [\#62](https://github.com/arioch/puppet-redis/issues/62) +- Cluster Support [\#62](https://github.com/voxpupuli/puppet-redis/issues/62) **Closed issues:** -- redis\_server\_version fact fails to parse output [\#210](https://github.com/arioch/puppet-redis/issues/210) -- Support for multi-instances per host [\#113](https://github.com/arioch/puppet-redis/issues/113) +- redis\_server\_version fact fails to parse output [\#210](https://github.com/voxpupuli/puppet-redis/issues/210) +- Support for multi-instances per host [\#113](https://github.com/voxpupuli/puppet-redis/issues/113) **Merged pull requests:** -- updated redis systemd unit file for better use with instances 
[\#214](https://github.com/arioch/puppet-redis/pull/214) ([bostrowski13](https://github.com/bostrowski13)) -- Updates docker images for CentOS 6 and 7 [\#213](https://github.com/arioch/puppet-redis/pull/213) ([petems](https://github.com/petems)) -- Update EPEL module [\#212](https://github.com/arioch/puppet-redis/pull/212) ([petems](https://github.com/petems)) -- Refactor redisget\(\) method [\#211](https://github.com/arioch/puppet-redis/pull/211) ([petems](https://github.com/petems)) -- Update docs for puppet-strings [\#206](https://github.com/arioch/puppet-redis/pull/206) ([petems](https://github.com/petems)) -- Add redis::instance defined type [\#200](https://github.com/arioch/puppet-redis/pull/200) ([petems](https://github.com/petems)) -- Adding note about Puppet 3 support [\#153](https://github.com/arioch/puppet-redis/pull/153) ([petems](https://github.com/petems)) +- updated redis systemd unit file for better use with instances [\#214](https://github.com/voxpupuli/puppet-redis/pull/214) ([bostrowski13](https://github.com/bostrowski13)) +- Updates docker images for CentOS 6 and 7 [\#213](https://github.com/voxpupuli/puppet-redis/pull/213) ([petems](https://github.com/petems)) +- Update EPEL module [\#212](https://github.com/voxpupuli/puppet-redis/pull/212) ([petems](https://github.com/petems)) +- Refactor redisget\(\) method [\#211](https://github.com/voxpupuli/puppet-redis/pull/211) ([petems](https://github.com/petems)) +- Update docs for puppet-strings [\#206](https://github.com/voxpupuli/puppet-redis/pull/206) ([petems](https://github.com/petems)) +- Add redis::instance defined type [\#200](https://github.com/voxpupuli/puppet-redis/pull/200) ([petems](https://github.com/petems)) +- Adding note about Puppet 3 support [\#153](https://github.com/voxpupuli/puppet-redis/pull/153) ([petems](https://github.com/petems)) + +## [v3.1.1](https://github.com/voxpupuli/puppet-redis/tree/v3.1.1) (2017-05-19) -## [v3.1.1](https://github.com/arioch/puppet-redis/tree/v3.1.1) (2017-05-19) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/v3.1.0...v3.1.1) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v3.1.0...v3.1.1) -## [v3.1.0](https://github.com/arioch/puppet-redis/tree/v3.1.0) (2017-05-19) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/v3.0.0...v3.1.0) +## [v3.1.0](https://github.com/voxpupuli/puppet-redis/tree/v3.1.0) (2017-05-19) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/v3.0.0...v3.1.0) **Implemented enhancements:** -- Releasing v3.0.1 [\#205](https://github.com/arioch/puppet-redis/issues/205) -- Please cut a release [\#201](https://github.com/arioch/puppet-redis/issues/201) -- Deployment to Forge [\#185](https://github.com/arioch/puppet-redis/issues/185) +- Releasing v3.0.1 [\#205](https://github.com/voxpupuli/puppet-redis/issues/205) +- Please cut a release [\#201](https://github.com/voxpupuli/puppet-redis/issues/201) +- Deployment to Forge [\#185](https://github.com/voxpupuli/puppet-redis/issues/185) **Fixed bugs:** -- Sort problem in v1.2.4 template [\#195](https://github.com/arioch/puppet-redis/issues/195) +- Sort problem in v1.2.4 template [\#195](https://github.com/voxpupuli/puppet-redis/issues/195) **Merged pull requests:** -- Add an optional third parameter to redisget\(\) to specify a default value [\#209](https://github.com/arioch/puppet-redis/pull/209) ([petems](https://github.com/petems)) -- Updates docs for puppet functions [\#208](https://github.com/arioch/puppet-redis/pull/208) 
([petems](https://github.com/petems)) -- Add switch to manage File\[/var/run/redis\] [\#204](https://github.com/arioch/puppet-redis/pull/204) ([petems](https://github.com/petems)) -- Ignore selinux default context for /etc/systemd/system/redis.service.d [\#202](https://github.com/arioch/puppet-redis/pull/202) ([amoralej](https://github.com/amoralej)) -- Make TravisCI push to the Forge [\#191](https://github.com/arioch/puppet-redis/pull/191) ([arioch](https://github.com/arioch)) +- Add an optional third parameter to redisget\(\) to specify a default value [\#209](https://github.com/voxpupuli/puppet-redis/pull/209) ([petems](https://github.com/petems)) +- Updates docs for puppet functions [\#208](https://github.com/voxpupuli/puppet-redis/pull/208) ([petems](https://github.com/petems)) +- Add switch to manage File\[/var/run/redis\] [\#204](https://github.com/voxpupuli/puppet-redis/pull/204) ([petems](https://github.com/petems)) +- Ignore selinux default context for /etc/systemd/system/redis.service.d [\#202](https://github.com/voxpupuli/puppet-redis/pull/202) ([amoralej](https://github.com/amoralej)) +- Make TravisCI push to the Forge [\#191](https://github.com/voxpupuli/puppet-redis/pull/191) ([arioch](https://github.com/arioch)) + +## [v3.0.0](https://github.com/voxpupuli/puppet-redis/tree/v3.0.0) (2017-05-11) -## [v3.0.0](https://github.com/arioch/puppet-redis/tree/v3.0.0) (2017-05-11) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.2.4...v3.0.0) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.2.4...v3.0.0) **Implemented enhancements:** -- Ubuntu 16.04 support? [\#146](https://github.com/arioch/puppet-redis/issues/146) -- \[Whishlist\] Extend ulimit parameter to support limits.conf and systemd [\#130](https://github.com/arioch/puppet-redis/issues/130) -- Add overcommit.pp to deal with `Can't save in background: fork: Cannot allocate memory` ? [\#105](https://github.com/arioch/puppet-redis/issues/105) +- Ubuntu 16.04 support? [\#146](https://github.com/voxpupuli/puppet-redis/issues/146) +- \[Whishlist\] Extend ulimit parameter to support limits.conf and systemd [\#130](https://github.com/voxpupuli/puppet-redis/issues/130) +- Add overcommit.pp to deal with `Can't save in background: fork: Cannot allocate memory` ? [\#105](https://github.com/voxpupuli/puppet-redis/issues/105) **Fixed bugs:** -- The fix for issue \#192 broke service\_managed false [\#197](https://github.com/arioch/puppet-redis/issues/197) -- Ubuntu 16.04 changed sentinel config filename [\#175](https://github.com/arioch/puppet-redis/issues/175) -- sentinel support broken? [\#166](https://github.com/arioch/puppet-redis/issues/166) -- Ownership problem with Ubuntu redis-server [\#150](https://github.com/arioch/puppet-redis/issues/150) -- Parameters not valid for older Redis in config [\#111](https://github.com/arioch/puppet-redis/issues/111) +- The fix for issue \#192 broke service\_managed false [\#197](https://github.com/voxpupuli/puppet-redis/issues/197) +- Ubuntu 16.04 changed sentinel config filename [\#175](https://github.com/voxpupuli/puppet-redis/issues/175) +- sentinel support broken? 
[\#166](https://github.com/voxpupuli/puppet-redis/issues/166) +- Ownership problem with Ubuntu redis-server [\#150](https://github.com/voxpupuli/puppet-redis/issues/150) +- Parameters not valid for older Redis in config [\#111](https://github.com/voxpupuli/puppet-redis/issues/111) **Merged pull requests:** -- Update sort to specify key [\#199](https://github.com/arioch/puppet-redis/pull/199) ([petems](https://github.com/petems)) -- Adds tests for when Redis service is unmanaged [\#198](https://github.com/arioch/puppet-redis/pull/198) ([petems](https://github.com/petems)) -- Changing Travis back to Trusty [\#194](https://github.com/arioch/puppet-redis/pull/194) ([petems](https://github.com/petems)) -- Remove service notification [\#193](https://github.com/arioch/puppet-redis/pull/193) ([petems](https://github.com/petems)) -- Improves ulimit configuration [\#192](https://github.com/arioch/puppet-redis/pull/192) ([petems](https://github.com/petems)) -- Updates metadata supported versions [\#190](https://github.com/arioch/puppet-redis/pull/190) ([petems](https://github.com/petems)) -- Adds tests for Ubuntu 1404 and Trusty package [\#189](https://github.com/arioch/puppet-redis/pull/189) ([petems](https://github.com/petems)) -- Adds redis::administration class [\#188](https://github.com/arioch/puppet-redis/pull/188) ([petems](https://github.com/petems)) -- Adds logic for managing redis-sentinel package [\#187](https://github.com/arioch/puppet-redis/pull/187) ([petems](https://github.com/petems)) -- Bump to version 3.0.0 [\#186](https://github.com/arioch/puppet-redis/pull/186) ([petems](https://github.com/petems)) -- Moves location of redis-sentinel file [\#184](https://github.com/arioch/puppet-redis/pull/184) ([petems](https://github.com/petems)) -- \(testing\) Simplify command run by TravisCI [\#183](https://github.com/arioch/puppet-redis/pull/183) ([ghoneycutt](https://github.com/ghoneycutt)) -- Style [\#182](https://github.com/arioch/puppet-redis/pull/182) ([ghoneycutt](https://github.com/ghoneycutt)) -- Adds acceptance tests for the redisget\(\) function [\#181](https://github.com/arioch/puppet-redis/pull/181) ([petems](https://github.com/petems)) -- Add redisget\(\) [\#179](https://github.com/arioch/puppet-redis/pull/179) ([ghoneycutt](https://github.com/ghoneycutt)) -- Fixes ordering of Apt repos [\#178](https://github.com/arioch/puppet-redis/pull/178) ([petems](https://github.com/petems)) -- Add 2.4.10 config file for CentOS 6 [\#177](https://github.com/arioch/puppet-redis/pull/177) ([petems](https://github.com/petems)) -- Refactoring common code patterns [\#174](https://github.com/arioch/puppet-redis/pull/174) ([petems](https://github.com/petems)) -- Changes permission on /var/run/ directory [\#173](https://github.com/arioch/puppet-redis/pull/173) ([petems](https://github.com/petems)) -- Bump Beaker Ruby versions [\#172](https://github.com/arioch/puppet-redis/pull/172) ([petems](https://github.com/petems)) -- Fixes sentinel installation on Debian flavours [\#171](https://github.com/arioch/puppet-redis/pull/171) ([petems](https://github.com/petems)) -- Adds vagrant beaker images [\#170](https://github.com/arioch/puppet-redis/pull/170) ([petems](https://github.com/petems)) -- Adds acceptance test for master/slave testing [\#168](https://github.com/arioch/puppet-redis/pull/168) ([petems](https://github.com/petems)) -- Renames spec file [\#165](https://github.com/arioch/puppet-redis/pull/165) ([petems](https://github.com/petems)) -- Adds specific versions to fixtures 
[\#164](https://github.com/arioch/puppet-redis/pull/164) ([petems](https://github.com/petems)) -- Changes for RHEL-ish specific configuration [\#162](https://github.com/arioch/puppet-redis/pull/162) ([petems](https://github.com/petems)) -- Changes CentOS Docker images [\#160](https://github.com/arioch/puppet-redis/pull/160) ([petems](https://github.com/petems)) -- Updates fact for CentOS 6 [\#159](https://github.com/arioch/puppet-redis/pull/159) ([petems](https://github.com/petems)) -- Fixes lint arrow errors [\#158](https://github.com/arioch/puppet-redis/pull/158) ([petems](https://github.com/petems)) -- Fixes ownership issue on Ubuntu [\#157](https://github.com/arioch/puppet-redis/pull/157) ([petems](https://github.com/petems)) -- README lint [\#155](https://github.com/arioch/puppet-redis/pull/155) ([matonb](https://github.com/matonb)) -- Archlinux: Added tests and update config\_dir parameter [\#149](https://github.com/arioch/puppet-redis/pull/149) ([bartjanssens92](https://github.com/bartjanssens92)) -- Add CHANGELOG [\#148](https://github.com/arioch/puppet-redis/pull/148) ([petems](https://github.com/petems)) -- Added Archlinux as supported OS [\#147](https://github.com/arioch/puppet-redis/pull/147) ([bartjanssens92](https://github.com/bartjanssens92)) - -## [1.2.4](https://github.com/arioch/puppet-redis/tree/1.2.4) (2016-12-05) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.2.3...1.2.4) +- Update sort to specify key [\#199](https://github.com/voxpupuli/puppet-redis/pull/199) ([petems](https://github.com/petems)) +- Adds tests for when Redis service is unmanaged [\#198](https://github.com/voxpupuli/puppet-redis/pull/198) ([petems](https://github.com/petems)) +- Changing Travis back to Trusty [\#194](https://github.com/voxpupuli/puppet-redis/pull/194) ([petems](https://github.com/petems)) +- Remove service notification [\#193](https://github.com/voxpupuli/puppet-redis/pull/193) ([petems](https://github.com/petems)) +- Improves ulimit configuration [\#192](https://github.com/voxpupuli/puppet-redis/pull/192) ([petems](https://github.com/petems)) +- Updates metadata supported versions [\#190](https://github.com/voxpupuli/puppet-redis/pull/190) ([petems](https://github.com/petems)) +- Adds tests for Ubuntu 1404 and Trusty package [\#189](https://github.com/voxpupuli/puppet-redis/pull/189) ([petems](https://github.com/petems)) +- Adds redis::administration class [\#188](https://github.com/voxpupuli/puppet-redis/pull/188) ([petems](https://github.com/petems)) +- Adds logic for managing redis-sentinel package [\#187](https://github.com/voxpupuli/puppet-redis/pull/187) ([petems](https://github.com/petems)) +- Bump to version 3.0.0 [\#186](https://github.com/voxpupuli/puppet-redis/pull/186) ([petems](https://github.com/petems)) +- Moves location of redis-sentinel file [\#184](https://github.com/voxpupuli/puppet-redis/pull/184) ([petems](https://github.com/petems)) +- \(testing\) Simplify command run by TravisCI [\#183](https://github.com/voxpupuli/puppet-redis/pull/183) ([ghoneycutt](https://github.com/ghoneycutt)) +- Style [\#182](https://github.com/voxpupuli/puppet-redis/pull/182) ([ghoneycutt](https://github.com/ghoneycutt)) +- Adds acceptance tests for the redisget\(\) function [\#181](https://github.com/voxpupuli/puppet-redis/pull/181) ([petems](https://github.com/petems)) +- Add redisget\(\) [\#179](https://github.com/voxpupuli/puppet-redis/pull/179) ([ghoneycutt](https://github.com/ghoneycutt)) +- Fixes ordering of Apt repos 
[\#178](https://github.com/voxpupuli/puppet-redis/pull/178) ([petems](https://github.com/petems)) +- Add 2.4.10 config file for CentOS 6 [\#177](https://github.com/voxpupuli/puppet-redis/pull/177) ([petems](https://github.com/petems)) +- Refactoring common code patterns [\#174](https://github.com/voxpupuli/puppet-redis/pull/174) ([petems](https://github.com/petems)) +- Changes permission on /var/run/ directory [\#173](https://github.com/voxpupuli/puppet-redis/pull/173) ([petems](https://github.com/petems)) +- Bump Beaker Ruby versions [\#172](https://github.com/voxpupuli/puppet-redis/pull/172) ([petems](https://github.com/petems)) +- Fixes sentinel installation on Debian flavours [\#171](https://github.com/voxpupuli/puppet-redis/pull/171) ([petems](https://github.com/petems)) +- Adds vagrant beaker images [\#170](https://github.com/voxpupuli/puppet-redis/pull/170) ([petems](https://github.com/petems)) +- Adds acceptance test for master/slave testing [\#168](https://github.com/voxpupuli/puppet-redis/pull/168) ([petems](https://github.com/petems)) +- Renames spec file [\#165](https://github.com/voxpupuli/puppet-redis/pull/165) ([petems](https://github.com/petems)) +- Adds specific versions to fixtures [\#164](https://github.com/voxpupuli/puppet-redis/pull/164) ([petems](https://github.com/petems)) +- Changes for RHEL-ish specific configuration [\#162](https://github.com/voxpupuli/puppet-redis/pull/162) ([petems](https://github.com/petems)) +- Changes CentOS Docker images [\#160](https://github.com/voxpupuli/puppet-redis/pull/160) ([petems](https://github.com/petems)) +- Updates fact for CentOS 6 [\#159](https://github.com/voxpupuli/puppet-redis/pull/159) ([petems](https://github.com/petems)) +- Fixes lint arrow errors [\#158](https://github.com/voxpupuli/puppet-redis/pull/158) ([petems](https://github.com/petems)) +- README lint [\#155](https://github.com/voxpupuli/puppet-redis/pull/155) ([matonb](https://github.com/matonb)) +- Archlinux: Added tests and update config\_dir parameter [\#149](https://github.com/voxpupuli/puppet-redis/pull/149) ([bartjanssens92](https://github.com/bartjanssens92)) +- Add CHANGELOG [\#148](https://github.com/voxpupuli/puppet-redis/pull/148) ([petems](https://github.com/petems)) +- Added Archlinux as supported OS [\#147](https://github.com/voxpupuli/puppet-redis/pull/147) ([bartjanssens92](https://github.com/bartjanssens92)) + +## [1.2.4](https://github.com/voxpupuli/puppet-redis/tree/1.2.4) (2016-12-05) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.2.3...1.2.4) **Implemented enhancements:** -- Speed up Travis [\#118](https://github.com/arioch/puppet-redis/issues/118) +- Speed up Travis [\#118](https://github.com/voxpupuli/puppet-redis/issues/118) **Fixed bugs:** -- Wrong redis.conf after c45049986a7fcb1c9a0591de123c6bf97c761355 [\#142](https://github.com/arioch/puppet-redis/issues/142) -- powerstack.org - No longer hosted [\#103](https://github.com/arioch/puppet-redis/issues/103) +- Wrong redis.conf after c45049986a7fcb1c9a0591de123c6bf97c761355 [\#142](https://github.com/voxpupuli/puppet-redis/issues/142) +- powerstack.org - No longer hosted [\#103](https://github.com/voxpupuli/puppet-redis/issues/103) **Closed issues:** -- redis.conf under /etc/redis.conf [\#81](https://github.com/arioch/puppet-redis/issues/81) -- Add socket option [\#79](https://github.com/arioch/puppet-redis/issues/79) -- preinstall.pp fails on CEntOS 6.5 and Puppet Enterprise 2.7. 
[\#72](https://github.com/arioch/puppet-redis/issues/72) -- How do I change from powerstack.org repo if I need to? Should I just edit manifests/preinstall.pp? [\#68](https://github.com/arioch/puppet-redis/issues/68) -- puppet-redis || every time when puppet runs, the service restarts [\#59](https://github.com/arioch/puppet-redis/issues/59) -- 'manage\_repo =\> true' causes run to fail because add-apt-repository command isn't available [\#49](https://github.com/arioch/puppet-redis/issues/49) +- redis.conf under /etc/redis.conf [\#81](https://github.com/voxpupuli/puppet-redis/issues/81) +- Add socket option [\#79](https://github.com/voxpupuli/puppet-redis/issues/79) +- preinstall.pp fails on CEntOS 6.5 and Puppet Enterprise 2.7. [\#72](https://github.com/voxpupuli/puppet-redis/issues/72) +- How do I change from powerstack.org repo if I need to? Should I just edit manifests/preinstall.pp? [\#68](https://github.com/voxpupuli/puppet-redis/issues/68) +- puppet-redis || every time when puppet runs, the service restarts [\#59](https://github.com/voxpupuli/puppet-redis/issues/59) +- 'manage\_repo =\> true' causes run to fail because add-apt-repository command isn't available [\#49](https://github.com/voxpupuli/puppet-redis/issues/49) **Merged pull requests:** -- For folks that do not use redis to cache to disk [\#144](https://github.com/arioch/puppet-redis/pull/144) ([petems](https://github.com/petems)) -- Revert "Changes templates to use scope" [\#143](https://github.com/arioch/puppet-redis/pull/143) ([petems](https://github.com/petems)) -- Update sentinel.pp [\#141](https://github.com/arioch/puppet-redis/pull/141) ([xprntl](https://github.com/xprntl)) -- Manage workdir and permissions [\#138](https://github.com/arioch/puppet-redis/pull/138) ([petems](https://github.com/petems)) -- Adds minimum versions parameters [\#137](https://github.com/arioch/puppet-redis/pull/137) ([petems](https://github.com/petems)) -- Adds redis-server version fact [\#136](https://github.com/arioch/puppet-redis/pull/136) ([petems](https://github.com/petems)) -- adding /var/run/redis for Debian based hosts [\#135](https://github.com/arioch/puppet-redis/pull/135) ([petems](https://github.com/petems)) -- Refactor unit tests [\#134](https://github.com/arioch/puppet-redis/pull/134) ([petems](https://github.com/petems)) -- Update acceptance tests [\#133](https://github.com/arioch/puppet-redis/pull/133) ([petems](https://github.com/petems)) -- Move Repo for RHEL systems to EPEL [\#127](https://github.com/arioch/puppet-redis/pull/127) ([petems](https://github.com/petems)) -- Fix beaker [\#126](https://github.com/arioch/puppet-redis/pull/126) ([petems](https://github.com/petems)) -- Speed up Travis even more [\#125](https://github.com/arioch/puppet-redis/pull/125) ([petems](https://github.com/petems)) -- Add fast finish to Travis [\#124](https://github.com/arioch/puppet-redis/pull/124) ([petems](https://github.com/petems)) -- Changes package installation [\#123](https://github.com/arioch/puppet-redis/pull/123) ([petems](https://github.com/petems)) -- Fix Beaker settings [\#122](https://github.com/arioch/puppet-redis/pull/122) ([petems](https://github.com/petems)) -- Fixes gpg key for DotDeb [\#121](https://github.com/arioch/puppet-redis/pull/121) ([petems](https://github.com/petems)) -- Sent bind address rebase [\#120](https://github.com/arioch/puppet-redis/pull/120) ([petems](https://github.com/petems)) -- Changes templates to use scope [\#119](https://github.com/arioch/puppet-redis/pull/119) ([petems](https://github.com/petems)) 
-- Add save interval squash [\#117](https://github.com/arioch/puppet-redis/pull/117) ([petems](https://github.com/petems)) -- FreeBSD fixes [\#116](https://github.com/arioch/puppet-redis/pull/116) ([petems](https://github.com/petems)) -- Consolidate travis and testing [\#115](https://github.com/arioch/puppet-redis/pull/115) ([petems](https://github.com/petems)) -- Fix specs [\#114](https://github.com/arioch/puppet-redis/pull/114) ([Phil-Friderici](https://github.com/Phil-Friderici)) -- Changed default service\_hasstatus variable from false to true for Debian params. [\#106](https://github.com/arioch/puppet-redis/pull/106) ([mlipiec](https://github.com/mlipiec)) -- Remove single quotes around variable [\#101](https://github.com/arioch/puppet-redis/pull/101) ([rorybrowne](https://github.com/rorybrowne)) - -## [1.2.3](https://github.com/arioch/puppet-redis/tree/1.2.3) (2016-09-19) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.2.2...1.2.3) +- For folks that do not use redis to cache to disk [\#144](https://github.com/voxpupuli/puppet-redis/pull/144) ([petems](https://github.com/petems)) +- Revert "Changes templates to use scope" [\#143](https://github.com/voxpupuli/puppet-redis/pull/143) ([petems](https://github.com/petems)) +- Update sentinel.pp [\#141](https://github.com/voxpupuli/puppet-redis/pull/141) ([xprntl](https://github.com/xprntl)) +- Manage workdir and permissions [\#138](https://github.com/voxpupuli/puppet-redis/pull/138) ([petems](https://github.com/petems)) +- Adds minimum versions parameters [\#137](https://github.com/voxpupuli/puppet-redis/pull/137) ([petems](https://github.com/petems)) +- Adds redis-server version fact [\#136](https://github.com/voxpupuli/puppet-redis/pull/136) ([petems](https://github.com/petems)) +- adding /var/run/redis for Debian based hosts [\#135](https://github.com/voxpupuli/puppet-redis/pull/135) ([petems](https://github.com/petems)) +- Refactor unit tests [\#134](https://github.com/voxpupuli/puppet-redis/pull/134) ([petems](https://github.com/petems)) +- Update acceptance tests [\#133](https://github.com/voxpupuli/puppet-redis/pull/133) ([petems](https://github.com/petems)) +- Speed up Travis even more [\#125](https://github.com/voxpupuli/puppet-redis/pull/125) ([petems](https://github.com/petems)) +- Add fast finish to Travis [\#124](https://github.com/voxpupuli/puppet-redis/pull/124) ([petems](https://github.com/petems)) +- Changes package installation [\#123](https://github.com/voxpupuli/puppet-redis/pull/123) ([petems](https://github.com/petems)) +- Fix Beaker settings [\#122](https://github.com/voxpupuli/puppet-redis/pull/122) ([petems](https://github.com/petems)) +- Fixes gpg key for DotDeb [\#121](https://github.com/voxpupuli/puppet-redis/pull/121) ([petems](https://github.com/petems)) +- Sent bind address rebase [\#120](https://github.com/voxpupuli/puppet-redis/pull/120) ([petems](https://github.com/petems)) +- Changes templates to use scope [\#119](https://github.com/voxpupuli/puppet-redis/pull/119) ([petems](https://github.com/petems)) +- Add save interval squash [\#117](https://github.com/voxpupuli/puppet-redis/pull/117) ([petems](https://github.com/petems)) +- FreeBSD fixes [\#116](https://github.com/voxpupuli/puppet-redis/pull/116) ([petems](https://github.com/petems)) +- Consolidate travis and testing [\#115](https://github.com/voxpupuli/puppet-redis/pull/115) ([petems](https://github.com/petems)) +- Fix specs [\#114](https://github.com/voxpupuli/puppet-redis/pull/114) 
([Phil-Friderici](https://github.com/Phil-Friderici)) +- Remove single quotes around variable [\#101](https://github.com/voxpupuli/puppet-redis/pull/101) ([rorybrowne](https://github.com/rorybrowne)) + +## [1.2.3](https://github.com/voxpupuli/puppet-redis/tree/1.2.3) (2016-09-19) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.2.2...1.2.3) **Merged pull requests:** -- Adds spec for unixsocket and perms [\#99](https://github.com/arioch/puppet-redis/pull/99) ([petems](https://github.com/petems)) -- Added the ability to configure Unix socket binding [\#97](https://github.com/arioch/puppet-redis/pull/97) ([gcelestine](https://github.com/gcelestine)) -- Lint and English cleanup in the redis.conf [\#93](https://github.com/arioch/puppet-redis/pull/93) ([ryayon](https://github.com/ryayon)) -- Added more configuration options to redis.conf [\#90](https://github.com/arioch/puppet-redis/pull/90) ([hanej](https://github.com/hanej)) -- Make notification of service optional [\#89](https://github.com/arioch/puppet-redis/pull/89) ([michaeltchapman](https://github.com/michaeltchapman)) +- Adds spec for unixsocket and perms [\#99](https://github.com/voxpupuli/puppet-redis/pull/99) ([petems](https://github.com/petems)) +- Added the ability to configure Unix socket binding [\#97](https://github.com/voxpupuli/puppet-redis/pull/97) ([gcelestine](https://github.com/gcelestine)) +- Lint and English cleanup in the redis.conf [\#93](https://github.com/voxpupuli/puppet-redis/pull/93) ([ryayon](https://github.com/ryayon)) +- Added more configuration options to redis.conf [\#90](https://github.com/voxpupuli/puppet-redis/pull/90) ([hanej](https://github.com/hanej)) +- Make notification of service optional [\#89](https://github.com/voxpupuli/puppet-redis/pull/89) ([michaeltchapman](https://github.com/michaeltchapman)) -## [1.2.2](https://github.com/arioch/puppet-redis/tree/1.2.2) (2016-03-17) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.2.1...1.2.2) +## [1.2.2](https://github.com/voxpupuli/puppet-redis/tree/1.2.2) (2016-03-17) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.2.1...1.2.2) **Closed issues:** -- Error 400 on SERVER: Puppet::Parser::AST::Resource failed with error ArgumentError: Invalid resource type redis [\#86](https://github.com/arioch/puppet-redis/issues/86) -- Potential bug: `64min` should read `64mb` [\#73](https://github.com/arioch/puppet-redis/issues/73) -- Typo on sentinel.pp on if defined [\#69](https://github.com/arioch/puppet-redis/issues/69) -- Does't configure EPEL repository on CentOS 7 [\#61](https://github.com/arioch/puppet-redis/issues/61) +- Error 400 on SERVER: Puppet::Parser::AST::Resource failed with error ArgumentError: Invalid resource type redis [\#86](https://github.com/voxpupuli/puppet-redis/issues/86) +- Potential bug: `64min` should read `64mb` [\#73](https://github.com/voxpupuli/puppet-redis/issues/73) +- Typo on sentinel.pp on if defined [\#69](https://github.com/voxpupuli/puppet-redis/issues/69) +- Does't configure EPEL repository on CentOS 7 [\#61](https://github.com/voxpupuli/puppet-redis/issues/61) + +## [1.2.1](https://github.com/voxpupuli/puppet-redis/tree/1.2.1) (2015-12-09) -## [1.2.1](https://github.com/arioch/puppet-redis/tree/1.2.1) (2015-12-09) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.2.0...1.2.1) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.2.0...1.2.1) **Merged pull requests:** -- Fix puppet-redis for Redis \< 3 
[\#77](https://github.com/arioch/puppet-redis/pull/77) ([EmilienM](https://github.com/EmilienM)) +- Fix puppet-redis for Redis \< 3 [\#77](https://github.com/voxpupuli/puppet-redis/pull/77) ([EmilienM](https://github.com/EmilienM)) + +## [1.2.0](https://github.com/voxpupuli/puppet-redis/tree/1.2.0) (2015-12-03) -## [1.2.0](https://github.com/arioch/puppet-redis/tree/1.2.0) (2015-12-03) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.1.3...1.2.0) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.1.3...1.2.0) **Closed issues:** -- $daemonize is defaulted to 'false' on Redhat OS, which causes service command to hang [\#64](https://github.com/arioch/puppet-redis/issues/64) -- Redis service start fail on Debian stable \(Wheezy 7.0\) [\#52](https://github.com/arioch/puppet-redis/issues/52) +- $daemonize is defaulted to 'false' on Redhat OS, which causes service command to hang [\#64](https://github.com/voxpupuli/puppet-redis/issues/64) +- Redis service start fail on Debian stable \(Wheezy 7.0\) [\#52](https://github.com/voxpupuli/puppet-redis/issues/52) **Merged pull requests:** -- Redis Cluster 3.0 Feature [\#71](https://github.com/arioch/puppet-redis/pull/71) ([claudio-walser](https://github.com/claudio-walser)) -- Fix Support for EL7 and Puppet 4 [\#66](https://github.com/arioch/puppet-redis/pull/66) ([trlinkin](https://github.com/trlinkin)) -- Add a option to override the service provider [\#63](https://github.com/arioch/puppet-redis/pull/63) ([nerzhul](https://github.com/nerzhul)) -- add support for hz option [\#50](https://github.com/arioch/puppet-redis/pull/50) ([nerzhul](https://github.com/nerzhul)) +- Redis Cluster 3.0 Feature [\#71](https://github.com/voxpupuli/puppet-redis/pull/71) ([claudio-walser](https://github.com/claudio-walser)) +- Fix Support for EL7 and Puppet 4 [\#66](https://github.com/voxpupuli/puppet-redis/pull/66) ([trlinkin](https://github.com/trlinkin)) +- Add a option to override the service provider [\#63](https://github.com/voxpupuli/puppet-redis/pull/63) ([nerzhul](https://github.com/nerzhul)) +- add support for hz option [\#50](https://github.com/voxpupuli/puppet-redis/pull/50) ([nerzhul](https://github.com/nerzhul)) -## [1.1.3](https://github.com/arioch/puppet-redis/tree/1.1.3) (2015-08-19) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.1.2...1.1.3) +## [1.1.3](https://github.com/voxpupuli/puppet-redis/tree/1.1.3) (2015-08-19) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.1.2...1.1.3) **Closed issues:** -- Unable to bring in as a dependency [\#47](https://github.com/arioch/puppet-redis/issues/47) +- Unable to bring in as a dependency [\#47](https://github.com/voxpupuli/puppet-redis/issues/47) **Merged pull requests:** -- Bump puppetlabs-stdlib version spec [\#48](https://github.com/arioch/puppet-redis/pull/48) ([gblair](https://github.com/gblair)) +- Bump puppetlabs-stdlib version spec [\#48](https://github.com/voxpupuli/puppet-redis/pull/48) ([gblair](https://github.com/gblair)) + +## [1.1.2](https://github.com/voxpupuli/puppet-redis/tree/1.1.2) (2015-08-06) -## [1.1.2](https://github.com/arioch/puppet-redis/tree/1.1.2) (2015-08-06) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.1.1...1.1.2) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.1.1...1.1.2) **Closed issues:** -- Error on CentOS 7 when manage\_repo: true [\#44](https://github.com/arioch/puppet-redis/issues/44) +- Error on CentOS 7 when manage\_repo: true 
[\#44](https://github.com/voxpupuli/puppet-redis/issues/44) -## [1.1.1](https://github.com/arioch/puppet-redis/tree/1.1.1) (2015-08-04) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.1.0...1.1.1) +## [1.1.1](https://github.com/voxpupuli/puppet-redis/tree/1.1.1) (2015-08-04) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.1.0...1.1.1) **Closed issues:** -- Commit edd7cb55931fe0336bfee475c738ac4b91308f98 seems to be pasting undef params into redis.conf [\#41](https://github.com/arioch/puppet-redis/issues/41) +- Commit edd7cb55931fe0336bfee475c738ac4b91308f98 seems to be pasting undef params into redis.conf [\#41](https://github.com/voxpupuli/puppet-redis/issues/41) **Merged pull requests:** -- Save db to disk [\#46](https://github.com/arioch/puppet-redis/pull/46) ([adrian-balcan-ygt](https://github.com/adrian-balcan-ygt)) -- Use puppetlabs\_spec\_helper for testing [\#45](https://github.com/arioch/puppet-redis/pull/45) ([jlyheden](https://github.com/jlyheden)) -- copy variables used in template to local scope [\#42](https://github.com/arioch/puppet-redis/pull/42) ([eoly](https://github.com/eoly)) +- Save db to disk [\#46](https://github.com/voxpupuli/puppet-redis/pull/46) ([adrian-balcan-ygt](https://github.com/adrian-balcan-ygt)) +- Use puppetlabs\_spec\_helper for testing [\#45](https://github.com/voxpupuli/puppet-redis/pull/45) ([jlyheden](https://github.com/jlyheden)) +- copy variables used in template to local scope [\#42](https://github.com/voxpupuli/puppet-redis/pull/42) ([eoly](https://github.com/eoly)) + +## [1.1.0](https://github.com/voxpupuli/puppet-redis/tree/1.1.0) (2015-06-22) -## [1.1.0](https://github.com/arioch/puppet-redis/tree/1.1.0) (2015-06-22) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.7...1.1.0) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.7...1.1.0) **Merged pull requests:** -- access out-of-scope variables via the scope.lookupvar method [\#40](https://github.com/arioch/puppet-redis/pull/40) ([eoly](https://github.com/eoly)) +- access out-of-scope variables via the scope.lookupvar method [\#40](https://github.com/voxpupuli/puppet-redis/pull/40) ([eoly](https://github.com/eoly)) -## [1.0.7](https://github.com/arioch/puppet-redis/tree/1.0.7) (2015-06-02) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.6...1.0.7) +## [1.0.7](https://github.com/voxpupuli/puppet-redis/tree/1.0.7) (2015-06-02) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.6...1.0.7) **Fixed bugs:** -- Sentinel init/upstart file doesn't exist [\#18](https://github.com/arioch/puppet-redis/issues/18) +- Sentinel init/upstart file doesn't exist [\#18](https://github.com/voxpupuli/puppet-redis/issues/18) **Closed issues:** -- Default config\_owner of redis [\#39](https://github.com/arioch/puppet-redis/issues/39) -- Duplicate decleration Package\[redis\] when both include redis and redis::sentinel [\#36](https://github.com/arioch/puppet-redis/issues/36) -- Does not work with Debian 7.8 with redis version 2.4.14-1 [\#24](https://github.com/arioch/puppet-redis/issues/24) +- Default config\_owner of redis [\#39](https://github.com/voxpupuli/puppet-redis/issues/39) +- Duplicate decleration Package\[redis\] when both include redis and redis::sentinel [\#36](https://github.com/voxpupuli/puppet-redis/issues/36) +- Does not work with Debian 7.8 with redis version 2.4.14-1 [\#24](https://github.com/voxpupuli/puppet-redis/issues/24) **Merged pull requests:** -- Fixed duplicate 
declaration of package [\#38](https://github.com/arioch/puppet-redis/pull/38) ([raiblue](https://github.com/raiblue)) -- fix issue with params.pp with strict\_variables enabled [\#35](https://github.com/arioch/puppet-redis/pull/35) ([eoly](https://github.com/eoly)) -- Enable to not manage the Redis service [\#34](https://github.com/arioch/puppet-redis/pull/34) ([Spredzy](https://github.com/Spredzy)) -- Suse conf file fix [\#33](https://github.com/arioch/puppet-redis/pull/33) ([christofhaerens](https://github.com/christofhaerens)) -- added Suse osfamily [\#32](https://github.com/arioch/puppet-redis/pull/32) ([christofhaerens](https://github.com/christofhaerens)) +- Fixed duplicate declaration of package [\#38](https://github.com/voxpupuli/puppet-redis/pull/38) ([raiblue](https://github.com/raiblue)) +- fix issue with params.pp with strict\_variables enabled [\#35](https://github.com/voxpupuli/puppet-redis/pull/35) ([eoly](https://github.com/eoly)) +- Enable to not manage the Redis service [\#34](https://github.com/voxpupuli/puppet-redis/pull/34) ([Spredzy](https://github.com/Spredzy)) +- Suse conf file fix [\#33](https://github.com/voxpupuli/puppet-redis/pull/33) ([christofhaerens](https://github.com/christofhaerens)) +- added Suse osfamily [\#32](https://github.com/voxpupuli/puppet-redis/pull/32) ([christofhaerens](https://github.com/christofhaerens)) -## [1.0.6](https://github.com/arioch/puppet-redis/tree/1.0.6) (2015-05-05) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.5...1.0.6) +## [1.0.6](https://github.com/voxpupuli/puppet-redis/tree/1.0.6) (2015-05-05) -## [1.0.5](https://github.com/arioch/puppet-redis/tree/1.0.5) (2015-03-30) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.4...1.0.5) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.5...1.0.6) -**Merged pull requests:** +## [1.0.5](https://github.com/voxpupuli/puppet-redis/tree/1.0.5) (2015-03-30) -- adding anchors to make the ensure\_resource behaive [\#30](https://github.com/arioch/puppet-redis/pull/30) ([shawn-sterling](https://github.com/shawn-sterling)) -- Sentinel auth pass [\#29](https://github.com/arioch/puppet-redis/pull/29) ([Vincent--](https://github.com/Vincent--)) -- More specific include and class chaining. 
[\#27](https://github.com/arioch/puppet-redis/pull/27) ([systemsathomesdotcom](https://github.com/systemsathomesdotcom)) -- Support notification script [\#26](https://github.com/arioch/puppet-redis/pull/26) ([Spredzy](https://github.com/Spredzy)) -- Add basic FreeBSD support [\#25](https://github.com/arioch/puppet-redis/pull/25) ([arioch](https://github.com/arioch)) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.4...1.0.5) -## [1.0.4](https://github.com/arioch/puppet-redis/tree/1.0.4) (2015-01-19) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.3...1.0.4) +## [1.0.4](https://github.com/voxpupuli/puppet-redis/tree/1.0.4) (2015-01-19) -**Merged pull requests:** +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.3...1.0.4) -- Add an initscript for sentinel on apt-based systems [\#23](https://github.com/arioch/puppet-redis/pull/23) ([cdent](https://github.com/cdent)) -- fix trailing comma in metadata.json [\#22](https://github.com/arioch/puppet-redis/pull/22) ([fivetanley](https://github.com/fivetanley)) -- metadata: add stdlib as a requirement [\#21](https://github.com/arioch/puppet-redis/pull/21) ([EmilienM](https://github.com/EmilienM)) -- Avoid resource dependency when having both Redis Server & Sentinel [\#20](https://github.com/arioch/puppet-redis/pull/20) ([EmilienM](https://github.com/EmilienM)) -- Make config file owner dependency on redis package explicit [\#17](https://github.com/arioch/puppet-redis/pull/17) ([cdent](https://github.com/cdent)) -- Make sentinel maninfest ensure the redis package [\#16](https://github.com/arioch/puppet-redis/pull/16) ([cdent](https://github.com/cdent)) -- Ensure the permissions of a new redis-sentinel.conf [\#15](https://github.com/arioch/puppet-redis/pull/15) ([cdent](https://github.com/cdent)) +## [1.0.3](https://github.com/voxpupuli/puppet-redis/tree/1.0.3) (2015-01-05) -## [1.0.3](https://github.com/arioch/puppet-redis/tree/1.0.3) (2015-01-05) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.2...1.0.3) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.2...1.0.3) **Implemented enhancements:** -- Feature Request: support for redis-sentinel [\#13](https://github.com/arioch/puppet-redis/issues/13) +- Feature Request: support for redis-sentinel [\#13](https://github.com/voxpupuli/puppet-redis/issues/13) -## [1.0.2](https://github.com/arioch/puppet-redis/tree/1.0.2) (2014-12-17) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.1...1.0.2) +## [1.0.2](https://github.com/voxpupuli/puppet-redis/tree/1.0.2) (2014-12-17) -**Merged pull requests:** +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.1...1.0.2) -- Sentinel support [\#14](https://github.com/arioch/puppet-redis/pull/14) ([cdent](https://github.com/cdent)) +## [1.0.1](https://github.com/voxpupuli/puppet-redis/tree/1.0.1) (2014-10-22) -## [1.0.1](https://github.com/arioch/puppet-redis/tree/1.0.1) (2014-10-22) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/1.0.0...1.0.1) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/1.0.0...1.0.1) -## [1.0.0](https://github.com/arioch/puppet-redis/tree/1.0.0) (2014-10-22) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.10...1.0.0) +## [1.0.0](https://github.com/voxpupuli/puppet-redis/tree/1.0.0) (2014-10-22) -## [0.0.10](https://github.com/arioch/puppet-redis/tree/0.0.10) (2014-08-29) -[Full 
Changelog](https://github.com/arioch/puppet-redis/compare/0.0.9...0.0.10) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.10...1.0.0) -## [0.0.9](https://github.com/arioch/puppet-redis/tree/0.0.9) (2014-08-29) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.8...0.0.9) +## [0.0.10](https://github.com/voxpupuli/puppet-redis/tree/0.0.10) (2014-08-29) -**Closed issues:** +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.9...0.0.10) -- Amazon AMI Installation/Initialization Issue [\#11](https://github.com/arioch/puppet-redis/issues/11) +## [0.0.9](https://github.com/voxpupuli/puppet-redis/tree/0.0.9) (2014-08-29) -**Merged pull requests:** +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.8...0.0.9) -- Add Amazon support [\#12](https://github.com/arioch/puppet-redis/pull/12) ([arioch](https://github.com/arioch)) -- Notify service [\#10](https://github.com/arioch/puppet-redis/pull/10) ([zebzeb](https://github.com/zebzeb)) +**Closed issues:** -## [0.0.8](https://github.com/arioch/puppet-redis/tree/0.0.8) (2014-03-27) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.7...0.0.8) +- Amazon AMI Installation/Initialization Issue [\#11](https://github.com/voxpupuli/puppet-redis/issues/11) -**Closed issues:** +## [0.0.8](https://github.com/voxpupuli/puppet-redis/tree/0.0.8) (2014-03-27) -- Default bind on public interface [\#6](https://github.com/arioch/puppet-redis/issues/6) -- Install a specific version [\#4](https://github.com/arioch/puppet-redis/issues/4) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.7...0.0.8) -**Merged pull requests:** +**Closed issues:** -- Syslog [\#9](https://github.com/arioch/puppet-redis/pull/9) ([zebzeb](https://github.com/zebzeb)) -- add extra config file parameter [\#8](https://github.com/arioch/puppet-redis/pull/8) ([zebzeb](https://github.com/zebzeb)) -- pass conf template as a parameter [\#7](https://github.com/arioch/puppet-redis/pull/7) ([zebzeb](https://github.com/zebzeb)) +- Default bind on public interface [\#6](https://github.com/voxpupuli/puppet-redis/issues/6) +- Install a specific version [\#4](https://github.com/voxpupuli/puppet-redis/issues/4) -## [0.0.7](https://github.com/arioch/puppet-redis/tree/0.0.7) (2014-01-13) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.6...0.0.7) +## [0.0.7](https://github.com/voxpupuli/puppet-redis/tree/0.0.7) (2014-01-13) -**Merged pull requests:** +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.6...0.0.7) -- bugfix: Added puppetlabs/apt depedency and missing class include [\#5](https://github.com/arioch/puppet-redis/pull/5) ([dgolja](https://github.com/dgolja)) +## [0.0.6](https://github.com/voxpupuli/puppet-redis/tree/0.0.6) (2013-08-07) -## [0.0.6](https://github.com/arioch/puppet-redis/tree/0.0.6) (2013-08-07) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.5...0.0.6) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.5...0.0.6) **Merged pull requests:** -- add ubuntu ppa repo support [\#3](https://github.com/arioch/puppet-redis/pull/3) ([nagas](https://github.com/nagas)) +- add ubuntu ppa repo support [\#3](https://github.com/voxpupuli/puppet-redis/pull/3) ([nagas](https://github.com/nagas)) + +## [0.0.5](https://github.com/voxpupuli/puppet-redis/tree/0.0.5) (2013-07-22) -## [0.0.5](https://github.com/arioch/puppet-redis/tree/0.0.5) (2013-07-22) -[Full 
Changelog](https://github.com/arioch/puppet-redis/compare/0.0.4...0.0.5) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.4...0.0.5) **Closed issues:** -- Redis config 2.6 not compatible with EPELs 2.4 on Centos6 [\#2](https://github.com/arioch/puppet-redis/issues/2) +- Redis config 2.6 not compatible with EPELs 2.4 on Centos6 [\#2](https://github.com/voxpupuli/puppet-redis/issues/2) -## [0.0.4](https://github.com/arioch/puppet-redis/tree/0.0.4) (2013-07-17) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.3...0.0.4) +## [0.0.4](https://github.com/voxpupuli/puppet-redis/tree/0.0.4) (2013-07-17) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.3...0.0.4) **Closed issues:** -- Default config path wrong for RedHat [\#1](https://github.com/arioch/puppet-redis/issues/1) +- Default config path wrong for RedHat [\#1](https://github.com/voxpupuli/puppet-redis/issues/1) + +## [0.0.3](https://github.com/voxpupuli/puppet-redis/tree/0.0.3) (2013-07-08) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.2...0.0.3) + +## [0.0.2](https://github.com/voxpupuli/puppet-redis/tree/0.0.2) (2013-06-19) + +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/0.0.1...0.0.2) -## [0.0.3](https://github.com/arioch/puppet-redis/tree/0.0.3) (2013-07-08) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.2...0.0.3) +## [0.0.1](https://github.com/voxpupuli/puppet-redis/tree/0.0.1) (2013-06-19) -## [0.0.2](https://github.com/arioch/puppet-redis/tree/0.0.2) (2013-06-19) -[Full Changelog](https://github.com/arioch/puppet-redis/compare/0.0.1...0.0.2) +[Full Changelog](https://github.com/voxpupuli/puppet-redis/compare/9eeef29abac112e9f44aa2d2c0ed6ea1f2617888...0.0.1) -## [0.0.1](https://github.com/arioch/puppet-redis/tree/0.0.1) (2013-06-19) -\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)* \ No newline at end of file +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..6fd6342 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +FROM ruby:2.5.3 + +WORKDIR /opt/puppet + +# https://github.com/puppetlabs/puppet/blob/06ad255754a38f22fb3a22c7c4f1e2ce453d01cb/lib/puppet/provider/service/runit.rb#L39 +RUN mkdir -p /etc/sv + +ARG PUPPET_VERSION="~> 6.0" +ARG PARALLEL_TEST_PROCESSORS=4 + +# Cache gems +COPY Gemfile . +RUN bundle install --without system_tests development release --path=${BUNDLE_PATH:-vendor/bundle} + +COPY . . 
+ +RUN bundle install +RUN bundle exec rake release_checks + +# Container should not saved +RUN exit 1 diff --git a/Gemfile b/Gemfile index 47f2821..187a851 100644 --- a/Gemfile +++ b/Gemfile @@ -1,57 +1,70 @@ source ENV['GEM_SOURCE'] || "https://rubygems.org" def location_for(place, fake_version = nil) if place =~ /^(git[:@][^#]*)#(.*)/ [fake_version, { :git => $1, :branch => $2, :require => false }].compact elsif place =~ /^file:\/\/(.*)/ ['>= 0', { :path => File.expand_path($1), :require => false }] else [place, { :require => false }] end end group :test do - gem 'puppetlabs_spec_helper', '~> 1.2.2', :require => false - gem 'rspec-puppet', :require => false, :git => 'https://github.com/rodjek/rspec-puppet.git' - gem 'rspec-puppet-facts', :require => false - gem 'rspec-puppet-utils', :require => false - gem 'puppet-lint-absolute_classname-check', :require => false - gem 'puppet-lint-leading_zero-check', :require => false - gem 'puppet-lint-trailing_comma-check', :require => false - gem 'puppet-lint-version_comparison-check', :require => false - gem 'puppet-lint-classes_and_types_beginning_with_digits-check', :require => false - gem 'puppet-lint-unquoted_string-check', :require => false - gem 'puppet-lint-variable_contains_upcase', :require => false - gem 'metadata-json-lint', :require => false - gem 'puppet-strings', '1.1.0', :require => false - gem 'puppet_facts', :require => false - gem 'rubocop-rspec', '~> 1.6', :require => false if RUBY_VERSION >= '2.3.0' - gem 'json_pure', '<= 2.0.1', :require => false if RUBY_VERSION < '2.0.0' - gem 'safe_yaml', '~> 1.0.4', :require => false - gem 'puppet-syntax', :require => false, git: 'https://github.com/gds-operations/puppet-syntax.git' - gem 'pry', :require => false - gem 'rb-readline', :require => false - gem 'redis', '3.3.3', :require => false - gem 'mock_redis', :require => false - gem 'rack', '1.6.8', :require => false - gem 'simp-rake-helpers', '3.6.0', :require => false + gem 'voxpupuli-test', '>= 1.0.0', :require => false + gem 'coveralls', :require => false + gem 'simplecov-console', :require => false + gem 'redis', :require => false + gem 'mock_redis', :require => false end group :development do - gem 'puppet-blacksmith' - gem 'github_changelog_generator', '1.13.2' + gem 'travis', :require => false + gem 'travis-lint', :require => false + gem 'guard-rake', :require => false + gem 'overcommit', '>= 0.39.1', :require => false end group :system_tests do - gem "beaker" - gem "beaker-rspec" - gem 'beaker-puppet_install_helper', :require => false - gem 'beaker-module_install_helper' - gem 'vagrant-wrapper' - gem 'simp-beaker-helpers', :git => 'https://github.com/petems/rubygem-simp-beaker-helpers' + gem 'winrm', :require => false + if beaker_version = ENV['BEAKER_VERSION'] + gem 'beaker', *location_for(beaker_version) + else + gem 'beaker', '>= 4.2.0', :require => false + end + if beaker_rspec_version = ENV['BEAKER_RSPEC_VERSION'] + gem 'beaker-rspec', *location_for(beaker_rspec_version) + else + gem 'beaker-rspec', :require => false + end + gem 'serverspec', :require => false + gem 'beaker-hostgenerator', '>= 1.1.22', :require => false + gem 'beaker-docker', :require => false + gem 'beaker-puppet', :require => false + gem 'beaker-puppet_install_helper', :require => false + gem 'beaker-module_install_helper', :require => false + gem 'rbnacl', '>= 4', :require => false + gem 'rbnacl-libsodium', :require => false + gem 'bcrypt_pbkdf', :require => false + gem 'ed25519', :require => false +end + +group :release do + gem 
'github_changelog_generator', :require => false, :git => 'https://github.com/voxpupuli/github-changelog-generator', :branch => 'voxpupuli_essential_fixes' + gem 'puppet-blacksmith', :require => false + gem 'voxpupuli-release', :require => false + gem 'puppet-strings', '>= 2.2', :require => false +end + + + +if facterversion = ENV['FACTER_GEM_VERSION'] + gem 'facter', facterversion.to_s, :require => false, :groups => [:test] +else + gem 'facter', :require => false, :groups => [:test] end -ENV['PUPPET_GEM_VERSION'].nil? ? puppetversion = '~> 4.0' : puppetversion = ENV['PUPPET_GEM_VERSION'].to_s +ENV['PUPPET_VERSION'].nil? ? puppetversion = '~> 6.0' : puppetversion = ENV['PUPPET_VERSION'].to_s gem 'puppet', puppetversion, :require => false, :groups => [:test] # vim: syntax=ruby diff --git a/LICENSE b/LICENSE index e525903..dce386a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,15 +1,204 @@ Copyright (C) 2012 Tom De Vylder Copyright (C) 2017 Garrett Honeycutt + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/README.md b/README.md index 971ad27..8762cda 100644 --- a/README.md +++ b/README.md @@ -1,160 +1,147 @@ # Puppet Redis -## Build status - -[![Build Status](https://travis-ci.org/arioch/puppet-redis.png?branch=master)](https://travis-ci.org/arioch/puppet-redis) +[![License](https://img.shields.io/github/license/voxpupuli/puppet-redis.svg)](https://github.com/voxpupuli/puppet-redis/blob/master/LICENSE) +[![Build Status](https://travis-ci.org/voxpupuli/puppet-redis.png?branch=master)](https://travis-ci.org/voxpupuli/puppet-redis) +[![Code Coverage](https://coveralls.io/repos/github/voxpupuli/puppet-redis/badge.svg?branch=master)](https://coveralls.io/github/voxpupuli/puppet-redis) +[![Puppet Forge](https://img.shields.io/puppetforge/v/puppet/redis.svg)](https://forge.puppetlabs.com/puppet/redis) +[![Puppet Forge - downloads](https://img.shields.io/puppetforge/dt/puppet/redis.svg)](https://forge.puppetlabs.com/puppet/redis) +[![Puppet Forge - endorsement](https://img.shields.io/puppetforge/e/puppet/redis.svg)](https://forge.puppetlabs.com/puppet/redis) +[![Puppet Forge - scores](https://img.shields.io/puppetforge/f/puppet/redis.svg)](https://forge.puppetlabs.com/puppet/redis) ## Example usage ### Standalone ```puppet include ::redis ``` ### Master node ```puppet class { '::redis': bind => '10.0.1.1', } ``` With authentication ```puppet class { '::redis': bind => '10.0.1.1', masterauth => 'secret', } ``` ### Slave node ```puppet class { '::redis': bind => '10.0.1.2', slaveof => '10.0.1.1 6379', } ``` With authentication ```puppet class { '::redis': bind => '10.0.1.2', slaveof => '10.0.1.1 6379', masterauth => 'secret', } ``` ### Redis 3.0 Clustering ```puppet class { '::redis': bind => '10.0.1.2', appendonly => true, cluster_enabled => true, cluster_config_file => 'nodes.conf', cluster_node_timeout => 5000, } ``` +### Multiple instances + + +```puppet +$listening_ports = [6379,6380,6381,6382] + +class { '::redis': + default_install => false, + service_enable => false, + service_ensure => 'stopped', +} + +$listening_ports.each |$port| { + $port_string = sprintf('%d',$port) + redis::instance { $port_string: + service_enable => true, + service_ensure => 'running', + port => $port, + bind => $facts['networking']['ip'], + dbfilename => "${port}-dump.rdb", + appendfilename => "${port}-appendonly.aof", + appendfsync => 'always', + require => Class['Redis'], 
+  } +} +``` + ### Manage repositories Disabled by default but if you really want the module to manage the required repositories you can use this snippet: ```puppet class { '::redis': manage_repo => true, } ``` On Ubuntu, "chris-lea/redis-server" ppa repo will be added. You can change it by using ppa_repo parameter: ```puppet class { '::redis': manage_repo => true, ppa_repo => 'ppa:rwky/redis', } ``` +**Warning**: note that it requires [puppetlabs/apt](https://forge.puppet.com/puppetlabs/apt) on Debian or Ubuntu distros. On Red Hat [puppet/epel](https://forge.puppet.com/puppet/epel) is needed unless the installation is using Software Collections. In that case it will install `centos-release-scl-rh` from CentOS extras. For RHEL or other RHEL-derivatives this isn't managed. + ### Redis Sentinel Optionally install and configure a redis-sentinel server. With default settings: ```puppet include ::redis::sentinel ``` With adjustments: ```puppet class { '::redis::sentinel': master_name => 'cow', redis_host => '192.168.1.5', failover_timeout => 30000, } ``` -## `redisget()` function +### Soft dependency -`redisget()` takes two or three arguments that are strings. The first is the key -to be looked up, the second is the URL to the Redis service and the -optional third argument is a default value to use if the key is not -found or connection to the Redis service cannot be made. +This module requires [camptocamp/systemd](https://forge.puppet.com/camptocamp/systemd) on Puppet versions older than 6.1.0. -Example of basic usage. +When managing the repo, it either needs [puppetlabs/apt](https://forge.puppet.com/puppetlabs/apt) or [puppet/epel](https://forge.puppet.com/puppet/epel). -```puppet -$version = redisget('version.myapp', 'redis://redis.example.com:6379') -``` - -Example with default value specified. This is useful to allow for cached -data in case Redis is not available. - -```puppet -$version = redisget('version.myapp', 'redis://redis.example.com:6379', $::myapp_version) -``` +## `redis::get()` function +This function is used to get data from Redis. You must have the 'redis' gem installed on your Puppet master. A short usage example is shown below. -## Unit testing - -Plain RSpec: - - $ rake spec - -Using bundle: - - $ bundle exec rake spec - -Test against a specific Puppet or Facter version: - - $ PUPPET_VERSION=3.2.1 bundle update && bundle exec rake spec - $ PUPPET_VERSION=4.10.0 bundle update && bundle exec rake spec - $ FACTER_VERSION=1.6.8 bundle update && bundle exec rake spec - -## Puppet 3 Support - -Puppet 3 is EOL as-of January 2017. The last release of this module that will -support Puppet 3.X and earlier will be the 3.X.X module releases. - -Module versions from 4.X.X onwards will use Puppet 4 only features and will not work with -earlier versions. - -We would recommend upgrading your Puppet agent to the latest release, as Puppet 4 comes with a load of awesome new features. - -If you're stuck with older Puppet, you could also fork the module from 3.0.0 and use your fork as a Puppet 3 supported version.
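A minimal usage sketch for `redis::get()`, assuming it keeps the argument order of the removed `redisget()` examples (key, Redis URL, optional default returned when the key is missing or Redis is unreachable); the key name, URL, and variable names below are placeholders, not part of the module:

```puppet
# Look up a key; returns undef when the key does not exist.
$version = redis::get('version.myapp', 'redis://redis.example.com:6379')

# With the optional third argument: a fallback value used when the key is
# absent or the Redis service cannot be reached (argument order assumed
# from the old redisget() documentation).
$version_or_default = redis::get('version.myapp', 'redis://redis.example.com:6379', 'unknown')
```

As with the old function, supplying a default keeps catalog compilation resilient when Redis is temporarily unavailable.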
- -## Contributing - -* Fork it -* Create a feature branch (`git checkout -b my-new-feature`) -* Run rspec tests (`bundle exec rake spec`) -* Commit your changes (`git commit -am 'Added some feature'`) -* Push to the branch (`git push origin my-new-feature`) -* Create new Pull Request +Functions are documented in [REFERENCE.md](REFERENCE.md) diff --git a/REFERENCE.md b/REFERENCE.md new file mode 100644 index 0000000..2ef6df2 --- /dev/null +++ b/REFERENCE.md @@ -0,0 +1,2090 @@ +# Reference + + +## Table of Contents + +**Classes** + +_Public Classes_ + +* [`redis`](#redis): This class installs redis +* [`redis::administration`](#redisadministration): Allows various adminstrative settings for Redis As documented in the FAQ and https://redis.io/topics/admin +* [`redis::globals`](#redisglobals): Set a global config for Redis +* [`redis::sentinel`](#redissentinel): Install redis-sentinel + +_Private Classes_ + +* `redis::config`: This class provides configuration for Redis. +* `redis::install`: This class installs the application. +* `redis::params`: This class provides a number of parameters. +* `redis::preinstall`: Provides anything required by the install class, such as package +repositories. +* `redis::service`: This class manages the Redis daemon. +* `redis::ulimit`: Redis class for configuring ulimit Used to DRY up the config class, and move the logic for ulimit changes all into one place. Parameters are + +**Defined types** + +* [`redis::instance`](#redisinstance): Allows the configuration of multiple redis configurations on one machine + +**Functions** + +* [`redis::get`](#redisget): Returns the value of the key being looked up or `undef` if the key does not exist. Takes two arguments with an optional third. The first bein + +**Data types** + +* [`Redis::LogLevel`](#redisloglevel): Specify the server verbosity level. +* [`Redis::RedisUrl`](#redisredisurl): + +**Tasks** + +* [`redis_cli`](#redis_cli): Executes a redis-cli command on the target system + +## Classes + +### redis + +This class installs redis + +#### Examples + +##### Default install + +```puppet +include redis +``` + +##### Slave Node + +```puppet +class { '::redis': + bind => '10.0.1.2', + slaveof => '10.0.1.1 6379', +} +``` + +##### Binding on multiple interfaces + +```puppet +class { 'redis': + bind => ['127.0.0.1', '10.0.0.1', '10.1.0.1'], +} +``` + +##### Binding on all interfaces + +```puppet +class { 'redis': + bind => [], +} +``` + +#### Parameters + +The following parameters are available in the `redis` class. + +##### `activerehashing` + +Data type: `Boolean` + +Enable/disable active rehashing. + +Default value: `true` + +##### `aof_load_truncated` + +Data type: `Boolean` + +Enable/disable loading truncated AOF file + +Default value: `true` + +##### `aof_rewrite_incremental_fsync` + +Data type: `Boolean` + +Enable/disable fsync for AOF file + +Default value: `true` + +##### `appendfilename` + +Data type: `String[1]` + +The name of the append only file + +Default value: 'appendonly.aof' + +##### `appendfsync` + +Data type: `Enum['no', 'always', 'everysec']` + +Adjust fsync mode + +Default value: 'everysec' + +##### `appendonly` + +Data type: `Boolean` + +Enable/disable appendonly mode. + +Default value: `false` + +##### `auto_aof_rewrite_min_size` + +Data type: `String[1]` + +Adjust minimum size for auto-aof-rewrite. + +Default value: '64mb' + +##### `auto_aof_rewrite_percentage` + +Data type: `Integer[0]` + +Adjust percentatge for auto-aof-rewrite. 
+ +Default value: 100 + +##### `bind` + +Data type: `Variant[Stdlib::IP::Address, Array[Stdlib::IP::Address]]` + +Configure which IP address(es) to listen on. To bind on all interfaces, use an empty array. + +Default value: ['127.0.0.1'] + +##### `config_dir` + +Data type: `Stdlib::Absolutepath` + +Directory containing the configuration files. + +Default value: $redis::params::config_dir + +##### `config_dir_mode` + +Data type: `Stdlib::Filemode` + +Adjust mode for directory containing configuration files. + +Default value: $redis::params::config_dir_mode + +##### `config_file_orig` + +Data type: `Stdlib::Absolutepath` + +The location and name of a config file that provides the source + +Default value: $redis::params::config_file_orig + +##### `config_file` + +Data type: `Stdlib::Absolutepath` + +Adjust main configuration file. + +Default value: $redis::params::config_file + +##### `config_file_mode` + +Data type: `Stdlib::Filemode` + +Adjust permissions for configuration files. + +Default value: '0644' + +##### `config_group` + +Data type: `String[1]` + +Adjust filesystem group for config files. + +Default value: $redis::params::config_group + +##### `config_owner` + +Data type: `String[1]` + +Adjust filesystem owner for config files. + +Default value: $redis::params::config_owner + +##### `conf_template` + +Data type: `String[1]` + +Define which template to use. + +Default value: 'redis/redis.conf.erb' + +##### `daemonize` + +Data type: `Boolean` + +Have Redis run as a daemon. + +Default value: $redis::params::daemonize + +##### `default_install` + +Data type: `Boolean` + +Configure a default install of redis. + +Default value: `true` + +##### `databases` + +Data type: `Integer[1]` + +Set the number of databases. + +Default value: 16 + +##### `dbfilename` + +Data type: `Variant[String[1], Boolean]` + +The filename where to dump the DB + +Default value: 'dump.rdb' + +##### `extra_config_file` + +Data type: `Optional[String]` + +Optional extra config file to include + +Default value: `undef` + +##### `hash_max_ziplist_entries` + +Data type: `Integer[0]` + +Set max ziplist entries for hashes. + +Default value: 512 + +##### `hash_max_ziplist_value` + +Data type: `Integer[0]` + +Set max ziplist values for hashes. + +Default value: 64 + +##### `hll_sparse_max_bytes` + +Data type: `Integer[0]` + +HyperLogLog sparse representation bytes limit + +Default value: 3000 + +##### `hz` + +Data type: `Integer[1, 500]` + +Set redis background tasks frequency + +Default value: 10 + +##### `latency_monitor_threshold` + +Data type: `Integer[0]` + +Latency monitoring threshold in milliseconds + +Default value: 0 + +##### `list_max_ziplist_entries` + +Data type: `Integer[0]` + +Set max ziplist entries for lists. + +Default value: 512 + +##### `list_max_ziplist_value` + +Data type: `Integer[0]` + +Set max ziplist values for lists. + +Default value: 64 + +##### `log_dir` + +Data type: `Stdlib::Absolutepath` + +Specify directory where to write log entries. + +Default value: '/var/log/redis' + +##### `log_dir_mode` + +Data type: `Stdlib::Filemode` + +Adjust mode for directory containing log files. + +Default value: $redis::params::log_dir_mode + +##### `log_file` + +Data type: `Stdlib::Absolutepath` + +Specify file where to write log entries. + +Default value: '/var/log/redis/redis.log' + +##### `log_level` + +Data type: `Redis::LogLevel` + +Specify the server verbosity level. + +Default value: 'notice' + +##### `manage_repo` + +Data type: `Boolean` + +Enable/disable upstream repository configuration. 
+ +Default value: `false` + +##### `manage_package` + +Data type: `Boolean` + +Enable/disable management of package + +Default value: `true` + +##### `managed_by_cluster_manager` + +Data type: `Boolean` + +Choose if redis will be managed by a cluster manager such as pacemaker or rgmanager + +Default value: `false` + +##### `masterauth` + +Data type: `Optional[String[1]]` + +If the master is password protected (using the "requirepass" configuration + +Default value: `undef` + +##### `maxclients` + +Data type: `Integer[1]` + +Set the max number of connected clients at the same time. + +Default value: 10000 + +##### `maxmemory` + +Data type: `Any` + +Don't use more memory than the specified amount of bytes. + +Default value: `undef` + +##### `maxmemory_policy` + +Data type: `Any` + +How Redis will select what to remove when maxmemory is reached. + +Default value: `undef` + +##### `maxmemory_samples` + +Data type: `Any` + +Select as well the sample size to check. + +Default value: `undef` + +##### `min_slaves_max_lag` + +Data type: `Integer[0]` + +The lag in seconds + +Default value: 10 + +##### `min_slaves_to_write` + +Data type: `Integer[0]` + +Minimum number of slaves to be in "online" state + +Default value: 0 + +##### `no_appendfsync_on_rewrite` + +Data type: `Boolean` + +If you have latency problems turn this to 'true'. Otherwise leave it as + +Default value: `false` + +##### `notify_keyspace_events` + +Data type: `Optional[String[1]]` + +Which events to notify Pub/Sub clients about events happening + +Default value: `undef` + +##### `notify_service` + +Data type: `Boolean` + +You may disable service reloads when config files change if you + +Default value: `true` + +##### `package_ensure` + +Data type: `String[1]` + +Default action for package. + +Default value: 'present' + +##### `package_name` + +Data type: `String[1]` + +Upstream package name. + +Default value: $redis::params::package_name + +##### `pid_file` + +Data type: `Stdlib::Absolutepath` + +Where to store the pid. + +Default value: $redis::params::pid_file + +##### `port` + +Data type: `Stdlib::Port` + +Configure which port to listen on. + +Default value: 6379 + +##### `protected_mode` + +Data type: `Boolean` + +Whether protected mode is enabled or not. Only applicable when no bind is set. + +Default value: `true` + +##### `ppa_repo` + +Data type: `Optional[String]` + +Specify upstream (Ubuntu) PPA entry. + +Default value: $redis::params::ppa_repo + +##### `rdbcompression` + +Data type: `Boolean` + +Enable/disable compression of string objects using LZF when dumping. + +Default value: `true` + +##### `repl_backlog_size` + +Data type: `String[1]` + +The replication backlog size + +Default value: '1mb' + +##### `repl_backlog_ttl` + +Data type: `Integer[0]` + +The number of seconds to elapse before freeing backlog buffer + +Default value: 3600 + +##### `repl_disable_tcp_nodelay` + +Data type: `Boolean` + +Enable/disable TCP_NODELAY on the slave socket after SYNC + +Default value: `false` + +##### `repl_ping_slave_period` + +Data type: `Integer[1]` + +Slaves send PINGs to server in a predefined interval. It's possible + +Default value: 10 + +##### `repl_timeout` + +Data type: `Integer[1]` + +Set the replication timeout for: + +Default value: 60 + +##### `requirepass` + +Data type: `Optional[String]` + +Require clients to issue AUTH before processing any other commands. + +Default value: `undef` + +##### `save_db_to_disk` + +Data type: `Boolean` + +Set if save db to disk. 
+ +Default value: `true` + +##### `save_db_to_disk_interval` + +Data type: `Hash` + +save the dataset every N seconds if there are at least M changes in the dataset + +Default value: {'900' =>'1', '300' => '10', '60' => '10000'} + +##### `service_manage` + +Data type: `Boolean` + +Specify if the service should be part of the catalog. + +Default value: `true` + +##### `service_enable` + +Data type: `Boolean` + +Enable/disable daemon at boot. + +Default value: `true` + +##### `service_ensure` + +Data type: `Stdlib::Ensure::Service` + +Specify if the server should be running. + +Default value: 'running' + +##### `service_group` + +Data type: `String[1]` + +Specify which group to run as. + +Default value: 'redis' + +##### `service_hasrestart` + +Data type: `Boolean` + +Does the init script support restart? + +Default value: `true` + +##### `service_hasstatus` + +Data type: `Boolean` + +Does the init script support status? + +Default value: `true` + +##### `service_name` + +Data type: `String[1]` + +Specify the service name for Init or Systemd. + +Default value: $redis::params::service_name + +##### `service_provider` + +Data type: `Optional[String]` + +Specify the service provider to use + +Default value: `undef` + +##### `service_user` + +Data type: `String[1]` + +Specify which user to run as. + +Default value: 'redis' + +##### `set_max_intset_entries` + +Data type: `Integer[0]` + +The following configuration setting sets the limit in the size of the set +in order to use this special memory saving encoding. + +Default value: 512 + +##### `slave_priority` + +Data type: `Integer[0]` + +The priority number for slave promotion by Sentinel + +Default value: 100 + +##### `slave_read_only` + +Data type: `Boolean` + +You can configure a slave instance to accept writes or not. + +Default value: `true` + +##### `slave_serve_stale_data` + +Data type: `Boolean` + +When a slave loses its connection with the master, or when the replication +is still in progress, the slave can act in two different ways: +1) if slave-serve-stale-data is set to 'yes' (the default) the slave will + still reply to client requests, possibly with out of date data, or the + data set may just be empty if this is the first synchronization. +2) if slave-serve-stale-data is set to 'no' the slave will reply with + an error "SYNC with master in progress" to all the kind of commands + but to INFO and SLAVEOF. + +Default value: `true` + +##### `slaveof` + +Data type: `Optional[String[1]]` + +Use slaveof to make a Redis instance a copy of another Redis server. + +Default value: `undef` + +##### `slowlog_log_slower_than` + +Data type: `Integer[0]` + +Tells Redis what is the execution time, in microseconds, to exceed in order +for the command to get logged. + +Default value: 10000 + +##### `slowlog_max_len` + +Data type: `Integer[0]` + +Tells Redis what is the length to exceed in order for the command to get +logged. + +Default value: 1024 + +##### `stop_writes_on_bgsave_error` + +Data type: `Boolean` + +If false then Redis will continue to work as usual even if there are +problems with disk, permissions, and so forth. + +Default value: `true` + +##### `syslog_enabled` + +Data type: `Boolean` + +Enable/disable logging to the system logger. + +Default value: `false` + +##### `syslog_facility` + +Data type: `Optional[String[1]]` + +Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 
+ +Default value: `undef` + +##### `tcp_backlog` + +Data type: `Integer[0]` + +Sets the TCP backlog + +Default value: 511 + +##### `tcp_keepalive` + +Data type: `Integer[0]` + +TCP keepalive. + +Default value: 0 + +##### `timeout` + +Data type: `Integer[0]` + +Close the connection after a client is idle for N seconds (0 to disable). + +Default value: 0 + +##### `ulimit` + +Data type: `Integer[0]` + +Limit the use of system-wide resources. + +Default value: 65536 + +##### `unixsocket` + +Data type: `Variant[Stdlib::Absolutepath, Enum['']]` + +Define unix socket path + +Default value: '/var/run/redis/redis.sock' + +##### `unixsocketperm` + +Data type: `Variant[Stdlib::Filemode, Enum['']]` + +Define unix socket file permissions + +Default value: '0755' + +##### `workdir` + +Data type: `Stdlib::Absolutepath` + +The DB will be written inside this directory, with the filename specified +above using the 'dbfilename' configuration directive. + +Default value: $redis::params::workdir + +##### `workdir_mode` + +Data type: `Stdlib::Filemode` + +Adjust mode for data directory. + +Default value: '0750' + +##### `zset_max_ziplist_entries` + +Data type: `Integer[0]` + +Set max entries for sorted sets. + +Default value: 128 + +##### `zset_max_ziplist_value` + +Data type: `Integer[0]` + +Set max values for sorted sets. + +Default value: 64 + +##### `cluster_enabled` + +Data type: `Boolean` + +Enables redis 3.0 cluster functionality + +Default value: `false` + +##### `cluster_config_file` + +Data type: `String[1]` + +Config file for saving cluster nodes configuration. This file is never +touched by humans. Only set if cluster_enabled is true + +Default value: 'nodes.conf' + +##### `cluster_node_timeout` + +Data type: `Integer[1]` + +Node timeout. Only set if cluster_enabled is true + +Default value: 5000 + +##### `cluster_slave_validity_factor` + +Data type: `Integer[0]` + +Control variable to disable promoting slave in case of disconnection from master +Only set if cluster_enabled is true + +Default value: 0 + +##### `cluster_require_full_coverage` + +Data type: `Boolean` + +If false Redis Cluster will server queries even if requests about a subset of keys can be processed +Only set if cluster_enabled is true + +Default value: `true` + +##### `cluster_migration_barrier` + +Data type: `Integer[0]` + +Minimum number of slaves master will remain connected with, for another +slave to migrate to a master which is no longer covered by any slave. +Only set if cluster_enabled is true + +Default value: 1 + +##### `instances` + +Data type: `Hash[String[1], Hash]` + +Iterate through multiple instance configurations + +Default value: {} + +##### `output_buffer_limit_slave` + +Data type: `String[1]` + + + +Default value: '256mb 64mb 60' + +##### `output_buffer_limit_pubsub` + +Data type: `String[1]` + + + +Default value: '32mb 8mb 60' + +##### `manage_service_file` + +Data type: `Boolean` + + + +Default value: `false` + +### redis::administration + +Allows various adminstrative settings for Redis +As documented in the FAQ and https://redis.io/topics/admin + +* **See also** +https://redis.io/topics/admin + +#### Examples + +##### + +```puppet +include redis::administration +``` + +##### + +```puppet +class {'redis::administration': + disable_thp => false, +} +``` + +#### Parameters + +The following parameters are available in the `redis::administration` class. 
+ +##### `enable_overcommit_memory` + +Data type: `Boolean` + +Enable the overcommit memory setting + +Default value: `true` + +##### `disable_thp` + +Data type: `Boolean` + +Disable Transparent Huge Pages + +Default value: `true` + +##### `somaxconn` + +Data type: `Integer[0]` + +Set somaxconn value + +Default value: 65535 + +### redis::globals + +Set a global config for Redis + +#### Parameters + +The following parameters are available in the `redis::globals` class. + +##### `scl` + +Data type: `Optional[String]` + +Use a specific Software CoLlection on Red Hat based systems + +Default value: `undef` + +### redis::sentinel + +Install redis-sentinel + +#### Examples + +##### Basic inclusion + +```puppet +include redis::sentinel +``` + +##### Configuring options + +```puppet +class {'redis::sentinel': + down_after => 80000, + log_file => '/var/log/redis/sentinel.log', +} +``` + +#### Parameters + +The following parameters are available in the `redis::sentinel` class. + +##### `auth_pass` + +Data type: `Optional[String[1]]` + +The password to use to authenticate with the master and slaves. + +Default value: `undef` + +##### `config_file` + +Data type: `Stdlib::Absolutepath` + +The location and name of the sentinel config file. + +Default value: $redis::params::sentinel_config_file + +##### `config_file_orig` + +Data type: `Stdlib::Absolutepath` + +The location and name of a config file that provides the source +of the sentinel config file. Two different files are needed +because sentinel itself writes to its own config file and we do +not want override that when puppet is run unless there are +changes from the manifests. + +Default value: $redis::params::sentinel_config_file_orig + +##### `config_file_mode` + +Data type: `Stdlib::Filemode` + +Permissions of config file. + +Default value: '0644' + +##### `conf_template` + +Data type: `String[1]` + +Define which template to use. + +Default value: 'redis/redis-sentinel.conf.erb' + +##### `daemonize` + +Data type: `Boolean` + +Have Redis sentinel run as a daemon. + +Default value: $redis::params::sentinel_daemonize + +##### `down_after` + +Data type: `Integer[1]` + +Number of milliseconds the master (or any attached slave or sentinel) +should be unreachable (as in, not acceptable reply to PING, continuously, +for the specified period) in order to consider it in S_DOWN state. + +Default value: 30000 + +##### `failover_timeout` + +Data type: `Integer[1]` + +Specify the failover timeout in milliseconds. + +Default value: 180000 + +##### `init_script` + +Data type: `Optional[Stdlib::Absolutepath]` + +Specifiy the init script that will be created for sentinel. + +Default value: $redis::params::sentinel_init_script + +##### `log_file` + +Data type: `Stdlib::Absolutepath` + +Specify where to write log entries. + +Default value: $redis::params::sentinel_log_file + +##### `log_level` + +Data type: `Redis::LogLevel` + +Specify how much we should log. + +Default value: 'notice' + +##### `master_name` + +Data type: `String[1]` + +Specify the name of the master redis server. +The valid charset is A-z 0-9 and the three characters ".-_". + +Default value: 'mymaster' + +##### `redis_host` + +Data type: `Stdlib::Host` + +Specify the bound host of the master redis server. + +Default value: '127.0.0.1' + +##### `redis_port` + +Data type: `Stdlib::Port` + +Specify the port of the master redis server. + +Default value: 6379 + +##### `package_name` + +Data type: `String[1]` + +The name of the package that installs sentinel. 
+ +Default value: $redis::params::sentinel_package_name + +##### `package_ensure` + +Data type: `String[1]` + +Do we ensure this package. + +Default value: 'present' + +##### `parallel_sync` + +Data type: `Integer[0]` + +How many slaves can be reconfigured at the same time to use a +new master after a failover. + +Default value: 1 + +##### `pid_file` + +Data type: `Stdlib::Absolutepath` + +If sentinel is daemonized it will write its pid at this location. + +Default value: $redis::params::sentinel_pid_file + +##### `quorum` + +Data type: `Integer[1]` + +Number of sentinels that must agree that a master is down to +signal sdown state. + +Default value: 2 + +##### `sentinel_bind` + +Data type: `Variant[Undef, Stdlib::IP::Address, Array[Stdlib::IP::Address]]` + +Allow optional sentinel server ip binding. Can help overcome +issues arising from protect-mode added Redis 3.2 + +Default value: `undef` + +##### `sentinel_port` + +Data type: `Stdlib::Port` + +The port of sentinel server. + +Default value: 26379 + +##### `service_group` + +Data type: `String[1]` + +The group of the config file. + +Default value: 'redis' + +##### `service_name` + +Data type: `String[1]` + +The name of the service (for puppet to manage). + +Default value: $redis::params::sentinel_service_name + +##### `service_user` + +Data type: `String[1]` + +The owner of the config file. + +Default value: 'redis' + +##### `service_enable` + +Data type: `Boolean` + +Enable the service at boot time. + +Default value: `true` + +##### `working_dir` + +Data type: `Stdlib::Absolutepath` + +The directory into which sentinel will change to avoid mount +conflicts. + +Default value: $redis::params::sentinel_working_dir + +##### `notification_script` + +Data type: `Optional[Stdlib::Absolutepath]` + +Path to the notification script + +Default value: `undef` + +##### `client_reconfig_script` + +Data type: `Optional[Stdlib::Absolutepath]` + +Path to the client-reconfig script + +Default value: `undef` + +##### `init_template` + +Data type: `String[1]` + + + +Default value: 'redis/redis-sentinel.init.erb' + +##### `service_ensure` + +Data type: `Stdlib::Ensure::Service` + + + +Default value: 'running' + +## Defined types + +### redis::instance + +This is an defined type to allow the configuration of +multiple redis instances on one machine without conflicts + +#### Examples + +##### + +```puppet +redis::instance {'6380': + port => 6380, +} +``` + +#### Parameters + +The following parameters are available in the `redis::instance` defined type. + +##### `activerehashing` + +Data type: `Boolean` + +Enable/disable active rehashing. + +Default value: $redis::activerehashing + +##### `aof_load_truncated` + +Data type: `Boolean` + +Enable/disable loading truncated AOF file + +Default value: $redis::aof_load_truncated + +##### `aof_rewrite_incremental_fsync` + +Data type: `Boolean` + +Enable/disable fsync for AOF file + +Default value: $redis::aof_rewrite_incremental_fsync + +##### `appendfilename` + +Data type: `String[1]` + +The name of the append only file + +Default value: $redis::appendfilename + +##### `appendfsync` + +Data type: `Enum['no', 'always', 'everysec']` + +Adjust fsync mode. Valid options: always, everysec, no. + +Default value: $redis::appendfsync + +##### `appendonly` + +Data type: `Boolean` + +Enable/disable appendonly mode. + +Default value: $redis::appendonly + +##### `auto_aof_rewrite_min_size` + +Data type: `String[1]` + +Adjust minimum size for auto-aof-rewrite. 
+ +Default value: $redis::auto_aof_rewrite_min_size + +##### `auto_aof_rewrite_percentage` + +Data type: `Integer[0]` + +Adjust percentatge for auto-aof-rewrite. + +Default value: $redis::auto_aof_rewrite_percentage + +##### `bind` + +Data type: `Variant[Stdlib::IP::Address, Array[Stdlib::IP::Address]]` + +Configure which IP address(es) to listen on. To bind on all interfaces, use an empty array. + +Default value: $redis::bind + +##### `config_file_orig` + +Data type: `Stdlib::Absolutepath` + +The location and name of a config file that provides the source + +Default value: $redis::config_file_orig + +##### `config_file` + +Data type: `Stdlib::Absolutepath` + +Adjust main configuration file. + +Default value: $redis::config_file + +##### `config_file_mode` + +Data type: `Stdlib::Filemode` + +Adjust permissions for configuration files. + +Default value: $redis::config_file_mode + +##### `config_group` + +Data type: `String[1]` + +Adjust filesystem group for config files. + +Default value: $redis::config_group + +##### `config_owner` + +Data type: `String[1]` + +Adjust filesystem owner for config files. + +Default value: $redis::config_owner + +##### `conf_template` + +Data type: `String[1]` + +Define which template to use. + +Default value: $redis::conf_template + +##### `daemonize` + +Data type: `Boolean` + +Have Redis run as a daemon. + +Default value: `true` + +##### `databases` + +Data type: `Integer[1]` + +Set the number of databases. + +Default value: $redis::databases + +##### `dbfilename` + +Data type: `Variant[String[1], Boolean]` + +The filename where to dump the DB + +Default value: $redis::dbfilename + +##### `extra_config_file` + +Data type: `Optional[String]` + +Optional extra config file to include + +Default value: $redis::extra_config_file + +##### `hash_max_ziplist_entries` + +Data type: `Integer[0]` + +Set max ziplist entries for hashes. + +Default value: $redis::hash_max_ziplist_entries + +##### `hash_max_ziplist_value` + +Data type: `Integer[0]` + +Set max ziplist values for hashes. + +Default value: $redis::hash_max_ziplist_value + +##### `hll_sparse_max_bytes` + +Data type: `Integer[0]` + +HyperLogLog sparse representation bytes limit + +Default value: $redis::hll_sparse_max_bytes + +##### `hz` + +Data type: `Integer[1, 500]` + +Set redis background tasks frequency + +Default value: $redis::hz + +##### `latency_monitor_threshold` + +Data type: `Integer[0]` + +Latency monitoring threshold in milliseconds + +Default value: $redis::latency_monitor_threshold + +##### `list_max_ziplist_entries` + +Data type: `Integer[0]` + +Set max ziplist entries for lists. + +Default value: $redis::list_max_ziplist_entries + +##### `list_max_ziplist_value` + +Data type: `Integer[0]` + +Set max ziplist values for lists. + +Default value: $redis::list_max_ziplist_value + +##### `log_dir` + +Data type: `Stdlib::Absolutepath` + +Specify directory where to write log entries. + +Default value: $redis::log_dir + +##### `log_dir_mode` + +Data type: `Stdlib::Filemode` + +Adjust mode for directory containing log files. + +Default value: $redis::log_dir_mode + +##### `log_file` + +Data type: `Optional[Stdlib::Absolutepath]` + +Specify file where to write log entries. + +Default value: `undef` + +##### `log_level` + +Data type: `Redis::LogLevel` + +Specify the server verbosity level. 
+ +Default value: $redis::log_level + +##### `masterauth` + +Data type: `Optional[String[1]]` + +If the master is password protected (using the "requirepass" configuration + +Default value: $redis::masterauth + +##### `maxclients` + +Data type: `Integer[1]` + +Set the max number of connected clients at the same time. + +Default value: $redis::maxclients + +##### `maxmemory` + +Data type: `Any` + +Don't use more memory than the specified amount of bytes. + +Default value: $redis::maxmemory + +##### `maxmemory_policy` + +Data type: `Any` + +How Redis will select what to remove when maxmemory is reached. + +Default value: $redis::maxmemory_policy + +##### `maxmemory_samples` + +Data type: `Any` + +Select as well the sample size to check. + +Default value: $redis::maxmemory_samples + +##### `min_slaves_max_lag` + +Data type: `Integer[0]` + +The lag in seconds + +Default value: $redis::min_slaves_max_lag + +##### `min_slaves_to_write` + +Data type: `Integer[0]` + +Minimum number of slaves to be in "online" state + +Default value: $redis::min_slaves_to_write + +##### `no_appendfsync_on_rewrite` + +Data type: `Boolean` + +If you have latency problems turn this to 'true'. Otherwise leave it as + +Default value: $redis::no_appendfsync_on_rewrite + +##### `notify_keyspace_events` + +Data type: `Optional[String[1]]` + +Which events to notify Pub/Sub clients about events happening + +Default value: $redis::notify_keyspace_events + +##### `pid_file` + +Data type: `Stdlib::Absolutepath` + +Where to store the pid. + +Default value: "/var/run/redis/redis-server-${name}.pid" + +##### `port` + +Data type: `Stdlib::Port` + +Configure which port to listen on. + +Default value: $redis::port + +##### `protected_mode` + +Data type: `Boolean` + +Whether protected mode is enabled or not. Only applicable when no bind is set. + +Default value: $redis::protected_mode + +##### `rdbcompression` + +Data type: `Boolean` + +Enable/disable compression of string objects using LZF when dumping. + +Default value: $redis::rdbcompression + +##### `repl_backlog_size` + +Data type: `String[1]` + +The replication backlog size + +Default value: $redis::repl_backlog_size + +##### `repl_backlog_ttl` + +Data type: `Integer[0]` + +The number of seconds to elapse before freeing backlog buffer + +Default value: $redis::repl_backlog_ttl + +##### `repl_disable_tcp_nodelay` + +Data type: `Boolean` + +Enable/disable TCP_NODELAY on the slave socket after SYNC + +Default value: $redis::repl_disable_tcp_nodelay + +##### `repl_ping_slave_period` + +Data type: `Integer[1]` + +Slaves send PINGs to server in a predefined interval. It's possible + +Default value: $redis::repl_ping_slave_period + +##### `repl_timeout` + +Data type: `Integer[1]` + +Set the replication timeout for: + +Default value: $redis::repl_timeout + +##### `requirepass` + +Data type: `Optional[String]` + +Require clients to issue AUTH before processing any other +commands. + +Default value: $redis::requirepass + +##### `save_db_to_disk` + +Data type: `Boolean` + +Set if save db to disk. + +Default value: $redis::save_db_to_disk + +##### `save_db_to_disk_interval` + +Data type: `Hash` + +save the dataset every N seconds if there are at least M changes in the dataset + +Default value: $redis::save_db_to_disk_interval + +##### `service_name` + +Data type: `String[1]` + +The service name for this instance + +Default value: "redis-server-${name}" + +##### `service_enable` + +Data type: `Boolean` + +Enable/disable daemon at boot. 
+ +Default value: $redis::service_enable + +##### `service_ensure` + +Data type: `Stdlib::Ensure::Service` + +Specify if the server should be running. + +Default value: $redis::service_ensure + +##### `service_group` + +Data type: `String[1]` + +Specify which group to run as. + +Default value: $redis::service_group + +##### `service_hasrestart` + +Data type: `Boolean` + +Does the init script support restart? + +Default value: $redis::service_hasrestart + +##### `service_hasstatus` + +Data type: `Boolean` + +Does the init script support status? + +Default value: $redis::service_hasstatus + +##### `service_user` + +Data type: `String[1]` + +Specify which user to run as. + +Default value: $redis::service_user + +##### `set_max_intset_entries` + +Data type: `Integer[0]` + +The following configuration setting sets the limit in the size of the set +in order to use this special memory saving encoding. + +Default value: $redis::set_max_intset_entries + +##### `slave_priority` + +Data type: `Integer[0]` + +The priority number for slave promotion by Sentinel + +Default value: $redis::slave_priority + +##### `slave_read_only` + +Data type: `Boolean` + +You can configure a slave instance to accept writes or not. + +Default value: $redis::slave_read_only + +##### `slave_serve_stale_data` + +Data type: `Boolean` + +When a slave loses its connection with the master, or when the replication +is still in progress, the slave can act in two different ways: +1) if slave-serve-stale-data is set to 'yes' (the default) the slave will + still reply to client requests, possibly with out of date data, or the + data set may just be empty if this is the first synchronization. +2) if slave-serve-stale-data is set to 'no' the slave will reply with + an error "SYNC with master in progress" to all the kind of commands + but to INFO and SLAVEOF. + +Default value: $redis::slave_serve_stale_data + +##### `slaveof` + +Data type: `Optional[String[1]]` + +Use slaveof to make a Redis instance a copy of another Redis server. + +Default value: $redis::slaveof + +##### `slowlog_log_slower_than` + +Data type: `Integer[0]` + +Tells Redis what is the execution time, in microseconds, to exceed in order +for the command to get logged. + +Default value: $redis::slowlog_log_slower_than + +##### `slowlog_max_len` + +Data type: `Integer[0]` + +Tells Redis what is the length to exceed in order for the command +to get logged. + +Default value: $redis::slowlog_max_len + +##### `stop_writes_on_bgsave_error` + +Data type: `Boolean` + +If false then Redis will continue to work as usual even if there +are problems with disk, permissions, and so forth. + +Default value: $redis::stop_writes_on_bgsave_error + +##### `syslog_enabled` + +Data type: `Boolean` + +Enable/disable logging to the system logger. + +Default value: $redis::syslog_enabled + +##### `syslog_facility` + +Data type: `Optional[String[1]]` + +Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. + +Default value: $redis::syslog_facility + +##### `tcp_backlog` + +Data type: `Integer[0]` + +Sets the TCP backlog + +Default value: $redis::tcp_backlog + +##### `tcp_keepalive` + +Data type: `Integer[0]` + +TCP keepalive. + +Default value: $redis::tcp_keepalive + +##### `timeout` + +Data type: `Integer[0]` + +Close the connection after a client is idle for N seconds (0 to disable). + +Default value: $redis::timeout + +##### `ulimit` + +Data type: `Integer[0]` + +Limit the use of system-wide resources. 
+ +Default value: $redis::ulimit + +##### `unixsocket` + +Data type: `Variant[Stdlib::Absolutepath, Enum['']]` + +Define unix socket path + +Default value: "/var/run/redis/redis-server-${name}.sock" + +##### `unixsocketperm` + +Data type: `Variant[Stdlib::Filemode , Enum['']]` + +Define unix socket file permissions + +Default value: $redis::unixsocketperm + +##### `workdir` + +Data type: `Stdlib::Absolutepath` + +The DB will be written inside this directory, with the filename specified +above using the 'dbfilename' configuration directive. + +Default value: "${redis::workdir}/redis-server-${name}" + +##### `workdir_mode` + +Data type: `Stdlib::Filemode` + +Adjust mode for data directory. + +Default value: $redis::workdir_mode + +##### `zset_max_ziplist_entries` + +Data type: `Integer[0]` + +Set max entries for sorted sets. + +Default value: $redis::zset_max_ziplist_entries + +##### `zset_max_ziplist_value` + +Data type: `Integer[0]` + +Set max values for sorted sets. + +Default value: $redis::zset_max_ziplist_value + +##### `cluster_enabled` + +Data type: `Boolean` + +Enables redis 3.0 cluster functionality + +Default value: $redis::cluster_enabled + +##### `cluster_config_file` + +Data type: `String[1]` + +Config file for saving cluster nodes configuration. This file is never +touched by humans. Only set if cluster_enabled is true + +Default value: $redis::cluster_config_file + +##### `cluster_node_timeout` + +Data type: `Integer[1]` + +Node timeout. Only set if cluster_enabled is true + +Default value: $redis::cluster_node_timeout + +##### `cluster_slave_validity_factor` + +Data type: `Integer[0]` + +Control variable to disable promoting slave in case of disconnection from +master Only set if cluster_enabled is true + +Default value: $redis::cluster_slave_validity_factor + +##### `cluster_require_full_coverage` + +Data type: `Boolean` + +If false Redis Cluster will server queries even if requests about a subset +of keys can be processed Only set if cluster_enabled is true + +Default value: $redis::cluster_require_full_coverage + +##### `cluster_migration_barrier` + +Data type: `Integer[0]` + +Minimum number of slaves master will remain connected with, for another +slave to migrate to a master which is no longer covered by any slave Only +set if cluster_enabled is true + +Default value: $redis::cluster_migration_barrier + +##### `output_buffer_limit_slave` + +Data type: `String[1]` + + + +Default value: $redis::output_buffer_limit_slave + +##### `output_buffer_limit_pubsub` + +Data type: `String[1]` + + + +Default value: $redis::output_buffer_limit_pubsub + +##### `minimum_version` + +Data type: `String[1]` + + + +Default value: $redis::minimum_version + +##### `managed_by_cluster_manager` + +Data type: `Boolean` + + + +Default value: $redis::managed_by_cluster_manager + +##### `package_ensure` + +Data type: `String[1]` + + + +Default value: $redis::package_ensure + +##### `manage_service_file` + +Data type: `Boolean` + + + +Default value: `true` + +## Functions + +### redis::get + +Type: Ruby 4.x API + +Returns the value of the key being looked up or `undef` if the key does not +exist. Takes two arguments with an optional third. The first being a string +value of the key to be looked up, the second is the URL to the Redis service +and the third optional argument is a default value to be used if the lookup +fails. 
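+Beyond the basic calls in the example usage below, a minimal sketch of how the lookup and its fallback default might be wired into a manifest (the profile class name, key, and file path are illustrative assumptions, not part of this module):
+
+```puppet
+class profile::myapp {
+  # fall back to 'unknown' when the key is missing or Redis is unreachable
+  $release = redis::get('myapp.release', 'redis://redis.example.com:6379', 'unknown')
+
+  file { '/etc/myapp/release.txt':
+    ensure  => file,
+    content => "${release}\n",
+  }
+}
+```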
+ +example usage +``` +$version = redis::get('version.myapp', 'redis://redis.example.com:6379') +$version_with_default = redis::get('version.myapp', 'redis://redis.example.com:6379', $::myapp_version) +``` + +#### `redis::get(String[1] $key, Redis::RedisUrl $url, Optional[String] $default)` + +Returns the value of the key being looked up or `undef` if the key does not +exist. Takes two arguments with an optional third. The first being a string +value of the key to be looked up, the second is the URL to the Redis service +and the third optional argument is a default value to be used if the lookup +fails. + +example usage +``` +$version = redis::get('version.myapp', 'redis://redis.example.com:6379') +$version_with_default = redis::get('version.myapp', 'redis://redis.example.com:6379', $::myapp_version) +``` + +Returns: `Optional[String]` Returns the value of the key from Redis + +##### `key` + +Data type: `String[1]` + +The key to look up in redis + +##### `url` + +Data type: `Redis::RedisUrl` + +The endpoint of the Redis instance + +##### `default` + +Data type: `Optional[String]` + +The value to return if the key is not found or the connection to Redis fails + +## Data types + +### Redis::LogLevel + +This can be one of: +* debug (a lot of information, useful for development/testing) +* verbose (many rarely useful info, but not a mess like the debug level) +* notice (moderately verbose, what you want in production probably) +* warning (only very important / critical messages are logged) + +Alias of `Enum['debug', 'verbose', 'notice', 'warning']` + +### Redis::RedisUrl + +The Redis::RedisUrl data type. + +Alias of `Pattern[/(^redis:\/\/)/]` + +## Tasks + +### redis_cli + +Executes a redis-cli command on the target system + +**Supports noop?** false + +#### Parameters + +##### `command` + +Data type: `String[1]` + +The command to run, including all arguments + diff --git a/Rakefile b/Rakefile index 574450f..b450fe7 100644 --- a/Rakefile +++ b/Rakefile @@ -1,52 +1,54 @@ -require 'puppetlabs_spec_helper/rake_tasks' -require 'puppet-lint/tasks/puppet-lint' -require 'puppet-syntax/tasks/puppet-syntax' -require 'puppet-strings/tasks' -require 'simp/rake/pupmod/helpers' +require 'voxpupuli/test/rake' -Simp::Rake::Pupmod::Helpers.new(File.dirname(__FILE__)) - -# These two gems aren't always present, for instance -# on Travis with --without development +# load optional tasks for releases +# only available if gem group releases is installed begin - require 'puppet_blacksmith/rake_tasks' + require 'voxpupuli/release/rake_tasks' rescue LoadError end -PuppetLint.configuration.fail_on_warnings -PuppetLint.configuration.send('relative') -PuppetLint.configuration.send('disable_80chars') -PuppetLint.configuration.send('disable_140chars') -PuppetLint.configuration.send('disable_class_inherits_from_params_class') -PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"] - -exclude_paths = [ - "pkg/**/*", - "vendor/**/*", - "spec/**/*", -] -PuppetLint.configuration.ignore_paths = exclude_paths -PuppetSyntax.exclude_paths = exclude_paths - -desc "Run acceptance tests" -RSpec::Core::RakeTask.new(:acceptance) do |t| - t.pattern = 'spec/acceptance' +desc "Run main 'test' task and report merged results to coveralls" +task test_with_coveralls: [:test] do + if Dir.exist?(File.expand_path('../lib', __FILE__)) + require 'coveralls/rake/task' + Coveralls::RakeTask.new + Rake::Task['coveralls:push'].invoke + else + puts 'Skipping reporting to coveralls. 
Module has no lib dir' + end end -desc "Run syntax, lint, and spec tests." -task :test => [ - :syntax, - :lint, - :spec, -] +desc 'Generate REFERENCE.md' +task :reference, [:debug, :backtrace] do |t, args| + patterns = '' + Rake::Task['strings:generate:reference'].invoke(patterns, args[:debug], args[:backtrace]) +end begin require 'github_changelog_generator/task' GitHubChangelogGenerator::RakeTask.new :changelog do |config| version = (Blacksmith::Modulefile.new).version - config.future_release = "v#{version}" - config.header = "# Change log\n\nAll notable changes to this project will be documented in this file." - config.exclude_labels = %w{duplicate question invalid wontfix modulesync} + config.future_release = "v#{version}" if version =~ /^\d+\.\d+.\d+$/ + config.header = "# Changelog\n\nAll notable changes to this project will be documented in this file.\nEach new release typically also includes the latest modulesync defaults.\nThese should not affect the functionality of the module." + config.exclude_labels = %w{duplicate question invalid wontfix wont-fix modulesync skip-changelog} + config.user = 'voxpupuli' + metadata_json = File.join(File.dirname(__FILE__), 'metadata.json') + metadata = JSON.load(File.read(metadata_json)) + config.project = metadata['name'] + end + + # Workaround for https://github.com/github-changelog-generator/github-changelog-generator/issues/715 + require 'rbconfig' + if RbConfig::CONFIG['host_os'] =~ /linux/ + task :changelog do + puts 'Fixing line endings...' + changelog_file = File.join(__dir__, 'CHANGELOG.md') + changelog_txt = File.read(changelog_file) + new_contents = changelog_txt.gsub(%r{\r\n}, "\n") + File.open(changelog_file, "w") {|file| file.puts new_contents } + end end + rescue LoadError end +# vim: syntax=ruby diff --git a/lib/facter/redis_server_version.rb b/lib/facter/redis_server_version.rb index f106722..e3ea86e 100644 --- a/lib/facter/redis_server_version.rb +++ b/lib/facter/redis_server_version.rb @@ -1,19 +1,18 @@ # Fact: redis_server_version # # Purpose: Retrieve redis-server version if installed # Facter.add(:redis_server_version) do - setcode do if Facter::Util::Resolution.which('redis-server') redis_server_version_output = Facter::Util::Resolution.exec('redis-server -v') - if redis_server_version_output =~ /v=([\w\.]+)/ + if redis_server_version_output =~ %r{v=([\w\.]+)} # Redis server v=2.8.17 sha=00000000:0 malloc=jemalloc-3.6.0 bits=64 build=4c1d5710660b9479 - redis_server_version_output.match(/Redis server v=([\w\.]+).+/)[1] - elsif redis_server_version_output =~ /version ([\w\.]+)/ + redis_server_version_output.match(%r{Redis server v=([\w\.]+).+})[1] + elsif redis_server_version_output =~ %r{version ([\w\.]+)} # Redis server version 2.4.10 (00000000:0) - redis_server_version_output.match(/Redis server version ([\w\.]+).+/)[1] + redis_server_version_output.match(%r{Redis server version ([\w\.]+).+})[1] end end end end diff --git a/lib/puppet/functions/redis/get.rb b/lib/puppet/functions/redis/get.rb new file mode 100644 index 0000000..ad6d484 --- /dev/null +++ b/lib/puppet/functions/redis/get.rb @@ -0,0 +1,32 @@ +require 'redis' +# Returns the value of the key being looked up or `undef` if the key does not +# exist. Takes two arguments with an optional third. The first being a string +# value of the key to be looked up, the second is the URL to the Redis service +# and the third optional argument is a default value to be used if the lookup +# fails. 
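+# (Note: the 'redis' gem must be installed where the catalog is compiled, as noted in the README.)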
+# +# example usage +# ``` +# $version = redis::get('version.myapp', 'redis://redis.example.com:6379') +# $version_with_default = redis::get('version.myapp', 'redis://redis.example.com:6379', $::myapp_version) +# ``` +Puppet::Functions.create_function(:'redis::get') do + # @param key The key to look up in redis + # @param url The endpoint of the Redis instance + # @param default The value to return if the key is not found or the connection to Redis fails + # @return Returns the value of the key from Redis + dispatch :get do + param 'String[1]', :key + param 'Redis::RedisUrl', :url + optional_param 'String', :default + return_type 'Optional[String]' + end + + def get(key, url, default = nil) + Redis.new(url: url).get(key) || default + rescue Redis::CannotConnectError, SocketError => e + raise Puppet::Error, "connection to redis server failed - #{e}" unless default + Puppet.debug "Connection to redis failed with #{e} - Returning default value of #{default}" + default + end +end diff --git a/lib/puppet/parser/functions/redisget.rb b/lib/puppet/parser/functions/redisget.rb deleted file mode 100644 index 0f7edd6..0000000 --- a/lib/puppet/parser/functions/redisget.rb +++ /dev/null @@ -1,45 +0,0 @@ -require 'redis' - -module Puppet::Parser::Functions - newfunction(:redisget, :type => :rvalue, :doc => <<-DOC -Returns the value of the key being looked up or nil if the key does not -exist. Takes two arguments with an optional third. The first being a string -value of the key to be looked up, the second is the URL to the Redis service -and the third optional argument is a default value to be used if the lookup -fails. -@param redis_key [String] The key to look up in redis. -@param redis_uri [String] The endpoint of the Redis instance. -@param default_value [String] The value to return if the key is not found or - the connection to Redis fails -@return [String] The value of the key from redis -@return [String] An empty string eg. `''` -@example Calling the function. 
- $version = redisget('version.myapp', 'redis://redis.example.com:6379') -@example Calling the function with a default if failure occurs - $version = redisget('version.myapp', 'redis://redis.example.com:6379', $::myapp_version) -DOC - ) do |args| - - raise(Puppet::ParseError, "redisget(): Wrong number of arguments given (#{args.size} for 2 or 3)") if args.size != 2 and args.size != 3 - - key = args[0] - url = args[1] - - if args.size == 3 - default = args[2] - raise(Puppet::ParseError, "redisget(): Wrong argument type given (#{default.class} for String) for arg3 (default)") if default.is_a?(String) == false - end - - raise(Puppet::ParseError, "redisget(): Wrong argument type given (#{key.class} for String) for arg1 (key)") if key.is_a?(String) == false - raise(Puppet::ParseError, "redisget(): Wrong argument type given (#{url.class} for String) for arg2 (url)") if url.is_a?(String) == false - - begin - Redis.new(:url => url).get(key) || default - rescue Redis::CannotConnectError, SocketError => e - raise Puppet::Error, "connection to redis server failed - #{e}" unless default - debug "Connection to redis failed with #{e} - Returning default value of #{default}" - default - end - - end -end diff --git a/manifests/administration.pp b/manifests/administration.pp index c0474bf..e1d2975 100644 --- a/manifests/administration.pp +++ b/manifests/administration.pp @@ -1,47 +1,51 @@ # Allows various adminstrative settings for Redis # As documented in the FAQ and https://redis.io/topics/admin # # @example # include redis::administration # # @example # class {'redis::administration': # disable_thp => false, # } # -# @param [Boolean] enable_overcommit_memory Enable the overcommit memory setting (Defaults to true) -# @param [Boolean] disable_thp Disable Transparent Huge Pages (Defaults to true) -# @param [String] somaxconn Set somaxconn value (Defaults to '65535') +# @param enable_overcommit_memory +# Enable the overcommit memory setting +# @param disable_thp +# Disable Transparent Huge Pages +# @param somaxconn +# Set somaxconn value # # @author - Peter Souter +# @see https://redis.io/topics/admin # class redis::administration( - $enable_overcommit_memory = true, - $disable_thp = true, - $somaxconn = '65535', + Boolean $enable_overcommit_memory = true, + Boolean $disable_thp = true, + Integer[0] $somaxconn = 65535, ) { if $enable_overcommit_memory { sysctl { 'vm.overcommit_memory': ensure => 'present', value => '1', } } if $disable_thp { exec { 'Disable Hugepages': command => 'echo never > /sys/kernel/mm/transparent_hugepage/enabled', path => ['/sbin', '/usr/sbin', '/bin', '/usr/bin'], onlyif => 'test -f /sys/kernel/mm/transparent_hugepage/enabled', unless => 'cat /sys/kernel/mm/transparent_hugepage/enabled | grep "\[never\]"', } } - if $somaxconn { + if $somaxconn > 0 { sysctl { 'net.core.somaxconn': ensure => 'present', value => $somaxconn, } } } diff --git a/manifests/config.pp b/manifests/config.pp index 38f2306..ad06c6d 100644 --- a/manifests/config.pp +++ b/manifests/config.pp @@ -1,87 +1,71 @@ -# = Class: redis::config -# -# This class provides configuration for Redis. -# +# @summary This class provides configuration for Redis. 
+# @api private class redis::config { File { - owner => $::redis::config_owner, - group => $::redis::config_group, - mode => $::redis::config_file_mode, + owner => $redis::config_owner, + group => $redis::config_group, + mode => $redis::config_file_mode, } - file { $::redis::config_dir: + file { $redis::config_dir: ensure => directory, - mode => $::redis::config_dir_mode, + mode => $redis::config_dir_mode, } - file {$::redis::log_dir: + file {$redis::log_dir: ensure => directory, - group => $::redis::service_group, - mode => $::redis::log_dir_mode, - owner => $::redis::service_user, + group => $redis::service_group, + mode => $redis::log_dir_mode, + owner => $redis::service_user, } - file {$::redis::workdir: + file {$redis::workdir: ensure => directory, - group => $::redis::service_group, - mode => $::redis::workdir_mode, - owner => $::redis::service_user, + group => $redis::service_group, + mode => $redis::workdir_mode, + owner => $redis::service_user, } - if $::redis::default_install { + if $redis::default_install { redis::instance {'default': - pid_file => $::redis::pid_file, - log_file => $::redis::log_file, - manage_service_file => $::redis::manage_service_file, - unixsocket => $::redis::unixsocket, - workdir => $::redis::workdir, + pid_file => $redis::pid_file, + log_file => $redis::log_file, + unixsocket => $redis::unixsocket, + workdir => $redis::workdir, + daemonize => $redis::daemonize, + service_name => $redis::service_name, + manage_service_file => $redis::manage_service_file, } } - if $::redis::ulimit { - contain ::redis::ulimit + if $redis::ulimit { + contain redis::ulimit } - $service_provider_lookup = pick(getvar('service_provider'), false) - - if $service_provider_lookup != 'systemd' { - case $::operatingsystem { - 'Debian': { - if $::lsbdistcodename == 'wheezy' { - $var_run_redis_mode = '2755' - $var_run_redis_group = 'redis' - } else { - $var_run_redis_group = $::redis::config_group - $var_run_redis_mode = '2775' - } - } - default: { - $var_run_redis_mode = '0755' - $var_run_redis_group = $::redis::config_group - } - } + $service_provider_lookup = fact('service_provider') + unless $facts['os']['family'] == 'Debian' or $service_provider_lookup == 'systemd' { file { '/var/run/redis': ensure => 'directory', - owner => $::redis::config_owner, - group => $var_run_redis_group, - mode => $var_run_redis_mode, + owner => $redis::config_owner, + group => $redis::config_group, + mode => '0755', } } # Adjust /etc/default/redis-server on Debian systems - case $::osfamily { + case $facts['os']['family'] { 'Debian': { file { '/etc/default/redis-server': - ensure => present, - group => $::redis::config_group, - mode => $::redis::config_file_mode, - owner => $::redis::config_owner, + ensure => file, + group => $redis::config_group, + mode => $redis::config_file_mode, + owner => $redis::config_owner, } } default: { } } } diff --git a/manifests/globals.pp b/manifests/globals.pp new file mode 100644 index 0000000..fd607a7 --- /dev/null +++ b/manifests/globals.pp @@ -0,0 +1,11 @@ +# @summary Set a global config for Redis +# +# @param scl +# Use a specific Software CoLlection on Red Hat based systems +class redis::globals( + Optional[String] $scl = undef, +) { + if $scl and $facts['os']['family'] != 'RedHat' { + fail('SCLs are only supported on the Red Hat OS family') + } +} diff --git a/manifests/init.pp b/manifests/init.pp index c53f9b5..0a0f525 100644 --- a/manifests/init.pp +++ b/manifests/init.pp @@ -1,259 +1,356 @@ # This class installs redis # # @example Default install # include redis 
# # @example Slave Node # class { '::redis': # bind => '10.0.1.2', # slaveof => '10.0.1.1 6379', # } # -# @param [String] activerehashing Enable/disable active rehashing. -# @param [String] aof_load_truncated Enable/disable loading truncated AOF file -# @param [String] aof_rewrite_incremental_fsync Enable/disable fsync for AOF file -# @param [String] appendfilename The name of the append only file -# @param [String] appendfsync Adjust fsync mode. Valid options: always, everysec, no. Default: everysec -# @param [String] appendonly Enable/disable appendonly mode. -# @param [String] auto_aof_rewrite_min_size Adjust minimum size for auto-aof-rewrite. -# @param [String] auto_aof_rewrite_percentage Adjust percentatge for auto-aof-rewrite. -# @param [String] bind Configure which IP address to listen on. -# @param [String] config_dir Directory containing the configuration files. -# @param [String] config_dir_mode Adjust mode for directory containing configuration files. -# @param [String] config_file_orig The location and name of a config file that provides the source -# @param [String] config_file Adjust main configuration file. -# @param [String] config_file_mode Adjust permissions for configuration files. -# @param [String] config_group Adjust filesystem group for config files. -# @param [String] config_owner Adjust filesystem owner for config files. -# @param [String] conf_template Define which template to use. -# @param [String] daemonize Have Redis run as a daemon. -# @param [String] default_install Configure a default install of redis -# @param [String] databases Set the number of databases. -# @param [String] dbfilename The filename where to dump the DB -# @param [String] extra_config_file Description -# @param [String] hash_max_ziplist_entries Set max ziplist entries for hashes. -# @param [String] hash_max_ziplist_value Set max ziplist values for hashes. -# @param [String] hll_sparse_max_bytes HyperLogLog sparse representation bytes limit -# @param [String] hz Set redis background tasks frequency -# @param [String] latency_monitor_threshold Latency monitoring threshold in milliseconds -# @param [String] list_max_ziplist_entries Set max ziplist entries for lists. -# @param [String] list_max_ziplist_value Set max ziplist values for lists. -# @param [String] log_dir Specify directory where to write log entries. -# @param [String] log_dir_mode Adjust mode for directory containing log files. -# @param [String] log_file Specify file where to write log entries. -# @param [String] log_level Specify the server verbosity level. -# @param [String] manage_repo Enable/disable upstream repository configuration. -# @param [String] manage_package Enable/disable management of package -# @param [String] managed_by_cluster_manager Choose if redis will be managed by a cluster manager such as pacemaker or rgmanager -# @param [String] masterauth If the master is password protected (using the "requirepass" configuration -# @param [String] maxclients Set the max number of connected clients at the same time. -# @param [String] maxmemory Don't use more memory than the specified amount of bytes. -# @param [String] maxmemory_policy How Redis will select what to remove when maxmemory is reached. -# @param [String] maxmemory_samples Select as well the sample size to check. -# @param [String] min_slaves_max_lag The lag in seconds -# @param [String] min_slaves_to_write Minimum number of slaves to be in "online" state -# @param [String] no_appendfsync_on_rewrite If you have latency problems turn this to 'true'. 
Otherwise leave it as -# @param [String] notify_keyspace_events Which events to notify Pub/Sub clients about events happening -# @param [String] notify_service You may disable service reloads when config files change if you -# @param [String] package_ensure Default action for package. -# @param [String] package_name Upstream package name. -# @param [String] pid_file Where to store the pid. -# @param [String] port Configure which port to listen on. -# @param [String] protected_mode Whether protected mode is enabled or not. Only applicable when no bind is set. -# @param [String] ppa_repo Specify upstream (Ubuntu) PPA entry. -# @param [String] rdbcompression Enable/disable compression of string objects using LZF when dumping. -# @param [String] repl_backlog_size The replication backlog size -# @param [String] repl_backlog_ttl The number of seconds to elapse before freeing backlog buffer -# @param [String] repl_disable_tcp_nodelay Enable/disable TCP_NODELAY on the slave socket after SYNC -# @param [String] repl_ping_slave_period Slaves send PINGs to server in a predefined interval. It's possible -# @param [String] repl_timeout Set the replication timeout for: -# @param [String] requirepass Require clients to issue AUTH before processing any -# other commands. -# @param [String] save_db_to_disk Set if save db to disk. -# @param [String] save_db_to_disk_interval save the dataset every N seconds if there are at least M changes in the dataset -# @param [String] service_manage Specify if the service should be part of the catalog. -# @param [String] service_enable Enable/disable daemon at boot. -# @param [String] service_ensure Specify if the server should be running. -# @param [String] service_group Specify which group to run as. -# @param [String] service_hasrestart Does the init script support restart? -# @param [String] service_hasstatus Does the init script support status? -# @param [String] service_name Specify the service name for Init or Systemd. -# @param [String] service_provider Specify the service provider to use -# @param [String] service_user Specify which user to run as. -# @param [String] set_max_intset_entries The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -# Default: 512 -# @param [String] slave_priority The priority number for slave promotion by Sentinel -# @param [String] slave_read_only You can configure a slave instance to accept writes or not. -# @param [String] slave_serve_stale_data When a slave loses its connection with the master, or when the replication +# @example Binding on multiple interfaces +# class { 'redis': +# bind => ['127.0.0.1', '10.0.0.1', '10.1.0.1'], +# } +# +# @example Binding on all interfaces +# class { 'redis': +# bind => [], +# } +# +# @param activerehashing +# Enable/disable active rehashing. +# @param aof_load_truncated +# Enable/disable loading truncated AOF file +# @param aof_rewrite_incremental_fsync +# Enable/disable fsync for AOF file +# @param appendfilename +# The name of the append only file +# @param appendfsync +# Adjust fsync mode +# @param appendonly +# Enable/disable appendonly mode. +# @param auto_aof_rewrite_min_size +# Adjust minimum size for auto-aof-rewrite. +# @param auto_aof_rewrite_percentage +# Adjust percentatge for auto-aof-rewrite. +# @param bind +# Configure which IP address(es) to listen on. To bind on all interfaces, use an empty array. +# @param config_dir +# Directory containing the configuration files. 
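A short sketch pulling together the append-only-file parameters documented above; the values are illustrative and mirror the class defaults where those are shown in the diff.

```puppet
# Illustrative only: enable append-only persistence and keep the
# rewrite thresholds at the documented defaults.
class { 'redis':
  appendonly                  => true,
  appendfsync                 => 'everysec',
  appendfilename              => 'appendonly.aof',
  auto_aof_rewrite_min_size   => '64mb',
  auto_aof_rewrite_percentage => 100,
}
```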
+# @param config_dir_mode +# Adjust mode for directory containing configuration files. +# @param config_file_orig +# The location and name of a config file that provides the source +# @param config_file +# Adjust main configuration file. +# @param config_file_mode +# Adjust permissions for configuration files. +# @param config_group +# Adjust filesystem group for config files. +# @param config_owner +# Adjust filesystem owner for config files. +# @param conf_template +# Define which template to use. +# @param daemonize +# Have Redis run as a daemon. +# @param default_install +# Configure a default install of redis. +# @param databases +# Set the number of databases. +# @param dbfilename +# The filename where to dump the DB +# @param extra_config_file +# Optional extra config file to include +# @param hash_max_ziplist_entries +# Set max ziplist entries for hashes. +# @param hash_max_ziplist_value +# Set max ziplist values for hashes. +# @param hll_sparse_max_bytes +# HyperLogLog sparse representation bytes limit +# @param hz +# Set redis background tasks frequency +# @param latency_monitor_threshold +# Latency monitoring threshold in milliseconds +# @param list_max_ziplist_entries +# Set max ziplist entries for lists. +# @param list_max_ziplist_value +# Set max ziplist values for lists. +# @param log_dir +# Specify directory where to write log entries. +# @param log_dir_mode +# Adjust mode for directory containing log files. +# @param log_file +# Specify file where to write log entries. +# @param log_level +# Specify the server verbosity level. +# @param manage_repo +# Enable/disable upstream repository configuration. +# @param manage_package +# Enable/disable management of package +# @param managed_by_cluster_manager +# Choose if redis will be managed by a cluster manager such as pacemaker or rgmanager +# @param masterauth +# If the master is password protected (using the "requirepass" configuration +# @param maxclients +# Set the max number of connected clients at the same time. +# @param maxmemory +# Don't use more memory than the specified amount of bytes. +# @param maxmemory_policy +# How Redis will select what to remove when maxmemory is reached. +# @param maxmemory_samples +# Select as well the sample size to check. +# @param min_slaves_max_lag +# The lag in seconds +# @param min_slaves_to_write +# Minimum number of slaves to be in "online" state +# @param no_appendfsync_on_rewrite +# If you have latency problems turn this to 'true'. Otherwise leave it as +# @param notify_keyspace_events +# Which events to notify Pub/Sub clients about events happening +# @param notify_service +# You may disable service reloads when config files change if you +# @param package_ensure +# Default action for package. +# @param package_name +# Upstream package name. +# @param pid_file +# Where to store the pid. +# @param port +# Configure which port to listen on. +# @param protected_mode +# Whether protected mode is enabled or not. Only applicable when no bind is set. +# @param ppa_repo +# Specify upstream (Ubuntu) PPA entry. +# @param rdbcompression +# Enable/disable compression of string objects using LZF when dumping. +# @param repl_backlog_size +# The replication backlog size +# @param repl_backlog_ttl +# The number of seconds to elapse before freeing backlog buffer +# @param repl_disable_tcp_nodelay +# Enable/disable TCP_NODELAY on the slave socket after SYNC +# @param repl_ping_slave_period +# Slaves send PINGs to server in a predefined interval. 
It's possible +# @param repl_timeout +# Set the replication timeout for: +# @param requirepass +# Require clients to issue AUTH before processing any other commands. +# @param save_db_to_disk +# Set if save db to disk. +# @param save_db_to_disk_interval +# save the dataset every N seconds if there are at least M changes in the dataset +# @param service_manage +# Specify if the service should be part of the catalog. +# @param service_enable +# Enable/disable daemon at boot. +# @param service_ensure +# Specify if the server should be running. +# @param service_group +# Specify which group to run as. +# @param service_hasrestart +# Does the init script support restart? +# @param service_hasstatus +# Does the init script support status? +# @param service_name +# Specify the service name for Init or Systemd. +# @param service_provider +# Specify the service provider to use +# @param service_user +# Specify which user to run as. +# @param set_max_intset_entries +# The following configuration setting sets the limit in the size of the set +# in order to use this special memory saving encoding. +# @param slave_priority +# The priority number for slave promotion by Sentinel +# @param slave_read_only +# You can configure a slave instance to accept writes or not. +# @param slave_serve_stale_data +# When a slave loses its connection with the master, or when the replication # is still in progress, the slave can act in two different ways: # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. -# # 2) if slave-serve-stale-data is set to 'no' the slave will reply with # an error "SYNC with master in progress" to all the kind of commands # but to INFO and SLAVEOF. -# -# Default: true -# -# @param [String] slaveof Use slaveof to make a Redis instance a copy of another Redis server. -# @param [String] slowlog_log_slower_than Tells Redis what is the execution time, in microseconds, to exceed -# in order for the command to get logged. -# Default: 10000 -# -# @param [String] slowlog_max_len Tells Redis what is the length to exceed in order for the command -# to get logged. -# Default: 1024 -# -# @param [String] stop_writes_on_bgsave_error If false then Redis will continue to work as usual even if there -# are problems with disk, permissions, and so forth. -# Default: true -# -# @param [String] syslog_enabled Enable/disable logging to the system logger. -# @param [String] syslog_facility Specify the syslog facility. -# Must be USER or between LOCAL0-LOCAL7. -# Default: undef -# -# @param [String] tcp_backlog Sets the TCP backlog -# @param [String] tcp_keepalive TCP keepalive. -# @param [String] timeout Close the connection after a client is idle for N seconds (0 to disable). -# @param [String] ulimit Limit the use of system-wide resources. -# @param [String] unixsocket Define unix socket path -# @param [String] unixsocketperm Define unix socket file permissions -# @param [String] workdir The DB will be written inside this directory, with the filename specified +# @param slaveof +# Use slaveof to make a Redis instance a copy of another Redis server. +# @param slowlog_log_slower_than +# Tells Redis what is the execution time, in microseconds, to exceed in order +# for the command to get logged. +# @param slowlog_max_len +# Tells Redis what is the length to exceed in order for the command to get +# logged. 
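The replication parameters above (slaveof, masterauth, requirepass, slave_read_only) combine as in this hypothetical replica declaration; the addresses and password are made up.

```puppet
# Replica of a password-protected master. The slaveof value uses the
# "<ip> <port>" form shown in the Slave Node example above.
class { 'redis':
  bind            => ['10.0.1.2'],
  slaveof         => '10.0.1.1 6379',
  masterauth      => 'supersecret',
  requirepass     => 'supersecret',
  slave_read_only => true,
}
```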
+# @param stop_writes_on_bgsave_error +# If false then Redis will continue to work as usual even if there are +# problems with disk, permissions, and so forth. +# @param syslog_enabled +# Enable/disable logging to the system logger. +# @param syslog_facility +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# @param tcp_backlog +# Sets the TCP backlog +# @param tcp_keepalive +# TCP keepalive. +# @param timeout +# Close the connection after a client is idle for N seconds (0 to disable). +# @param ulimit +# Limit the use of system-wide resources. +# @param unixsocket +# Define unix socket path +# @param unixsocketperm +# Define unix socket file permissions +# @param workdir +# The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. -# Default: /var/lib/redis/ -# @param [String] workdir_mode Adjust mode for data directory. -# @param [String] zset_max_ziplist_entries Set max entries for sorted sets. -# @param [String] zset_max_ziplist_value Set max values for sorted sets. -# @param [String] cluster_enabled Enables redis 3.0 cluster functionality -# @param [String] cluster_config_file Config file for saving cluster nodes configuration. This file is never touched by humans. +# @param workdir_mode +# Adjust mode for data directory. +# @param zset_max_ziplist_entries +# Set max entries for sorted sets. +# @param zset_max_ziplist_value +# Set max values for sorted sets. +# @param cluster_enabled +# Enables redis 3.0 cluster functionality +# @param cluster_config_file +# Config file for saving cluster nodes configuration. This file is never +# touched by humans. Only set if cluster_enabled is true +# @param cluster_node_timeout +# Node timeout. Only set if cluster_enabled is true +# @param cluster_slave_validity_factor +# Control variable to disable promoting slave in case of disconnection from master +# Only set if cluster_enabled is true +# @param cluster_require_full_coverage +# If false Redis Cluster will server queries even if requests about a subset of keys can be processed # Only set if cluster_enabled is true -# Default: nodes.conf -# @param [String] cluster_node_timeout Node timeout +# @param cluster_migration_barrier +# Minimum number of slaves master will remain connected with, for another +# slave to migrate to a master which is no longer covered by any slave. 
# Only set if cluster_enabled is true -# Default: 5000 +# @param instances +# Iterate through multiple instance configurations class redis ( - $activerehashing = $::redis::params::activerehashing, - $aof_load_truncated = $::redis::params::aof_load_truncated, - $aof_rewrite_incremental_fsync = $::redis::params::aof_rewrite_incremental_fsync, - $appendfilename = $::redis::params::appendfilename, - $appendfsync = $::redis::params::appendfsync, - $appendonly = $::redis::params::appendonly, - $auto_aof_rewrite_min_size = $::redis::params::auto_aof_rewrite_min_size, - $auto_aof_rewrite_percentage = $::redis::params::auto_aof_rewrite_percentage, - $bind = $::redis::params::bind, - $output_buffer_limit_slave = $::redis::params::output_buffer_limit_slave, - $output_buffer_limit_pubsub = $::redis::params::output_buffer_limit_pubsub, - $conf_template = $::redis::params::conf_template, - $config_dir = $::redis::params::config_dir, - $config_dir_mode = $::redis::params::config_dir_mode, - $config_file = $::redis::params::config_file, - $config_file_mode = $::redis::params::config_file_mode, - $config_file_orig = $::redis::params::config_file_orig, - $config_group = $::redis::params::config_group, - $config_owner = $::redis::params::config_owner, - $daemonize = $::redis::params::daemonize, - $databases = $::redis::params::databases, - $default_install = $::redis::params::default_install, - $dbfilename = $::redis::params::dbfilename, - $extra_config_file = $::redis::params::extra_config_file, - $hash_max_ziplist_entries = $::redis::params::hash_max_ziplist_entries, - $hash_max_ziplist_value = $::redis::params::hash_max_ziplist_value, - $hll_sparse_max_bytes = $::redis::params::hll_sparse_max_bytes, - $hz = $::redis::params::hz, - $latency_monitor_threshold = $::redis::params::latency_monitor_threshold, - $list_max_ziplist_entries = $::redis::params::list_max_ziplist_entries, - $list_max_ziplist_value = $::redis::params::list_max_ziplist_value, - $log_dir = $::redis::params::log_dir, - $log_dir_mode = $::redis::params::log_dir_mode, - $log_file = $::redis::params::log_file, - $log_level = $::redis::params::log_level, - $manage_package = $::redis::params::manage_package, - $manage_repo = $::redis::params::manage_repo, - $masterauth = $::redis::params::masterauth, - $maxclients = $::redis::params::maxclients, - $maxmemory = $::redis::params::maxmemory, - $maxmemory_policy = $::redis::params::maxmemory_policy, - $maxmemory_samples = $::redis::params::maxmemory_samples, - $min_slaves_max_lag = $::redis::params::min_slaves_max_lag, - $min_slaves_to_write = $::redis::params::min_slaves_to_write, - $no_appendfsync_on_rewrite = $::redis::params::no_appendfsync_on_rewrite, - $notify_keyspace_events = $::redis::params::notify_keyspace_events, - $notify_service = $::redis::params::notify_service, - $managed_by_cluster_manager = $::redis::params::managed_by_cluster_manager, - $package_ensure = $::redis::params::package_ensure, - $package_name = $::redis::params::package_name, - $pid_file = $::redis::params::pid_file, - $port = $::redis::params::port, - $protected_mode = $::redis::params::protected_mode, - $ppa_repo = $::redis::params::ppa_repo, - $rdbcompression = $::redis::params::rdbcompression, - $repl_backlog_size = $::redis::params::repl_backlog_size, - $repl_backlog_ttl = $::redis::params::repl_backlog_ttl, - $repl_disable_tcp_nodelay = $::redis::params::repl_disable_tcp_nodelay, - $repl_ping_slave_period = $::redis::params::repl_ping_slave_period, - $repl_timeout = $::redis::params::repl_timeout, - 
$requirepass = $::redis::params::requirepass, - $save_db_to_disk = $::redis::params::save_db_to_disk, - $save_db_to_disk_interval = $::redis::params::save_db_to_disk_interval, - $service_enable = $::redis::params::service_enable, - $service_ensure = $::redis::params::service_ensure, - $service_group = $::redis::params::service_group, - $service_hasrestart = $::redis::params::service_hasrestart, - $service_hasstatus = $::redis::params::service_hasstatus, - $service_manage = $::redis::params::service_manage, - $service_name = $::redis::params::service_name, - $service_provider = $::redis::params::service_provider, - $service_user = $::redis::params::service_user, - $set_max_intset_entries = $::redis::params::set_max_intset_entries, - $slave_priority = $::redis::params::slave_priority, - $slave_read_only = $::redis::params::slave_read_only, - $slave_serve_stale_data = $::redis::params::slave_serve_stale_data, - $slaveof = $::redis::params::slaveof, - $slowlog_log_slower_than = $::redis::params::slowlog_log_slower_than, - $slowlog_max_len = $::redis::params::slowlog_max_len, - $stop_writes_on_bgsave_error = $::redis::params::stop_writes_on_bgsave_error, - $syslog_enabled = $::redis::params::syslog_enabled, - $syslog_facility = $::redis::params::syslog_facility, - $tcp_backlog = $::redis::params::tcp_backlog, - $tcp_keepalive = $::redis::params::tcp_keepalive, - $timeout = $::redis::params::timeout, - $unixsocket = $::redis::params::unixsocket, - $unixsocketperm = $::redis::params::unixsocketperm, - $ulimit = $::redis::params::ulimit, - $workdir = $::redis::params::workdir, - $workdir_mode = $::redis::params::workdir_mode, - $zset_max_ziplist_entries = $::redis::params::zset_max_ziplist_entries, - $zset_max_ziplist_value = $::redis::params::zset_max_ziplist_value, - $cluster_enabled = $::redis::params::cluster_enabled, - $cluster_config_file = $::redis::params::cluster_config_file, - $cluster_node_timeout = $::redis::params::cluster_node_timeout, + Boolean $activerehashing = true, + Boolean $aof_load_truncated = true, + Boolean $aof_rewrite_incremental_fsync = true, + String[1] $appendfilename = 'appendonly.aof', + Enum['no', 'always', 'everysec'] $appendfsync = 'everysec', + Boolean $appendonly = false, + String[1] $auto_aof_rewrite_min_size = '64mb', + Integer[0] $auto_aof_rewrite_percentage = 100, + Variant[Stdlib::IP::Address, Array[Stdlib::IP::Address]] $bind = ['127.0.0.1'], + String[1] $output_buffer_limit_slave = '256mb 64mb 60', + String[1] $output_buffer_limit_pubsub = '32mb 8mb 60', + String[1] $conf_template = 'redis/redis.conf.erb', + Stdlib::Absolutepath $config_dir = $redis::params::config_dir, + Stdlib::Filemode $config_dir_mode = $redis::params::config_dir_mode, + Stdlib::Absolutepath $config_file = $redis::params::config_file, + Stdlib::Filemode $config_file_mode = '0644', + Stdlib::Absolutepath $config_file_orig = $redis::params::config_file_orig, + String[1] $config_group = $redis::params::config_group, + String[1] $config_owner = $redis::params::config_owner, + Boolean $daemonize = $redis::params::daemonize, + Integer[1] $databases = 16, + Boolean $default_install = true, + Variant[String[1], Boolean] $dbfilename = 'dump.rdb', + Optional[String] $extra_config_file = undef, + Integer[0] $hash_max_ziplist_entries = 512, + Integer[0] $hash_max_ziplist_value = 64, + Integer[0] $hll_sparse_max_bytes = 3000, + Integer[1, 500] $hz = 10, + Integer[0] $latency_monitor_threshold = 0, + Integer[0] $list_max_ziplist_entries = 512, + Integer[0] $list_max_ziplist_value = 64, + 
Stdlib::Absolutepath $log_dir = '/var/log/redis', + Stdlib::Filemode $log_dir_mode = $redis::params::log_dir_mode, + Stdlib::Absolutepath $log_file = '/var/log/redis/redis.log', + Redis::LogLevel $log_level = 'notice', + Boolean $manage_service_file = false, + Boolean $manage_package = true, + Boolean $manage_repo = false, + Optional[String[1]] $masterauth = undef, + Integer[1] $maxclients = 10000, + $maxmemory = undef, + $maxmemory_policy = undef, + $maxmemory_samples = undef, + Integer[0] $min_slaves_max_lag = 10, + Integer[0] $min_slaves_to_write = 0, + Boolean $no_appendfsync_on_rewrite = false, + Optional[String[1]] $notify_keyspace_events = undef, + Boolean $notify_service = true, + Boolean $managed_by_cluster_manager = false, + String[1] $package_ensure = 'present', + String[1] $package_name = $redis::params::package_name, + Stdlib::Absolutepath $pid_file = $redis::params::pid_file, + Stdlib::Port $port = 6379, + Boolean $protected_mode = true, + Optional[String] $ppa_repo = $redis::params::ppa_repo, + Boolean $rdbcompression = true, + String[1] $repl_backlog_size = '1mb', + Integer[0] $repl_backlog_ttl = 3600, + Boolean $repl_disable_tcp_nodelay = false, + Integer[1] $repl_ping_slave_period = 10, + Integer[1] $repl_timeout = 60, + Optional[String] $requirepass = undef, + Boolean $save_db_to_disk = true, + Hash $save_db_to_disk_interval = {'900' =>'1', '300' => '10', '60' => '10000'}, + Boolean $service_enable = true, + Stdlib::Ensure::Service $service_ensure = 'running', + String[1] $service_group = 'redis', + Boolean $service_hasrestart = true, + Boolean $service_hasstatus = true, + Boolean $service_manage = true, + String[1] $service_name = $redis::params::service_name, + Optional[String] $service_provider = undef, + String[1] $service_user = 'redis', + Integer[0] $set_max_intset_entries = 512, + Integer[0] $slave_priority = 100, + Boolean $slave_read_only = true, + Boolean $slave_serve_stale_data = true, + Optional[String[1]] $slaveof = undef, + Integer[0] $slowlog_log_slower_than = 10000, + Integer[0] $slowlog_max_len = 1024, + Boolean $stop_writes_on_bgsave_error = true, + Boolean $syslog_enabled = false, + Optional[String[1]] $syslog_facility = undef, + Integer[0] $tcp_backlog = 511, + Integer[0] $tcp_keepalive = 0, + Integer[0] $timeout = 0, + Variant[Stdlib::Absolutepath, Enum['']] $unixsocket = '/var/run/redis/redis.sock', + Variant[Stdlib::Filemode, Enum['']] $unixsocketperm = '0755', + Integer[0] $ulimit = 65536, + Stdlib::Absolutepath $workdir = $redis::params::workdir, + Stdlib::Filemode $workdir_mode = '0750', + Integer[0] $zset_max_ziplist_entries = 128, + Integer[0] $zset_max_ziplist_value = 64, + Boolean $cluster_enabled = false, + String[1] $cluster_config_file = 'nodes.conf', + Integer[1] $cluster_node_timeout = 5000, + Integer[0] $cluster_slave_validity_factor = 0, + Boolean $cluster_require_full_coverage = true, + Integer[0] $cluster_migration_barrier = 1, + Hash[String[1], Hash] $instances = {}, ) inherits redis::params { - contain ::redis::preinstall - contain ::redis::install - contain ::redis::config - contain ::redis::service + contain redis::preinstall + contain redis::install + contain redis::config + contain redis::service + + $instances.each | String $key, Hash $values | { + redis::instance { $key: + * => $values, + } + } Class['redis::preinstall'] -> Class['redis::install'] -> Class['redis::config'] - if $::redis::notify_service { + if $redis::notify_service { Class['redis::config'] ~> Class['redis::service'] } - if $::puppetversion and 
versioncmp($::puppetversion, '4.0.0') < 0 { - warning("Puppet 3 is EOL as of 01/01/2017, The 3.X.X releases of the module are the last that will support Puppet 3\nFor more information, see https://github.com/arioch/puppet-redis#puppet-3-support") - } - - exec { 'systemd-reload-redis': - command => 'systemctl daemon-reload', - refreshonly => true, - path => '/bin:/usr/bin:/usr/local/bin', - } - } diff --git a/manifests/install.pp b/manifests/install.pp index f0544cd..895efc1 100644 --- a/manifests/install.pp +++ b/manifests/install.pp @@ -1,12 +1,10 @@ -# = Class: redis::install -# -# This class installs the application. -# +# @summary This class installs the application. +# @api private class redis::install { - if $::redis::manage_package { - package { $::redis::package_name: - ensure => $::redis::package_ensure, + if $redis::manage_package { + package { $redis::package_name: + ensure => $redis::package_ensure, } } } diff --git a/manifests/instance.pp b/manifests/instance.pp index 0b817f8..01ad523 100644 --- a/manifests/instance.pp +++ b/manifests/instance.pp @@ -1,347 +1,409 @@ -# redis::instance -# # This is an defined type to allow the configuration of # multiple redis instances on one machine without conflicts # # @summary Allows the configuration of multiple redis configurations on one machine # # @example # redis::instance {'6380': -# port => '6380', +# port => 6380, # } # -# @param [String] activerehashing Enable/disable active rehashing. -# @param [String] aof_load_truncated Enable/disable loading truncated AOF file -# @param [String] aof_rewrite_incremental_fsync Enable/disable fsync for AOF file -# @param [String] appendfilename The name of the append only file -# @param [String] appendfsync Adjust fsync mode. Valid options: always, everysec, no. Default: everysec -# @param [String] appendonly Enable/disable appendonly mode. -# @param [String] auto_aof_rewrite_min_size Adjust minimum size for auto-aof-rewrite. -# @param [String] auto_aof_rewrite_percentage Adjust percentatge for auto-aof-rewrite. -# @param [String] bind Configure which IP address to listen on. -# @param [String] config_dir Directory containing the configuration files. -# @param [String] config_dir_mode Adjust mode for directory containing configuration files. -# @param [String] config_file_orig The location and name of a config file that provides the source -# @param [String] config_file Adjust main configuration file. -# @param [String] config_file_mode Adjust permissions for configuration files. -# @param [String] config_group Adjust filesystem group for config files. -# @param [String] config_owner Adjust filesystem owner for config files. -# @param [String] conf_template Define which template to use. -# @param [String] daemonize Have Redis run as a daemon. -# @param [String] databases Set the number of databases. -# @param [String] dbfilename The filename where to dump the DB -# @param [String] extra_config_file Description -# @param [String] hash_max_ziplist_entries Set max ziplist entries for hashes. -# @param [String] hash_max_ziplist_value Set max ziplist values for hashes. -# @param [String] hll_sparse_max_bytes HyperLogLog sparse representation bytes limit -# @param [String] hz Set redis background tasks frequency -# @param [String] latency_monitor_threshold Latency monitoring threshold in milliseconds -# @param [String] list_max_ziplist_entries Set max ziplist entries for lists. -# @param [String] list_max_ziplist_value Set max ziplist values for lists. 
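Building on the `redis::instance { '6380': }` example above, a sketch of two additional instances with invented titles and values; as the parameter defaults further below show, each instance derives its pid file, unix socket, working directory and service name (`redis-server-<title>`) from its title.

```puppet
# Two extra instances alongside the default install; each gets its own
# config file, service and working directory derived from the title.
redis::instance { 'cache':
  port       => 6380,
  maxclients => 4096,
}

redis::instance { 'sessions':
  port       => 6381,
  appendonly => true,
}
```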
-# @param [String] log_dir Specify directory where to write log entries. -# @param [String] log_dir_mode Adjust mode for directory containing log files. -# @param [String] log_file Specify file where to write log entries. -# @param [String] log_level Specify the server verbosity level. -# @param [String] masterauth If the master is password protected (using the "requirepass" configuration -# @param [String] maxclients Set the max number of connected clients at the same time. -# @param [String] maxmemory Don't use more memory than the specified amount of bytes. -# @param [String] maxmemory_policy How Redis will select what to remove when maxmemory is reached. -# @param [String] maxmemory_samples Select as well the sample size to check. -# @param [String] min_slaves_max_lag The lag in seconds -# @param [String] min_slaves_to_write Minimum number of slaves to be in "online" state -# @param [String] no_appendfsync_on_rewrite If you have latency problems turn this to 'true'. Otherwise leave it as -# @param [String] notify_keyspace_events Which events to notify Pub/Sub clients about events happening -# @param [String] pid_file Where to store the pid. -# @param [String] port Configure which port to listen on. -# @param [String] protected_mode Whether protected mode is enabled or not. Only applicable when no bind is set. -# @param [String] rdbcompression Enable/disable compression of string objects using LZF when dumping. -# @param [String] repl_backlog_size The replication backlog size -# @param [String] repl_backlog_ttl The number of seconds to elapse before freeing backlog buffer -# @param [String] repl_disable_tcp_nodelay Enable/disable TCP_NODELAY on the slave socket after SYNC -# @param [String] repl_ping_slave_period Slaves send PINGs to server in a predefined interval. It's possible -# @param [String] repl_timeout Set the replication timeout for: -# @param [String] requirepass Require clients to issue AUTH before processing any -# other commands. -# @param [String] save_db_to_disk Set if save db to disk. -# @param [String] save_db_to_disk_interval save the dataset every N seconds if there are at least M changes in the dataset -# @param [String] service_enable Enable/disable daemon at boot. -# @param [String] service_ensure Specify if the server should be running. -# @param [String] service_group Specify which group to run as. -# @param [String] service_hasrestart Does the init script support restart? -# @param [String] service_hasstatus Does the init script support status? -# @param [String] service_user Specify which user to run as. -# @param [String] set_max_intset_entries The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -# Default: 512 -# @param [String] slave_priority The priority number for slave promotion by Sentinel -# @param [String] slave_read_only You can configure a slave instance to accept writes or not. -# @param [String] slave_serve_stale_data When a slave loses its connection with the master, or when the replication +# @param activerehashing +# Enable/disable active rehashing. +# @param aof_load_truncated +# Enable/disable loading truncated AOF file +# @param aof_rewrite_incremental_fsync +# Enable/disable fsync for AOF file +# @param appendfilename +# The name of the append only file +# @param appendfsync +# Adjust fsync mode. Valid options: always, everysec, no. +# @param appendonly +# Enable/disable appendonly mode. 
+# @param auto_aof_rewrite_min_size +# Adjust minimum size for auto-aof-rewrite. +# @param auto_aof_rewrite_percentage +# Adjust percentatge for auto-aof-rewrite. +# @param bind +# Configure which IP address(es) to listen on. To bind on all interfaces, use an empty array. +# @param config_file_orig +# The location and name of a config file that provides the source +# @param config_file +# Adjust main configuration file. +# @param config_file_mode +# Adjust permissions for configuration files. +# @param config_group +# Adjust filesystem group for config files. +# @param config_owner +# Adjust filesystem owner for config files. +# @param conf_template +# Define which template to use. +# @param daemonize +# Have Redis run as a daemon. +# @param databases +# Set the number of databases. +# @param dbfilename +# The filename where to dump the DB +# @param extra_config_file +# Optional extra config file to include +# @param hash_max_ziplist_entries +# Set max ziplist entries for hashes. +# @param hash_max_ziplist_value +# Set max ziplist values for hashes. +# @param hll_sparse_max_bytes +# HyperLogLog sparse representation bytes limit +# @param hz +# Set redis background tasks frequency +# @param latency_monitor_threshold +# Latency monitoring threshold in milliseconds +# @param list_max_ziplist_entries +# Set max ziplist entries for lists. +# @param list_max_ziplist_value +# Set max ziplist values for lists. +# @param log_dir +# Specify directory where to write log entries. +# @param log_dir_mode +# Adjust mode for directory containing log files. +# @param log_file +# Specify file where to write log entries. +# @param log_level +# Specify the server verbosity level. +# @param masterauth +# If the master is password protected (using the "requirepass" configuration +# @param maxclients +# Set the max number of connected clients at the same time. +# @param maxmemory +# Don't use more memory than the specified amount of bytes. +# @param maxmemory_policy +# How Redis will select what to remove when maxmemory is reached. +# @param maxmemory_samples +# Select as well the sample size to check. +# @param min_slaves_max_lag +# The lag in seconds +# @param min_slaves_to_write +# Minimum number of slaves to be in "online" state +# @param no_appendfsync_on_rewrite +# If you have latency problems turn this to 'true'. Otherwise leave it as +# @param notify_keyspace_events +# Which events to notify Pub/Sub clients about events happening +# @param pid_file +# Where to store the pid. +# @param port +# Configure which port to listen on. +# @param protected_mode +# Whether protected mode is enabled or not. Only applicable when no bind is set. +# @param rdbcompression +# Enable/disable compression of string objects using LZF when dumping. +# @param repl_backlog_size +# The replication backlog size +# @param repl_backlog_ttl +# The number of seconds to elapse before freeing backlog buffer +# @param repl_disable_tcp_nodelay +# Enable/disable TCP_NODELAY on the slave socket after SYNC +# @param repl_ping_slave_period +# Slaves send PINGs to server in a predefined interval. It's possible +# @param repl_timeout +# Set the replication timeout for: +# @param requirepass +# Require clients to issue AUTH before processing any other +# commands. +# @param save_db_to_disk +# Set if save db to disk. 
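The maxmemory parameters documented above exist on both the `redis` class and the `redis::instance` define and are passed through untyped; shown here on the class. The values are ordinary Redis settings used for illustration, not module defaults.

```puppet
# Illustrative memory cap; policy names such as 'allkeys-lru' come from
# Redis itself, the module simply renders them into redis.conf.
class { 'redis':
  maxmemory         => '1gb',
  maxmemory_policy  => 'allkeys-lru',
  maxmemory_samples => 5,
}
```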
+# @param save_db_to_disk_interval +# save the dataset every N seconds if there are at least M changes in the dataset +# @param service_name +# The service name for this instance +# @param service_enable +# Enable/disable daemon at boot. +# @param service_ensure +# Specify if the server should be running. +# @param service_group +# Specify which group to run as. +# @param service_hasrestart +# Does the init script support restart? +# @param service_hasstatus +# Does the init script support status? +# @param service_user +# Specify which user to run as. +# @param set_max_intset_entries +# The following configuration setting sets the limit in the size of the set +# in order to use this special memory saving encoding. +# @param slave_priority +# The priority number for slave promotion by Sentinel +# @param slave_read_only +# You can configure a slave instance to accept writes or not. +# @param slave_serve_stale_data +# When a slave loses its connection with the master, or when the replication # is still in progress, the slave can act in two different ways: # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. -# # 2) if slave-serve-stale-data is set to 'no' the slave will reply with # an error "SYNC with master in progress" to all the kind of commands # but to INFO and SLAVEOF. -# -# Default: true -# -# @param [String] slaveof Use slaveof to make a Redis instance a copy of another Redis server. -# @param [String] slowlog_log_slower_than Tells Redis what is the execution time, in microseconds, to exceed -# in order for the command to get logged. -# Default: 10000 -# -# @param [String] slowlog_max_len Tells Redis what is the length to exceed in order for the command +# @param slaveof +# Use slaveof to make a Redis instance a copy of another Redis server. +# @param slowlog_log_slower_than +# Tells Redis what is the execution time, in microseconds, to exceed in order +# for the command to get logged. +# @param slowlog_max_len +# Tells Redis what is the length to exceed in order for the command # to get logged. -# Default: 1024 -# -# @param [String] stop_writes_on_bgsave_error If false then Redis will continue to work as usual even if there +# @param stop_writes_on_bgsave_error +# If false then Redis will continue to work as usual even if there # are problems with disk, permissions, and so forth. -# Default: true -# -# @param [String] syslog_enabled Enable/disable logging to the system logger. -# @param [String] syslog_facility Specify the syslog facility. -# Must be USER or between LOCAL0-LOCAL7. -# Default: undef -# -# @param [String] tcp_backlog Sets the TCP backlog -# @param [String] tcp_keepalive TCP keepalive. -# @param [String] timeout Close the connection after a client is idle for N seconds (0 to disable). -# @param [String] ulimit Limit the use of system-wide resources. -# @param [String] unixsocket Define unix socket path -# @param [String] unixsocketperm Define unix socket file permissions -# @param [String] workdir The DB will be written inside this directory, with the filename specified +# @param syslog_enabled +# Enable/disable logging to the system logger. +# @param syslog_facility +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# @param tcp_backlog +# Sets the TCP backlog +# @param tcp_keepalive +# TCP keepalive. 
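A sketch of a custom RDB snapshot schedule using the save_db_to_disk_interval hash documented above; keys are seconds and values the minimum number of changed keys, both as strings, matching the default `{'900' => '1', '300' => '10', '60' => '10000'}` shown in the class parameter list.

```puppet
# Equivalent to "save 300 10" and "save 60 10000" in redis.conf.
class { 'redis':
  save_db_to_disk          => true,
  save_db_to_disk_interval => { '300' => '10', '60' => '10000' },
}
```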
+# @param timeout +# Close the connection after a client is idle for N seconds (0 to disable). +# @param ulimit +# Limit the use of system-wide resources. +# @param unixsocket +# Define unix socket path +# @param unixsocketperm +# Define unix socket file permissions +# @param workdir +# The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. -# Default: /var/lib/redis/ -# @param [String] workdir_mode Adjust mode for data directory. -# @param [String] zset_max_ziplist_entries Set max entries for sorted sets. -# @param [String] zset_max_ziplist_value Set max values for sorted sets. -# @param [String] cluster_enabled Enables redis 3.0 cluster functionality -# @param [String] cluster_config_file Config file for saving cluster nodes configuration. This file is never touched by humans. -# Only set if cluster_enabled is true -# Default: nodes.conf -# @param [String] cluster_node_timeout Node timeout -# Only set if cluster_enabled is true -# Default: 5000 -define redis::instance( - $activerehashing = $::redis::activerehashing, - $aof_load_truncated = $::redis::aof_load_truncated, - $aof_rewrite_incremental_fsync = $::redis::aof_rewrite_incremental_fsync, - $appendfilename = $::redis::appendfilename, - $appendfsync = $::redis::appendfsync, - $appendonly = $::redis::appendonly, - $auto_aof_rewrite_min_size = $::redis::auto_aof_rewrite_min_size, - $auto_aof_rewrite_percentage = $::redis::auto_aof_rewrite_percentage, - $bind = $::redis::bind, - $output_buffer_limit_slave = $::redis::output_buffer_limit_slave, - $output_buffer_limit_pubsub = $::redis::output_buffer_limit_pubsub, - $conf_template = $::redis::conf_template, - $config_dir = $::redis::config_dir, - $config_dir_mode = $::redis::config_dir_mode, - $config_file = $::redis::config_file, - $config_file_mode = $::redis::config_file_mode, - $config_file_orig = $::redis::config_file_orig, - $config_group = $::redis::config_group, - $config_owner = $::redis::config_owner, - $daemonize = $::redis::daemonize, - $databases = $::redis::databases, - $dbfilename = $::redis::dbfilename, - $extra_config_file = $::redis::extra_config_file, - $hash_max_ziplist_entries = $::redis::hash_max_ziplist_entries, - $hash_max_ziplist_value = $::redis::hash_max_ziplist_value, - $hll_sparse_max_bytes = $::redis::hll_sparse_max_bytes, - $hz = $::redis::hz, - $latency_monitor_threshold = $::redis::latency_monitor_threshold, - $list_max_ziplist_entries = $::redis::list_max_ziplist_entries, - $list_max_ziplist_value = $::redis::list_max_ziplist_value, - $log_dir = $::redis::log_dir, - $log_dir_mode = $::redis::log_dir_mode, - $log_level = $::redis::log_level, - $minimum_version = $::redis::minimum_version, - $masterauth = $::redis::masterauth, - $maxclients = $::redis::maxclients, - $maxmemory = $::redis::maxmemory, - $maxmemory_policy = $::redis::maxmemory_policy, - $maxmemory_samples = $::redis::maxmemory_samples, - $min_slaves_max_lag = $::redis::min_slaves_max_lag, - $min_slaves_to_write = $::redis::min_slaves_to_write, - $no_appendfsync_on_rewrite = $::redis::no_appendfsync_on_rewrite, - $notify_keyspace_events = $::redis::notify_keyspace_events, - $managed_by_cluster_manager = $::redis::managed_by_cluster_manager, - $package_ensure = $::redis::package_ensure, - $port = $::redis::port, - $protected_mode = $::redis::protected_mode, - $rdbcompression = $::redis::rdbcompression, - $repl_backlog_size = $::redis::repl_backlog_size, - $repl_backlog_ttl = $::redis::repl_backlog_ttl, - 
$repl_disable_tcp_nodelay = $::redis::repl_disable_tcp_nodelay, - $repl_ping_slave_period = $::redis::repl_ping_slave_period, - $repl_timeout = $::redis::repl_timeout, - $requirepass = $::redis::requirepass, - $save_db_to_disk = $::redis::save_db_to_disk, - $save_db_to_disk_interval = $::redis::save_db_to_disk_interval, - $service_user = $::redis::service_user, - $set_max_intset_entries = $::redis::set_max_intset_entries, - $slave_priority = $::redis::slave_priority, - $slave_read_only = $::redis::slave_read_only, - $slave_serve_stale_data = $::redis::slave_serve_stale_data, - $slaveof = $::redis::slaveof, - $slowlog_log_slower_than = $::redis::slowlog_log_slower_than, - $slowlog_max_len = $::redis::slowlog_max_len, - $stop_writes_on_bgsave_error = $::redis::stop_writes_on_bgsave_error, - $syslog_enabled = $::redis::syslog_enabled, - $syslog_facility = $::redis::syslog_facility, - $tcp_backlog = $::redis::tcp_backlog, - $tcp_keepalive = $::redis::tcp_keepalive, - $timeout = $::redis::timeout, - $unixsocketperm = $::redis::unixsocketperm, - $ulimit = $::redis::ulimit, - $workdir_mode = $::redis::workdir_mode, - $zset_max_ziplist_entries = $::redis::zset_max_ziplist_entries, - $zset_max_ziplist_value = $::redis::zset_max_ziplist_value, - $cluster_enabled = $::redis::cluster_enabled, - $cluster_config_file = $::redis::cluster_config_file, - $cluster_node_timeout = $::redis::cluster_node_timeout, - $service_ensure = $::redis::service_ensure, - $service_enable = $::redis::service_enable, - $service_group = $::redis::service_group, - $service_hasrestart = $::redis::service_hasrestart, - $service_hasstatus = $::redis::service_hasstatus, - # Defaults for redis::instance - $manage_service_file = true, - $log_file = undef, - $pid_file = "/var/run/redis/redis-server-${name}.pid", - $unixsocket = "/var/run/redis/redis-server-${name}.sock", - $workdir = "${::redis::workdir}/redis-server-${name}", +# @param workdir_mode +# Adjust mode for data directory. +# @param zset_max_ziplist_entries +# Set max entries for sorted sets. +# @param zset_max_ziplist_value +# Set max values for sorted sets. +# @param cluster_enabled +# Enables redis 3.0 cluster functionality +# @param cluster_config_file +# Config file for saving cluster nodes configuration. This file is never +# touched by humans. Only set if cluster_enabled is true +# @param cluster_node_timeout +# Node timeout. 
Only set if cluster_enabled is true +# @param cluster_slave_validity_factor +# Control variable to disable promoting slave in case of disconnection from +# master Only set if cluster_enabled is true +# @param cluster_require_full_coverage +# If false Redis Cluster will server queries even if requests about a subset +# of keys can be processed Only set if cluster_enabled is true +# @param cluster_migration_barrier +# Minimum number of slaves master will remain connected with, for another +# slave to migrate to a master which is no longer covered by any slave Only +# set if cluster_enabled is true +define redis::instance ( + Boolean $activerehashing = $redis::activerehashing, + Boolean $aof_load_truncated = $redis::aof_load_truncated, + Boolean $aof_rewrite_incremental_fsync = $redis::aof_rewrite_incremental_fsync, + String[1] $appendfilename = $redis::appendfilename, + Enum['no', 'always', 'everysec'] $appendfsync = $redis::appendfsync, + Boolean $appendonly = $redis::appendonly, + String[1] $auto_aof_rewrite_min_size = $redis::auto_aof_rewrite_min_size, + Integer[0] $auto_aof_rewrite_percentage = $redis::auto_aof_rewrite_percentage, + Variant[Stdlib::IP::Address, Array[Stdlib::IP::Address]] $bind = $redis::bind, + String[1] $output_buffer_limit_slave = $redis::output_buffer_limit_slave, + String[1] $output_buffer_limit_pubsub = $redis::output_buffer_limit_pubsub, + String[1] $conf_template = $redis::conf_template, + Stdlib::Absolutepath $config_file = $redis::config_file, + Stdlib::Filemode $config_file_mode = $redis::config_file_mode, + Stdlib::Absolutepath $config_file_orig = $redis::config_file_orig, + String[1] $config_group = $redis::config_group, + String[1] $config_owner = $redis::config_owner, + Boolean $daemonize = true, + Integer[1] $databases = $redis::databases, + Variant[String[1], Boolean] $dbfilename = $redis::dbfilename, + Optional[String] $extra_config_file = $redis::extra_config_file, + Integer[0] $hash_max_ziplist_entries = $redis::hash_max_ziplist_entries, + Integer[0] $hash_max_ziplist_value = $redis::hash_max_ziplist_value, + Integer[0] $hll_sparse_max_bytes = $redis::hll_sparse_max_bytes, + Integer[1, 500] $hz = $redis::hz, + Integer[0] $latency_monitor_threshold = $redis::latency_monitor_threshold, + Integer[0] $list_max_ziplist_entries = $redis::list_max_ziplist_entries, + Integer[0] $list_max_ziplist_value = $redis::list_max_ziplist_value, + Stdlib::Absolutepath $log_dir = $redis::log_dir, + Stdlib::Filemode $log_dir_mode = $redis::log_dir_mode, + Redis::LogLevel $log_level = $redis::log_level, + String[1] $minimum_version = $redis::minimum_version, + Optional[String[1]] $masterauth = $redis::masterauth, + Integer[1] $maxclients = $redis::maxclients, + $maxmemory = $redis::maxmemory, + $maxmemory_policy = $redis::maxmemory_policy, + $maxmemory_samples = $redis::maxmemory_samples, + Integer[0] $min_slaves_max_lag = $redis::min_slaves_max_lag, + Integer[0] $min_slaves_to_write = $redis::min_slaves_to_write, + Boolean $no_appendfsync_on_rewrite = $redis::no_appendfsync_on_rewrite, + Optional[String[1]] $notify_keyspace_events = $redis::notify_keyspace_events, + Boolean $managed_by_cluster_manager = $redis::managed_by_cluster_manager, + String[1] $package_ensure = $redis::package_ensure, + Stdlib::Port $port = $redis::port, + Boolean $protected_mode = $redis::protected_mode, + Boolean $rdbcompression = $redis::rdbcompression, + String[1] $repl_backlog_size = $redis::repl_backlog_size, + Integer[0] $repl_backlog_ttl = $redis::repl_backlog_ttl, + Boolean 
$repl_disable_tcp_nodelay = $redis::repl_disable_tcp_nodelay, + Integer[1] $repl_ping_slave_period = $redis::repl_ping_slave_period, + Integer[1] $repl_timeout = $redis::repl_timeout, + Optional[String] $requirepass = $redis::requirepass, + Boolean $save_db_to_disk = $redis::save_db_to_disk, + Hash $save_db_to_disk_interval = $redis::save_db_to_disk_interval, + String[1] $service_user = $redis::service_user, + Integer[0] $set_max_intset_entries = $redis::set_max_intset_entries, + Integer[0] $slave_priority = $redis::slave_priority, + Boolean $slave_read_only = $redis::slave_read_only, + Boolean $slave_serve_stale_data = $redis::slave_serve_stale_data, + Optional[String[1]] $slaveof = $redis::slaveof, + Integer[0] $slowlog_log_slower_than = $redis::slowlog_log_slower_than, + Integer[0] $slowlog_max_len = $redis::slowlog_max_len, + Boolean $stop_writes_on_bgsave_error = $redis::stop_writes_on_bgsave_error, + Boolean $syslog_enabled = $redis::syslog_enabled, + Optional[String[1]] $syslog_facility = $redis::syslog_facility, + Integer[0] $tcp_backlog = $redis::tcp_backlog, + Integer[0] $tcp_keepalive = $redis::tcp_keepalive, + Integer[0] $timeout = $redis::timeout, + Variant[Stdlib::Filemode , Enum['']] $unixsocketperm = $redis::unixsocketperm, + Integer[0] $ulimit = $redis::ulimit, + Stdlib::Filemode $workdir_mode = $redis::workdir_mode, + Integer[0] $zset_max_ziplist_entries = $redis::zset_max_ziplist_entries, + Integer[0] $zset_max_ziplist_value = $redis::zset_max_ziplist_value, + Boolean $cluster_enabled = $redis::cluster_enabled, + String[1] $cluster_config_file = $redis::cluster_config_file, + Integer[1] $cluster_node_timeout = $redis::cluster_node_timeout, + Integer[0] $cluster_slave_validity_factor = $redis::cluster_slave_validity_factor, + Boolean $cluster_require_full_coverage = $redis::cluster_require_full_coverage, + Integer[0] $cluster_migration_barrier = $redis::cluster_migration_barrier, + String[1] $service_name = "redis-server-${name}", + Stdlib::Ensure::Service $service_ensure = $redis::service_ensure, + Boolean $service_enable = $redis::service_enable, + String[1] $service_group = $redis::service_group, + Boolean $service_hasrestart = $redis::service_hasrestart, + Boolean $service_hasstatus = $redis::service_hasstatus, + Boolean $manage_service_file = true, + Optional[Stdlib::Absolutepath] $log_file = undef, + Stdlib::Absolutepath $pid_file = "/var/run/redis/redis-server-${name}.pid", + Variant[Stdlib::Absolutepath, Enum['']] $unixsocket = "/var/run/redis/redis-server-${name}.sock", + Stdlib::Absolutepath $workdir = "${redis::workdir}/redis-server-${name}", ) { - if $title == 'default' { $redis_file_name_orig = $config_file_orig $redis_file_name = $config_file } else { - $redis_server_name = "redis-server-${name}" - $redis_file_name_orig = sprintf('%s/%s.%s', dirname($config_file_orig), $redis_server_name, 'conf.puppet') - $redis_file_name = sprintf('%s/%s.%s', dirname($config_file), $redis_server_name, 'conf') + $redis_file_name_orig = sprintf('%s/%s.%s', dirname($config_file_orig), $service_name, 'conf.puppet') + $redis_file_name = sprintf('%s/%s.%s', dirname($config_file), $service_name, 'conf') } - if $log_dir != $::redis::log_dir { + if $log_dir != $redis::log_dir { file { $log_dir: ensure => directory, group => $service_group, mode => $log_dir_mode, owner => $service_user, } } $_real_log_file = $log_file ? 
{ undef => "${log_dir}/redis-server-${name}.log", default => $log_file, } - if $workdir != $::redis::workdir { + if $workdir != $redis::workdir { file { $workdir: ensure => directory, group => $service_group, mode => $workdir_mode, owner => $service_user, } } if $manage_service_file { $service_provider_lookup = pick(getvar('service_provider'), false) if $service_provider_lookup == 'systemd' { - file { "/etc/systemd/system/${redis_server_name}.service": + file { "/etc/systemd/system/${service_name}.service": ensure => file, owner => 'root', group => 'root', mode => '0644', content => template('redis/service_templates/redis.service.erb'), } - ~> Exec['systemd-reload-redis'] + + # Only necessary for Puppet < 6.1.0, + # See https://github.com/puppetlabs/puppet/commit/f8d5c60ddb130c6429ff12736bfdb4ae669a9fd4 + if versioncmp($facts['puppetversion'],'6.1.0') < 0 { + include systemd::systemctl::daemon_reload + File["/etc/systemd/system/${service_name}.service"] ~> Class['systemd::systemctl::daemon_reload'] + } if $title != 'default' { - service { $redis_server_name: + service { $service_name: ensure => $service_ensure, enable => $service_enable, hasrestart => $service_hasrestart, hasstatus => $service_hasstatus, subscribe => [ - File["/etc/systemd/system/${redis_server_name}.service"], + File["/etc/systemd/system/${service_name}.service"], Exec["cp -p ${redis_file_name_orig} ${redis_file_name}"], ], } } } else { - file { "/etc/init.d/${redis_server_name}": + file { "/etc/init.d/${service_name}": ensure => file, mode => '0755', - content => template("redis/service_templates/redis.${::osfamily}.erb"), + content => template("redis/service_templates/redis.${facts['os']['family']}.erb"), } if $title != 'default' { - service { $redis_server_name: + service { $service_name: ensure => $service_ensure, enable => $service_enable, hasrestart => $service_hasrestart, hasstatus => $service_hasstatus, subscribe => [ - File["/etc/init.d/${redis_server_name}"], + File["/etc/init.d/${service_name}"], Exec["cp -p ${redis_file_name_orig} ${redis_file_name}"], ], } } } } File { owner => $config_owner, group => $config_group, mode => $config_file_mode, } file {$redis_file_name_orig: ensure => file, } exec {"cp -p ${redis_file_name_orig} ${redis_file_name}": path => '/usr/bin:/bin', subscribe => File[$redis_file_name_orig], refreshonly => true, } + $bind_arr = [$bind].flatten + if $package_ensure =~ /^([0-9]+:)?[0-9]+\.[0-9]/ { if ':' in $package_ensure { $_redis_version_real = split($package_ensure, ':') $redis_version_real = $_redis_version_real[1] } else { $redis_version_real = $package_ensure } } else { $redis_version_real = pick(getvar('redis_server_version'), $minimum_version) } - if ($redis_version_real and $conf_template == 'redis/redis.conf.erb') { - case $redis_version_real { - /^2.4./: { - File[$redis_file_name_orig] { content => template('redis/redis.conf.2.4.10.erb') } - } - /^2.8./: { - File[$redis_file_name_orig] { content => template('redis/redis.conf.2.8.erb') } - } - /^3.2./: { - File[$redis_file_name_orig] { content => template('redis/redis.conf.3.2.erb') } - } - default: { - File[$redis_file_name_orig] { content => template($conf_template) } - } - } - } else { - File[$redis_file_name_orig] { content => template($conf_template) } - } + $supports_protected_mode = !$redis_version_real or versioncmp($redis_version_real, '3.2.0') >= 0 + + File[$redis_file_name_orig] { content => template($conf_template) } } diff --git a/manifests/params.pp b/manifests/params.pp index da33491..accc8a1 100644 --- 
a/manifests/params.pp +++ b/manifests/params.pp @@ -1,324 +1,209 @@ -# = Class: redis::params -# -# This class provides a number of parameters. -# -class redis::params { - # Generic - $manage_repo = false - $manage_package = true - $managed_by_cluster_manager = false - - # redis.conf.erb - $activerehashing = true - $aof_load_truncated = true - $aof_rewrite_incremental_fsync = true - $appendfilename = 'appendonly.aof' - $appendfsync = 'everysec' - $appendonly = false - $auto_aof_rewrite_min_size = '64mb' - $auto_aof_rewrite_percentage = 100 - $bind = '127.0.0.1' - $output_buffer_limit_slave = '256mb 64mb 60' - $output_buffer_limit_pubsub = '32mb 8mb 60' - $conf_template = 'redis/redis.conf.erb' - $default_install = true - $databases = 16 - $dbfilename = 'dump.rdb' - $extra_config_file = undef - $hash_max_ziplist_entries = 512 - $hash_max_ziplist_value = 64 - $hll_sparse_max_bytes = 3000 - $hz = 10 - $latency_monitor_threshold = 0 - $list_max_ziplist_entries = 512 - $list_max_ziplist_value = 64 - $log_dir = '/var/log/redis' - $log_file = '/var/log/redis/redis.log' - $log_level = 'notice' - $manage_service_file = false - $maxclients = 10000 - $maxmemory = undef - $maxmemory_policy = undef - $maxmemory_samples = undef - $no_appendfsync_on_rewrite = false - $notify_keyspace_events = undef - $notify_service = true - $port = 6379 - $protected_mode = 'yes' - $rdbcompression = true - $requirepass = undef - $save_db_to_disk = true - $save_db_to_disk_interval = {'900' =>'1', '300' => '10', '60' => '10000'} - $sentinel_auth_pass = undef - $sentinel_bind = undef - $sentinel_config_file_mode = '0644' - $sentinel_config_group = 'root' - $sentinel_config_owner = 'redis' - $sentinel_conf_template = 'redis/redis-sentinel.conf.erb' - $sentinel_down_after = 30000 - $sentinel_failover_timeout = 180000 - $sentinel_master_name = 'mymaster' - $sentinel_parallel_sync = 1 - $sentinel_port = 26379 - $sentinel_protected_mode = 'yes' - $sentinel_quorum = 2 - $sentinel_service_name = 'redis-sentinel' - $sentinel_working_dir = '/tmp' - $sentinel_init_template = 'redis/redis-sentinel.init.erb' - $sentinel_pid_file = '/var/run/redis/redis-sentinel.pid' - $sentinel_notification_script = undef - $sentinel_client_reconfig_script = undef - $service_provider = undef - $set_max_intset_entries = 512 - $slave_priority = 100 - $slowlog_log_slower_than = 10000 - $slowlog_max_len = 1024 - $stop_writes_on_bgsave_error = true - $syslog_enabled = undef - $syslog_facility = undef - $tcp_backlog = 511 - $tcp_keepalive = 0 - $timeout = 0 - $ulimit = 65536 - $unixsocket = '/var/run/redis/redis.sock' - $unixsocketperm = 755 - $zset_max_ziplist_entries = 128 - $zset_max_ziplist_value = 64 - - # redis.conf.erb - replication - $masterauth = undef - $min_slaves_to_write = 0 - $min_slaves_max_lag = 10 - $repl_backlog_size = '1mb' - $repl_backlog_ttl = 3600 - $repl_disable_tcp_nodelay = false - $repl_ping_slave_period = 10 - $repl_timeout = 60 - $slave_read_only = true - $slave_serve_stale_data = true - $slaveof = undef - - # redis.conf.erb - redis 3.0 clustering - $cluster_enabled = false - $cluster_config_file = 'nodes.conf' - $cluster_node_timeout = 5000 - - case $::osfamily { +# @summary This class provides a number of parameters. 
+# @api private +class redis::params inherits redis::globals { + case $facts['os']['family'] { 'Debian': { + $ppa_repo = 'ppa:chris-lea/redis-server' + $config_dir = '/etc/redis' $config_dir_mode = '0755' $config_file = '/etc/redis/redis.conf' - $config_file_mode = '0644' $config_file_orig = '/etc/redis/redis.conf.puppet' - $config_owner = 'redis' - $daemonize = true $log_dir_mode = '0755' - $package_ensure = 'present' $package_name = 'redis-server' $pid_file = '/var/run/redis/redis-server.pid' + $workdir = '/var/lib/redis' + $daemonize = true + $service_name = 'redis-server' + $sentinel_config_file = '/etc/redis/sentinel.conf' $sentinel_config_file_orig = '/etc/redis/redis-sentinel.conf.puppet' + $sentinel_service_name = 'redis-sentinel' $sentinel_daemonize = true $sentinel_init_script = '/etc/init.d/redis-sentinel' $sentinel_package_name = 'redis-sentinel' - $sentinel_package_ensure = 'present' - $service_manage = true - $service_enable = true - $service_ensure = 'running' - $service_group = 'redis' - $service_hasrestart = true - $service_hasstatus = true - $service_name = 'redis-server' - $service_user = 'redis' - $ppa_repo = 'ppa:chris-lea/redis-server' - $workdir = '/var/lib/redis' - $workdir_mode = '0750' + $sentinel_log_file = '/var/log/redis/redis-sentinel.log' + $sentinel_working_dir = '/var/lib/redis' + $sentinel_protected_mode = 'yes' - case $::operatingsystem { + case $facts['os']['name'] { 'Ubuntu': { - $config_group = 'redis' - - case $::operatingsystemmajrelease { - '14.04': { - # upstream package is 2.8.4 - $minimum_version = '2.8.4' - } - '16.04': { - # upstream package is 3.0.3 - $minimum_version = '3.0.3' - } - default: { - warning("Ubuntu release ${::operatingsystemmajrelease} isn't 'officially' supported by module, but will git it a shot") - $minimum_version = '2.8.5' - } + $config_group = 'redis' + $minimum_version = $facts['os']['release']['major'] ? { + '16.04' => '3.0.5', + '18.04' => '4.0.9', + default => '5.0.7', + } + $sentinel_pid_file = $facts['os']['release']['major'] ? 
{ + '16.04' => '/var/run/redis/redis-sentinel.pid', + default => '/var/run/sentinel/redis-sentinel.pid', } } default: { $config_group = 'root' - # Debian standard package is 2.4.14 - # But we have dotdeb repo which is 3.2.5 $minimum_version = '3.2.5' + if versioncmp($facts['os']['release']['major'], '10') >= 0 { + $sentinel_pid_file = '/run/sentinel/redis-sentinel.pid' + } else { + $sentinel_pid_file = '/var/run/redis/redis-sentinel.pid' + } } } } 'RedHat': { - $config_dir = '/etc/redis' - $config_dir_mode = '0755' - $config_file = '/etc/redis.conf' - $config_file_mode = '0644' - $config_file_orig = '/etc/redis.conf.puppet' - $config_group = 'root' - $config_owner = 'redis' - $daemonize = true - $log_dir_mode = '0755' - $package_ensure = 'present' - $package_name = 'redis' - $pid_file = '/var/run/redis/redis.pid' - $sentinel_config_file = '/etc/redis-sentinel.conf' - $sentinel_config_file_orig = '/etc/redis-sentinel.conf.puppet' - $sentinel_daemonize = false - $sentinel_init_script = undef - $sentinel_package_name = 'redis' - $sentinel_package_ensure = 'present' - $service_manage = true - $service_enable = true - $service_ensure = 'running' - $service_hasrestart = true - $service_hasstatus = true - $service_name = 'redis' - $service_user = 'redis' - $ppa_repo = undef - $workdir = '/var/lib/redis' - $workdir_mode = '0755' - - case $::operatingsystemmajrelease { - '6': { - # CentOS 6 EPEL package is just updated to 3.2.10 - # https://bugzilla.redhat.com/show_bug.cgi?id=923970 - $minimum_version = '3.2.10' - - $service_group = 'root' + $ppa_repo = undef + $daemonize = false + $config_owner = 'redis' + $config_group = 'root' + $config_dir_mode = '0755' + $log_dir_mode = '0750' + + $sentinel_daemonize = false + $sentinel_init_script = undef + $sentinel_working_dir = '/tmp' + $sentinel_protected_mode = 'yes' + + $scl = $redis::globals::scl + if $scl { + $config_dir = "/etc/opt/rh/${scl}/redis" + $config_file = "/etc/opt/rh/${scl}/redis.conf" + $config_file_orig = "/etc/opt/rh/${scl}/redis.conf.puppet" + $package_name = "${scl}-redis" + $pid_file = "/var/opt/rh/${scl}/run/redis_6379.pid" + $service_name = "${scl}-redis" + $workdir = "/var/opt/rh/${scl}/lib/redis" + + $sentinel_config_file = "${config_dir}/redis-sentinel.conf" + $sentinel_config_file_orig = "${config_dir}/redis-sentinel.conf.puppet" + $sentinel_service_name = "${scl}-redis-sentinel" + $sentinel_package_name = $package_name + $sentinel_pid_file = "/var/opt/rh/${scl}/run/redis-sentinel.pid" + $sentinel_log_file = "/var/opt/rh/${scl}/log/redis/sentinel.log" + + $minimum_version = $scl ? { + 'rh-redis32' => '3.2.13', + default => '5.0.5', } - '7': { - # CentOS 7 EPEL package is 3.2.3 - $minimum_version = '3.2.3' - - $service_group = 'redis' - } - default: { - fail("Not sure what Redis version is avaliable upstream on your release: ${::operatingsystemmajrelease}") + } else { + $config_dir = '/etc/redis' + $config_file = '/etc/redis.conf' + $config_file_orig = '/etc/redis.conf.puppet' + $package_name = 'redis' + $pid_file = $facts['os']['release']['major'] ? 
{ + '6' => '/var/run/redis/redis.pid', + default => '/var/run/redis_6379.pid', } + $service_name = 'redis' + $workdir = '/var/lib/redis' + + $sentinel_config_file = '/etc/redis-sentinel.conf' + $sentinel_config_file_orig = '/etc/redis-sentinel.conf.puppet' + $sentinel_service_name = 'redis-sentinel' + $sentinel_package_name = 'redis' + $sentinel_pid_file = '/var/run/redis/redis-sentinel.pid' + $sentinel_log_file = '/var/log/redis/sentinel.log' + + # EPEL 6 and newer have 3.2 so we can assume all EL is 3.2+ + $minimum_version = '3.2.10' } } 'FreeBSD': { + $ppa_repo = undef + $config_dir = '/usr/local/etc/redis' $config_dir_mode = '0755' $config_file = '/usr/local/etc/redis.conf' - $config_file_mode = '0644' $config_file_orig = '/usr/local/etc/redis.conf.puppet' $config_group = 'wheel' $config_owner = 'redis' - $daemonize = true $log_dir_mode = '0755' - $package_ensure = 'present' $package_name = 'redis' $pid_file = '/var/run/redis/redis.pid' + $daemonize = true + $service_name = 'redis' + $workdir = '/var/db/redis' + $sentinel_config_file = '/usr/local/etc/redis-sentinel.conf' $sentinel_config_file_orig = '/usr/local/etc/redis-sentinel.conf.puppet' + $sentinel_service_name = 'redis-sentinel' $sentinel_daemonize = true $sentinel_init_script = undef $sentinel_package_name = 'redis' - $sentinel_package_ensure = 'present' - $service_manage = true - $service_enable = true - $service_ensure = 'running' - $service_group = 'redis' - $service_hasrestart = true - $service_hasstatus = true - $service_name = 'redis' - $service_user = 'redis' - $ppa_repo = undef - $workdir = '/var/db/redis' - $workdir_mode = '0750' + $sentinel_pid_file = '/var/run/redis/redis-sentinel.pid' + $sentinel_log_file = '/var/log/redis/sentinel.log' + $sentinel_working_dir = '/tmp' + $sentinel_protected_mode = 'yes' # pkg version $minimum_version = '3.2.4' } 'Suse': { + $ppa_repo = undef + $config_dir = '/etc/redis' $config_dir_mode = '0750' $config_file = '/etc/redis/redis-server.conf' - $config_file_mode = '0644' $config_group = 'redis' $config_owner = 'redis' - $daemonize = true $log_dir_mode = '0750' - $package_ensure = 'present' $package_name = 'redis' $pid_file = '/var/run/redis/redis-server.pid' + $daemonize = true + $service_name = 'redis' + $workdir = '/var/lib/redis' + $sentinel_config_file = '/etc/redis/redis-sentinel.conf' $sentinel_config_file_orig = '/etc/redis/redis-sentinel.conf.puppet' + $sentinel_service_name = 'redis-sentinel' $sentinel_daemonize = true $sentinel_init_script = undef $sentinel_package_name = 'redis' - $sentinel_package_ensure = 'present' - $service_manage = true - $service_enable = true - $service_ensure = 'running' - $service_group = 'redis' - $service_hasrestart = true - $service_hasstatus = true - $service_name = 'redis' - $service_user = 'redis' - $ppa_repo = undef - $workdir = '/var/lib/redis' - $workdir_mode = '0750' + $sentinel_pid_file = '/var/run/redis/redis-sentinel.pid' + $sentinel_log_file = '/var/log/redis/sentinel.log' + $sentinel_working_dir = '/tmp' + $sentinel_protected_mode = 'yes' # suse package version $minimum_version = '3.0.5' } 'Archlinux': { + $ppa_repo = undef + $config_dir = '/etc/redis' $config_dir_mode = '0755' $config_file = '/etc/redis/redis.conf' - $config_file_mode = '0644' $config_file_orig = '/etc/redis/redis.conf.puppet' $config_group = 'root' $config_owner = 'root' - $daemonize = true $log_dir_mode = '0755' - $package_ensure = 'present' $package_name = 'redis' $pid_file = '/var/run/redis.pid' + $daemonize = true + $service_name = 'redis' + $workdir = 
'/var/lib/redis' + $sentinel_config_file = '/etc/redis/redis-sentinel.conf' $sentinel_config_file_orig = '/etc/redis/redis-sentinel.conf.puppet' + $sentinel_service_name = 'redis-sentinel' $sentinel_daemonize = true $sentinel_init_script = undef $sentinel_package_name = 'redis' - $sentinel_package_ensure = 'present' - $service_manage = true - $service_enable = true - $service_ensure = 'running' - $service_group = 'redis' - $service_hasrestart = true - $service_hasstatus = true - $service_name = 'redis' - $service_user = 'redis' - $ppa_repo = undef - $workdir = '/var/lib/redis' - $workdir_mode = '0750' + $sentinel_pid_file = '/var/run/redis/redis-sentinel.pid' + $sentinel_log_file = '/var/log/redis/sentinel.log' + $sentinel_working_dir = '/tmp' + $sentinel_protected_mode = 'yes' # pkg version $minimum_version = '3.2.4' } default: { - fail "Operating system ${::operatingsystem} is not supported yet." + fail "Operating system ${facts['os']['name']} is not supported yet." } } } diff --git a/manifests/preinstall.pp b/manifests/preinstall.pp index 0bbd090..3378c30 100644 --- a/manifests/preinstall.pp +++ b/manifests/preinstall.pp @@ -1,47 +1,35 @@ -# = Class: redis::preinstall -# -# This class provides anything required by the install class. -# Such as package repositories. -# +# @summary Provides anything required by the install class, such as package +# repositories. +# @api private class redis::preinstall { - if $::redis::manage_repo { - case $::operatingsystem { - 'RedHat', 'CentOS', 'Scientific', 'OEL', 'Amazon': { - require ::epel - } - - 'Debian': { - contain ::apt - apt::source { 'dotdeb': - location => 'http://packages.dotdeb.org/', - release => $::lsbdistcodename, - repos => 'all', - key => { - id => '6572BBEF1B5FF28B28B706837E3F070089DF5277', - source => 'http://www.dotdeb.org/dotdeb.gpg', - }, - include => { 'src' => true }, - before => [ - Class['apt::update'], - Package[$::redis::package_name], - ], + if $redis::manage_repo { + if $facts['os']['family'] == 'RedHat' { + if $facts['os']['name'] != 'Fedora' { + if $redis::scl { + if $facts['os']['name'] == 'CentOS' { + ensure_packages(['centos-release-scl-rh']) + Package['centos-release-scl-rh'] -> Package[$redis::package_name] + } + } else { + require 'epel' } - } - - 'Ubuntu': { - contain ::apt - apt::ppa { $::redis::ppa_repo: - before => [ - Class['apt::update'], - Package[$::redis::package_name], - ], - } + } elsif $facts['os']['name'] == 'Debian' { + contain 'apt' + apt::source { 'dotdeb': + location => 'http://packages.dotdeb.org/', + repos => 'all', + key => { + id => '6572BBEF1B5FF28B28B706837E3F070089DF5277', + source => 'http://www.dotdeb.org/dotdeb.gpg', + }, + include => { 'src' => true }, } - - default: { + } elsif $facts['os']['name'] == 'Ubuntu' { + contain 'apt' + apt::ppa { $redis::ppa_repo: } } } } diff --git a/manifests/sentinel.pp b/manifests/sentinel.pp index d9c2bc2..9e628af 100644 --- a/manifests/sentinel.pp +++ b/manifests/sentinel.pp @@ -1,264 +1,195 @@ -# = Class: redis::sentinel +# @summary Install redis-sentinel # -# This class installs redis-sentinel -# -# == Parameters: -# -# -# [*auth_pass*] +# @param auth_pass # The password to use to authenticate with the master and slaves. # -# Default: undef -# -# [*config_file*] +# @param config_file # The location and name of the sentinel config file. 
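For orientation on the reworked `redis::preinstall` above: repository handling is now per platform, requiring EPEL on RedHat-family hosts (or installing the SCL release package when a collection is selected on CentOS), adding the dotdeb apt source on Debian, and the PPA from `$redis::ppa_repo` on Ubuntu. A minimal opt-in sketch, borrowing the collection name used by the SCL acceptance test added later in this diff; treat `rh-redis5` as an example, not a requirement:

```puppet
# Opt into repo management; on CentOS 7 an SCL build can be selected through
# redis::globals before declaring the main class.
class { 'redis::globals':
  scl => 'rh-redis5',
}

class { 'redis':
  manage_repo => true,
}
```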
# -# Default for deb: /etc/redis/redis-sentinel.conf -# Default for rpm: /etc/redis-sentinel.conf -# -# [*config_file_orig*] +# @param config_file_orig # The location and name of a config file that provides the source # of the sentinel config file. Two different files are needed # because sentinel itself writes to its own config file and we do # not want override that when puppet is run unless there are # changes from the manifests. # -# Default for deb: /etc/redis/redis-sentinel.conf.puppet -# Default for rpm: /etc/redis-sentinel.conf.puppet -# -# [*config_file_mode*] +# @param config_file_mode # Permissions of config file. # -# Default: 0644 -# -# [*conf_template*] +# @param conf_template # Define which template to use. # -# Default: redis/redis-sentinel.conf.erb -# -# [*daemonize*] +# @param daemonize # Have Redis sentinel run as a daemon. # -# Default: true -# -# [*down_after*] +# @param down_after # Number of milliseconds the master (or any attached slave or sentinel) # should be unreachable (as in, not acceptable reply to PING, continuously, # for the specified period) in order to consider it in S_DOWN state. # -# Default: 30000 -# -# [*failover_timeout*] +# @param failover_timeout # Specify the failover timeout in milliseconds. # -# Default: 180000 -# -# [*init_script*] +# @param init_script # Specifiy the init script that will be created for sentinel. # -# Default: undef on rpm, /etc/init.d/redis-sentinel on apt. -# -# [*log_file*] +# @param log_file # Specify where to write log entries. # -# Default: /var/log/redis/redis.log -# -# [*log_level*] +# @param log_level # Specify how much we should log. # -# Default: notice -# -# [*master_name*] +# @param master_name # Specify the name of the master redis server. # The valid charset is A-z 0-9 and the three characters ".-_". # -# Default: mymaster -# -# [*redis_host*] +# @param redis_host # Specify the bound host of the master redis server. # -# Default: 127.0.0.1 -# -# [*redis_port*] +# @param redis_port # Specify the port of the master redis server. # -# Default: 6379 +# @param protected_mode +# Whether protected mode is enabled or not. Only applicable when no bind is set. # -# [*package_name*] +# @param package_name # The name of the package that installs sentinel. # -# Default: 'redis-server' on apt, 'redis' on rpm -# -# [*package_ensure*] +# @param package_ensure # Do we ensure this package. # -# Default: 'present' -# -# [*parallel_sync*] +# @param parallel_sync # How many slaves can be reconfigured at the same time to use a # new master after a failover. # -# Default: 1 -# -# [*pid_file*] +# @param pid_file # If sentinel is daemonized it will write its pid at this location. # -# Default: /var/run/redis/redis-sentinel.pid -# -# [*quorum*] +# @param quorum # Number of sentinels that must agree that a master is down to # signal sdown state. # -# Default: 2 -# -# [*sentinel_bind*] +# @param sentinel_bind # Allow optional sentinel server ip binding. Can help overcome # issues arising from protect-mode added Redis 3.2 # -# Default: undef -# -# [*sentinel_port*] +# @param sentinel_port # The port of sentinel server. # -# Default: 26379 -# -# [*service_group*] +# @param service_group # The group of the config file. # -# Default: redis -# -# [*service_name*] +# @param service_name # The name of the service (for puppet to manage). # -# Default: redis-sentinel -# -# [*service_owner*] +# @param service_user # The owner of the config file. # -# Default: redis +# @param service_enable +# Enable the service at boot time. 
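The remaining parameters (working directory, notification/reconfig scripts) are documented just below. As a quick orientation on the newly typed interface, a hedged sketch of a non-default Sentinel declaration using only parameters covered in this header; the addresses are RFC 5737 documentation placeholders and the password is a throwaway value:

```puppet
# Illustrative Sentinel watching a master on another host. With the new data
# types, quorum and the timeouts are Integers, and sentinel_bind accepts a
# single address or an array of addresses.
class { 'redis::sentinel':
  master_name      => 'mymaster',
  redis_host       => '192.0.2.10',
  redis_port       => 6379,
  quorum           => 2,
  down_after       => 30000,
  failover_timeout => 180000,
  auth_pass        => 'foobared',
  sentinel_bind    => ['192.0.2.11', '127.0.0.1'],
}
```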
# -# [*working_dir*] +# @param working_dir # The directory into which sentinel will change to avoid mount # conflicts. # -# Default: /tmp -# -# [*notification_script*] +# @param notification_script # Path to the notification script # -# Default: undef -# -# [*client_reconfig_script*] +# @param client_reconfig_script # Path to the client-reconfig script -# [*protected_mode*] -# Whether protected mode is enabled or not. Only applicable when no bind is set. -# -# Default: undef -# == Actions: -# - Install and configure Redis Sentinel -# -# == Sample Usage: # -# class { 'redis::sentinel': } +# @example Basic inclusion +# include redis::sentinel # +# @example Configuring options # class {'redis::sentinel': # down_after => 80000, -# log_file => '/var/log/redis/sentinel.log', +# log_file => '/var/log/redis/sentinel.log', # } # class redis::sentinel ( - $auth_pass = $::redis::params::sentinel_auth_pass, - $config_file = $::redis::params::sentinel_config_file, - $config_file_orig = $::redis::params::sentinel_config_file_orig, - $config_file_mode = $::redis::params::sentinel_config_file_mode, - $conf_template = $::redis::params::sentinel_conf_template, - $daemonize = $::redis::params::sentinel_daemonize, - $down_after = $::redis::params::sentinel_down_after, - $failover_timeout = $::redis::params::sentinel_failover_timeout, - $init_script = $::redis::params::sentinel_init_script, - $init_template = $::redis::params::sentinel_init_template, - $log_level = $::redis::params::log_level, - $log_file = $::redis::params::log_file, - $master_name = $::redis::params::sentinel_master_name, - $redis_host = $::redis::params::bind, - $redis_port = $::redis::params::port, - $package_name = $::redis::params::sentinel_package_name, - $package_ensure = $::redis::params::sentinel_package_ensure, - $parallel_sync = $::redis::params::sentinel_parallel_sync, - $pid_file = $::redis::params::sentinel_pid_file, - $quorum = $::redis::params::sentinel_quorum, - $sentinel_bind = $::redis::params::sentinel_bind, - $sentinel_port = $::redis::params::sentinel_port, - $protected_mode = $::redis::params::sentinel_protected_mode, - $service_group = $::redis::params::service_group, - $service_name = $::redis::params::sentinel_service_name, - $service_ensure = $::redis::params::service_ensure, - $service_user = $::redis::params::service_user, - $working_dir = $::redis::params::sentinel_working_dir, - $notification_script = $::redis::params::sentinel_notification_script, - $client_reconfig_script = $::redis::params::sentinel_client_reconfig_script, + Optional[String[1]] $auth_pass = undef, + Stdlib::Absolutepath $config_file = $redis::params::sentinel_config_file, + Stdlib::Absolutepath $config_file_orig = $redis::params::sentinel_config_file_orig, + Stdlib::Filemode $config_file_mode = '0644', + String[1] $conf_template = 'redis/redis-sentinel.conf.erb', + Boolean $daemonize = $redis::params::sentinel_daemonize, + Boolean $protected_mode = $redis::params::sentinel_protected_mode, + Integer[1] $down_after = 30000, + Integer[1] $failover_timeout = 180000, + Optional[Stdlib::Absolutepath] $init_script = $redis::params::sentinel_init_script, + String[1] $init_template = 'redis/redis-sentinel.init.erb', + Redis::LogLevel $log_level = 'notice', + Stdlib::Absolutepath $log_file = $redis::params::sentinel_log_file, + String[1] $master_name = 'mymaster', + Stdlib::Host $redis_host = '127.0.0.1', + Stdlib::Port $redis_port = 6379, + String[1] $package_name = $redis::params::sentinel_package_name, + String[1] $package_ensure = 'present', + 
Integer[0] $parallel_sync = 1, + Stdlib::Absolutepath $pid_file = $redis::params::sentinel_pid_file, + Integer[1] $quorum = 2, + Variant[Undef, Stdlib::IP::Address, Array[Stdlib::IP::Address]] $sentinel_bind = undef, + Stdlib::Port $sentinel_port = 26379, + String[1] $service_group = 'redis', + String[1] $service_name = $redis::params::sentinel_service_name, + Stdlib::Ensure::Service $service_ensure = 'running', + Boolean $service_enable = true, + String[1] $service_user = 'redis', + Stdlib::Absolutepath $working_dir = $redis::params::sentinel_working_dir, + Optional[Stdlib::Absolutepath] $notification_script = undef, + Optional[Stdlib::Absolutepath] $client_reconfig_script = undef, ) inherits redis::params { - require ::redis + require 'redis' - if $::osfamily == 'Debian' { - # Debian flavour machines have a dedicated redis-sentinel package - # This is default in Xenial or Stretch onwards or PPA/other upstream - # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=775414 for context - if ( - (versioncmp($::operatingsystemmajrelease, '16.04') >= 0 and $::operatingsystem == 'Ubuntu') or - (versioncmp($::operatingsystemmajrelease, '9') >= 0 and $::operatingsystem == 'Debian') or - $::redis::manage_repo - ) { - package { $package_name: - ensure => $package_ensure, - } + if $facts['os']['family'] == 'Debian' { + package { $package_name: + ensure => $package_ensure, + before => File[$config_file_orig], + } + + if $init_script { + Package[$package_name] -> File[$init_script] } } - file { - $config_file_orig: - ensure => present, - owner => $service_user, - group => $service_group, - mode => $config_file_mode, - content => template($conf_template), - require => Package[$package_name]; + file { $config_file_orig: + ensure => file, + owner => $service_user, + group => $service_group, + mode => $config_file_mode, + content => template($conf_template), } - exec { - "cp -p ${config_file_orig} ${config_file}": - path => '/usr/bin:/bin', - subscribe => File[$config_file_orig], - notify => Service[$service_name], - refreshonly => true; + exec { "cp -p ${config_file_orig} ${config_file}": + path => '/usr/bin:/bin', + subscribe => File[$config_file_orig], + notify => Service[$service_name], + refreshonly => true, } if $init_script { - file { - $init_script: - ensure => present, - owner => 'root', - group => 'root', - mode => '0755', - content => template($init_template), - require => Package[$package_name]; + file { $init_script: + ensure => file, + owner => 'root', + group => 'root', + mode => '0755', + content => template($init_template), } - exec { - '/usr/sbin/update-rc.d redis-sentinel defaults': - subscribe => File[$init_script], - refreshonly => true; + exec { '/usr/sbin/update-rc.d redis-sentinel defaults': + subscribe => File[$init_script], + refreshonly => true, + notify => Service[$service_name], } } service { $service_name: - ensure => $service_ensure, - enable => $::redis::params::service_enable, - hasrestart => $::redis::params::service_hasrestart, - hasstatus => $::redis::params::service_hasstatus, + ensure => $service_ensure, + enable => $service_enable, } } diff --git a/manifests/service.pp b/manifests/service.pp index 9d86f02..0145f27 100644 --- a/manifests/service.pp +++ b/manifests/service.pp @@ -1,16 +1,14 @@ -# = Class: redis::service -# -# This class manages the Redis daemon. -# +# @summary This class manages the Redis daemon. 
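`redis::service` below stays a thin private wrapper: everything it passes to the `service` resource comes from the public `redis` class. A sketch of the case where that matters, assuming an external cluster manager (pacemaker, keepalived, or similar) owns the daemon; both parameters shown are taken from this module, everything else is left at defaults:

```puppet
# Keep package and config management, but let the cluster manager start and
# stop redis itself; managed_by_cluster_manager still installs the nofile
# limits (see the redis::ulimit change further down in this diff).
class { 'redis':
  service_manage             => false,
  managed_by_cluster_manager => true,
}
```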
+# @api private class redis::service { - if $::redis::service_manage { - service { $::redis::service_name: - ensure => $::redis::service_ensure, - enable => $::redis::service_enable, - hasrestart => $::redis::service_hasrestart, - hasstatus => $::redis::service_hasstatus, - provider => $::redis::service_provider, + if $redis::service_manage { + service { $redis::service_name: + ensure => $redis::service_ensure, + enable => $redis::service_enable, + hasrestart => $redis::service_hasrestart, + hasstatus => $redis::service_hasstatus, + provider => $redis::service_provider, } } } diff --git a/manifests/ulimit.pp b/manifests/ulimit.pp index 3b0bdbf..f7fa88f 100644 --- a/manifests/ulimit.pp +++ b/manifests/ulimit.pp @@ -1,77 +1,78 @@ # Redis class for configuring ulimit # Used to DRY up the config class, and # move the logic for ulimit changes all # into one place. # # Parameters are not required as it's a # private class only referencable from # the redis module, where the variables # would already be defined # # @example # contain redis::ulimit # # @author - Peter Souter # # @api private class redis::ulimit { assert_private('The redis::ulimit class is only to be called from the redis::config class') $service_provider_lookup = pick(getvar('service_provider'), false) - if $::redis::managed_by_cluster_manager { + if $redis::managed_by_cluster_manager { file { '/etc/security/limits.d/redis.conf': ensure => 'file', owner => 'root', group => 'root', mode => '0644', - content => "redis soft nofile ${::redis::ulimit}\nredis hard nofile ${::redis::ulimit}\n", + content => "redis soft nofile ${redis::ulimit}\nredis hard nofile ${redis::ulimit}\n", } } if $service_provider_lookup == 'systemd' { - file { "/etc/systemd/system/${::redis::service_name}.service.d/": + file { "/etc/systemd/system/${redis::service_name}.service.d/": ensure => 'directory', owner => 'root', group => 'root', selinux_ignore_defaults => true, } - file { "/etc/systemd/system/${::redis::service_name}.service.d/limit.conf": + file { "/etc/systemd/system/${redis::service_name}.service.d/limit.conf": ensure => file, owner => 'root', group => 'root', mode => '0444', } augeas { 'Systemd redis ulimit' : - incl => "/etc/systemd/system/${::redis::service_name}.service.d/limits.conf", + incl => "/etc/systemd/system/${redis::service_name}.service.d/limit.conf", lens => 'Systemd.lns', - context => "/etc/systemd/system/${::redis::service_name}.service.d/limits.conf", changes => [ "defnode nofile Service/LimitNOFILE \"\"", - "set \$nofile/value \"${::redis::ulimit}\""], - notify => [ - Exec['systemd-reload-redis'], + "set \$nofile/value \"${redis::ulimit}\"", ], } - } else { - augeas { 'redis ulimit': - changes => "set ULIMIT ${::redis::ulimit}", + # Only necessary for Puppet < 6.1.0, + # See https://github.com/puppetlabs/puppet/commit/f8d5c60ddb130c6429ff12736bfdb4ae669a9fd4 + if versioncmp($facts['puppetversion'],'6.1.0') < 0 { + include systemd::systemctl::daemon_reload + Augeas['Systemd redis ulimit'] ~> Class['systemd::systemctl::daemon_reload'] } - case $::osfamily { + } else { + case $facts['os']['family'] { 'Debian': { - Augeas['redis ulimit'] { + augeas { 'redis ulimit': context => '/files/etc/default/redis-server', + changes => "set ULIMIT ${redis::ulimit}", } } 'RedHat': { - Augeas['redis ulimit'] { + augeas { 'redis ulimit': context => '/files/etc/sysconfig/redis', + changes => "set ULIMIT ${redis::ulimit}", } } default: { - warning("Not sure how to set ULIMIT on non-systemd OSFamily ${::osfamily}, PR's welcome") + warning("Not sure 
how to set ULIMIT on non-systemd OSFamily ${facts['os']['family']}, PR's welcome") } } } - } diff --git a/metadata.json b/metadata.json index 1782993..bb834fc 100644 --- a/metadata.json +++ b/metadata.json @@ -1,64 +1,74 @@ { - "name": "arioch-redis", - "version": "3.2.0", - "author": "Tom De Vylder", + "name": "puppet-redis", + "version": "6.0.1-rc0", + "author": "Vox Pupuli", "summary": "Redis module", "license": "Apache-2.0", - "source": "https://github.com/arioch/puppet-redis", - "project_page": "http://arioch.github.io/puppet-redis/", - "issues_url": "https://github.com/arioch/puppet-redis/issues", + "source": "https://github.com/voxpupuli/puppet-redis.git", + "project_page": "http://github.com/voxpupuli/puppet-redis", + "issues_url": "https://github.com/voxpupuli/puppet-redis/issues", "dependencies": [ - {"name":"puppetlabs/apt","version_requirement":">= 2.3.0 <5.0.0"}, - {"name":"puppetlabs/stdlib","version_requirement":">= 1.0.2 <5.0.0"}, - {"name":"stahnma/epel","version_requirement":">= 1.2.2 <2.0.0"}, + { + "name": "puppetlabs/stdlib", + "version_requirement": ">= 4.25.0 < 7.0.0" + }, { "name": "herculesteam/augeasproviders_sysctl", - "version_requirement": ">=2.1.0 < 3.0.0" + "version_requirement": ">= 2.1.0 < 3.0.0" }, { "name": "herculesteam/augeasproviders_core", - "version_requirement": ">=2.1.0 < 3.0.0" + "version_requirement": ">= 2.1.0 < 3.0.0" } ], - "data_provider": null, "description": "Redis module with cluster support", "tags": [ "cluster", "failover", "loadbalancing", "redis", "sentinel" ], + "requirements": [ + { + "name": "puppet", + "version_requirement": ">= 5.5.8 < 7.0.0" + } + ], "operatingsystem_support": [ { "operatingsystem": "Debian", "operatingsystemrelease": [ - "8" + "9", + "10" ] }, { "operatingsystem": "Ubuntu", "operatingsystemrelease": [ - "14.04", - "16.04" + "16.04", + "18.04", + "20.04" ] }, { "operatingsystem": "RedHat", "operatingsystemrelease": [ "6", - "7" + "7", + "8" ] }, { "operatingsystem": "CentOS", "operatingsystemrelease": [ "6", - "7" + "7", + "8" ] }, { "operatingsystem": "Archlinux" } ] } diff --git a/spec/acceptance/nodesets/centos-6-docker.yml b/spec/acceptance/nodesets/centos-6-docker.yml deleted file mode 100644 index 21462c1..0000000 --- a/spec/acceptance/nodesets/centos-6-docker.yml +++ /dev/null @@ -1,13 +0,0 @@ -HOSTS: - centos-6-x64: - roles: - - master - platform: el-6-x86_64 - hypervisor : docker - image: petems/docker-centos-6-ssh-locale:centos-6 - docker_preserve_image: true - docker_cmd: '["/sbin/init"]' - docker_preserve_image: true -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/centos-6-x64.yml b/spec/acceptance/nodesets/centos-6-x64.yml deleted file mode 100644 index 2cf68ca..0000000 --- a/spec/acceptance/nodesets/centos-6-x64.yml +++ /dev/null @@ -1,11 +0,0 @@ -HOSTS: - master: - roles: - - default - - master - platform: el-6-x86_64 - box: puppetlabs/centos-6.6-64-nocm - hypervisor: vagrant -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/centos-7-docker.yml b/spec/acceptance/nodesets/centos-7-docker.yml deleted file mode 100644 index 6eae300..0000000 --- a/spec/acceptance/nodesets/centos-7-docker.yml +++ /dev/null @@ -1,13 +0,0 @@ -HOSTS: - centos-7-x64: - roles: - - master - platform: el-7-x86_64 - hypervisor : docker - image: petems/docker-centos-7-ssh-locale:centos-7 - docker_preserve_image: true - docker_cmd: '["/sbin/init"]' - docker_preserve_image: true -CONFIG: - type: foss - log_level: debug diff --git 
a/spec/acceptance/nodesets/centos-7-x64.yml b/spec/acceptance/nodesets/centos-7-x64.yml deleted file mode 100644 index 6dff831..0000000 --- a/spec/acceptance/nodesets/centos-7-x64.yml +++ /dev/null @@ -1,11 +0,0 @@ -HOSTS: - master: - roles: - - default - - master - platform: el-6-x86_64 - box: puppetlabs/centos-7.2-64-nocm - hypervisor: vagrant -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/debian-7-docker.yml b/spec/acceptance/nodesets/debian-7-docker.yml deleted file mode 100644 index 3735eab..0000000 --- a/spec/acceptance/nodesets/debian-7-docker.yml +++ /dev/null @@ -1,15 +0,0 @@ -HOSTS: - debian-7: - roles: - - master - platform: debian-7-amd64 - image: debian:7 - hypervisor: docker - docker_cmd: ["/sbin/init"] - docker_preserve_image: true - docker_image_commands: - - apt-get install -yq wget libssl-dev net-tools locales apt-transport-https software-properties-common - - echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && DEBIAN_FRONTEND=noninteractive locale-gen en_US.UTF-8 && DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales && DEBIAN_FRONTEND=noninteractive /usr/sbin/update-locale LANG=en_US.UTF-8 -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/debian-8-docker.yml b/spec/acceptance/nodesets/debian-8-docker.yml deleted file mode 100644 index 9d2a2bc..0000000 --- a/spec/acceptance/nodesets/debian-8-docker.yml +++ /dev/null @@ -1,17 +0,0 @@ -HOSTS: - debian-8: - roles: - - master - platform: debian-8-amd64 - image: debian:8 - hypervisor: docker - docker_cmd: ["/bin/systemd"] - docker_preserve_image: true - docker_image_commands: - - apt-get install -yq wget net-tools locales apt-transport-https software-properties-common - - echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && DEBIAN_FRONTEND=noninteractive locale-gen en_US.UTF-8 && DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales && DEBIAN_FRONTEND=noninteractive /usr/sbin/update-locale LANG=en_US.UTF-8 - - rm /lib/systemd/system/systemd*udev* - - rm /lib/systemd/system/getty.target -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/default.yml b/spec/acceptance/nodesets/default.yml deleted file mode 100644 index 780db9e..0000000 --- a/spec/acceptance/nodesets/default.yml +++ /dev/null @@ -1,11 +0,0 @@ -HOSTS: - centos-65-x64: - roles: - - master - platform: el-6-x86_64 - box : centos-65-x64-vbox436-nocm - box_url : http://puppet-vagrant-boxes.puppetlabs.com/centos-65-x64-virtualbox-nocm.box - hypervisor : vagrant - -CONFIG: - type: foss diff --git a/spec/acceptance/nodesets/multi-node-master-slave.yml b/spec/acceptance/nodesets/multi-node-master-slave.yml deleted file mode 100644 index 0307846..0000000 --- a/spec/acceptance/nodesets/multi-node-master-slave.yml +++ /dev/null @@ -1,26 +0,0 @@ -HOSTS: - master: - roles: - - default - - master - platform: el-7-x86_64 - box: puppetlabs/centos-7.2-64-nocm - ip: '10.255.33.129' - hypervisor: vagrant - slave1: - roles: - - slave - platform: el-7-x86_64 - box: puppetlabs/centos-7.2-64-nocm - ip: '10.255.33.130' - hypervisor: vagrant - slave2: - roles: - - slave - platform: el-7-x86_64 - box: puppetlabs/centos-7.2-64-nocm - ip: '10.255.33.131' - hypervisor: vagrant -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/ubuntu-1404-docker.yml b/spec/acceptance/nodesets/ubuntu-1404-docker.yml deleted file mode 100644 index 61c6087..0000000 --- a/spec/acceptance/nodesets/ubuntu-1404-docker.yml +++ /dev/null @@ -1,16 +0,0 @@ -HOSTS: - ubuntu-14-04: - roles: - - master - platform: 
ubuntu-14.04-amd64 - image: electrical/ubuntu:14.04 - hypervisor: docker - docker_cmd: '["/sbin/init"]' - docker_image_commands: - - 'apt-get update' - - 'apt-get install -yq lsb-release wget net-tools curl' - docker_preserve_image: true -CONFIG: - type: foss - log_level: debug - :trace_limit: 100 # Get more than 10 lines of trace when something fails. diff --git a/spec/acceptance/nodesets/ubuntu-1404-x64.yml b/spec/acceptance/nodesets/ubuntu-1404-x64.yml deleted file mode 100644 index 8c7262d..0000000 --- a/spec/acceptance/nodesets/ubuntu-1404-x64.yml +++ /dev/null @@ -1,11 +0,0 @@ -HOSTS: - ubuntu-14-04: - roles: - - default - - master - platform: ubuntu-14.04-amd64 - box: puppetlabs/ubuntu-14.04-64-nocm - hypervisor: vagrant -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/ubuntu-1604-docker.yml b/spec/acceptance/nodesets/ubuntu-1604-docker.yml deleted file mode 100644 index 7f29a22..0000000 --- a/spec/acceptance/nodesets/ubuntu-1604-docker.yml +++ /dev/null @@ -1,15 +0,0 @@ -HOSTS: - ubuntu-16-04: - roles: - - master - platform: ubuntu-16.04-amd64 - image: ubuntu:16.04 - hypervisor: docker - docker_cmd: '["/sbin/init"]' - docker_image_commands: - - 'apt-get install -y net-tools wget curl locales apt-transport-https software-properties-common' - - 'locale-gen en en_US en_US.UTF-8' - docker_preserve_image: true -CONFIG: - type: foss - log_level: debug diff --git a/spec/acceptance/nodesets/ubuntu-1604-x64.yml b/spec/acceptance/nodesets/ubuntu-1604-x64.yml deleted file mode 100644 index 4fc271a..0000000 --- a/spec/acceptance/nodesets/ubuntu-1604-x64.yml +++ /dev/null @@ -1,10 +0,0 @@ -HOSTS: - ubuntu-1604-x64: - roles: - - default - platform: ubuntu-1604-amd64 - box: puppetlabs/ubuntu-16.04-64-nocm - hypervisor: vagrant -CONFIG: - log_level: debug - type: foss diff --git a/spec/acceptance/redis_cli_task_spec.rb b/spec/acceptance/redis_cli_task_spec.rb index d9f2f8f..59019b9 100644 --- a/spec/acceptance/redis_cli_task_spec.rb +++ b/spec/acceptance/redis_cli_task_spec.rb @@ -1,37 +1,47 @@ -# run a test task require 'spec_helper_acceptance' describe 'redis-cli task' do - it 'install redis-cli with the class' do pp = <<-EOS - Exec { - path => [ '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', ] - } - - class { '::redis': - manage_repo => true, - } + include redis EOS - # Apply twice to ensure no errors the second time. 
- apply_manifest(pp, :catch_failures => true) + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) end + subject do + on(master, "bolt task run --modulepath /etc/puppetlabs/code/modules --targets localhost #{task_name} #{params}", acceptable_exit_codes: [0, 1]).stdout + end + + let(:task_name) { 'redis::redis_cli' } + describe 'ping' do + let(:params) { 'command="ping"' } + it 'execute ping' do - result = run_task(task_name: 'redis::redis_cli', params: 'command="ping"') - expect_multiple_regexes(result: result, regexes: [%r{{"status":"PONG"}}, %r{Ran on 1 node in .+ seconds}]) + is_expected.to match(%r{{\s*"status":\s*"PONG"\s*}}) + is_expected.to match(%r{Ran on 1 target in .+ sec}) end end describe 'security' do - it 'stops script injections and escapes' do - result = run_task(task_name: 'redis::redis_cli', params: 'command="ping; cat /etc/passwd"') - expect_multiple_regexes(result: result, regexes: [%r{{"status":"ERR unknown command 'ping; cat /etc/passwd'"}}, %r{Ran on 1 node in .+ seconds}]) + describe 'command with semi colon' do + let(:params) { 'command="ping; cat /etc/passwd"' } + + it 'stops script injections and escapes' do + is_expected.to match(%r!{\s*"status":\s*"ERR unknown command ('|`)ping; cat /etc/passwd('|`)!) + is_expected.to match(%r{Ran on 1 target in .+ sec}) + end + end + + describe 'command with double ampersand' do + let(:params) { 'command="ping && cat /etc/passwd"' } - result = run_task(task_name: 'redis::redis_cli', params: 'command="ping && cat /etc/passwd"') - expect_multiple_regexes(result: result, regexes: [%r{{"status":"ERR unknown command 'ping && cat /etc/passwd'"}}, %r{Ran on 1 node in .+ seconds}]) + it 'stops script injections and escapes' do + is_expected.to match(%r!{\s*"status":\s*"ERR unknown command ('|`)ping && cat /etc/passwd('|`)!) + is_expected.to match(%r{Ran on 1 target in .+ sec}) + end end end end diff --git a/spec/acceptance/suites/default/redis_adminstration_spec.rb b/spec/acceptance/suites/default/redis_adminstration_spec.rb index e0db9aa..89b800b 100644 --- a/spec/acceptance/suites/default/redis_adminstration_spec.rb +++ b/spec/acceptance/suites/default/redis_adminstration_spec.rb @@ -1,38 +1,32 @@ require 'spec_helper_acceptance' # systcl settings are untestable in docker -unless default['hypervisor'] =~ /docker/ - describe 'redis::administration' do - it 'should run successfully' do - pp = <<-EOS - include redis - include redis::administration - EOS +describe 'redis::administration', unless: default['hypervisor'] =~ %r{docker} do + it 'runs successfully' do + pp = <<-EOS + include redis + include redis::administration + EOS - # Apply twice to ensure no errors the second time. 
- apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) - end - it 'should set overcommit_memory to 1 in a seperate sysctl file' do - shell('/bin/cat /proc/sys/vm/overcommit_memory') do |result| - expect(result.stdout).to match(/^1$/) - end - end - it 'should disable thp' do - shell('/bin/cat /sys/kernel/mm/transparent_hugepage/enabled') do |result| - expect(result.stdout).to match(/^always madvise \[never\]$/) - end - end - it 'should set somaxconn to 65535' do - shell('/bin/cat /proc/sys/net/core/somaxconn') do |result| - expect(result.stdout).to match(/^65535$/) - end - end - it 'should show no warnings about kernel settings in logs' do - shell('timeout 1s redis-server --port 7777 --loglevel verbose', { :acceptable_exit_codes => [0,124] }) do |result| - expect(result.stdout).not_to match(/WARNING/) - expect(result.exit_code).to match(124) - end - end + # Apply twice to ensure no errors the second time. + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) + end + + describe file('/proc/sys/vm/overcommit_memory') do + its(:content) { is_expected.to eq("1\n") } + end + + describe file('/sys/kernel/mm/transparent_hugepage/enabled') do + its(:content) { is_expected.to eq("always madvise [never]\n") } + end + + describe file('/proc/sys/net/core/somaxconn') do + its(:content) { is_expected.to eq("65535\n") } + end + + describe command('timeout 1s redis-server --port 7777 --loglevel verbose') do + its(:stderr) { is_expected.not_to match(%r{WARNING}) } + its(:exit_status) { is_expected.to eq(124) } end end diff --git a/spec/acceptance/suites/default/redis_debian_run_dir_spec.rb b/spec/acceptance/suites/default/redis_debian_run_dir_spec.rb index aed5022..ab43635 100644 --- a/spec/acceptance/suites/default/redis_debian_run_dir_spec.rb +++ b/spec/acceptance/suites/default/redis_debian_run_dir_spec.rb @@ -1,55 +1,55 @@ require 'spec_helper_acceptance' # since this test polutes others, we'll only run it if specifically asked if ENV['RUN_BACKPORT_TEST'] == 'yes' - describe 'redis', :if => (fact('operatingsystem') == 'Debian') do - it 'should run with newer Debian package' do + describe 'redis', if: (fact('operatingsystem') == 'Debian') do + it 'runs with newer Debian package' do pp = <<-EOS include ::apt class {'::apt::backports':} -> file { '/usr/sbin/policy-rc.d': ensure => present, content => "/usr/bin/env sh\nexit 101", mode => '0755', } -> package { 'redis-server': ensure => 'latest', install_options => { '-t' => "${::lsbdistcodename}-backports", }, } -> class { 'redis': manage_package => false, } EOS - apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_change => true) + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_change: true) end describe package('redis-server') do - it { should be_installed } + it { is_expected.to be_installed } end describe service('redis-server') do - it { should be_running } + it { is_expected.to be_running } end context 'redis should respond to ping command' do describe command('redis-cli ping') do - its(:stdout) { should match /PONG/ } + its(:stdout) { is_expected.to match %r{PONG} } end end context 'redis log should be clean' do describe command('journalctl --no-pager') do - its(:stdout) { should_not match /Failed at step RUNTIME_DIRECTORY/ } + its(:stdout) { is_expected.not_to match %r{Failed at step RUNTIME_DIRECTORY} } end end end end diff --git a/spec/acceptance/suites/default/redis_multi_instances_one_host_spec.rb 
b/spec/acceptance/suites/default/redis_multi_instances_one_host_spec.rb index 5473d02..9d01b46 100644 --- a/spec/acceptance/suites/default/redis_multi_instances_one_host_spec.rb +++ b/spec/acceptance/suites/default/redis_multi_instances_one_host_spec.rb @@ -1,71 +1,77 @@ require 'spec_helper_acceptance' -# Cant get this to work on Debian, add exception for now -describe 'redis::instance', :unless => (fact('operatingsystem') == 'Debian') do +describe 'redis::instance example' do + instances = [6379, 6380, 6381, 6382] case fact('osfamily') when 'Debian' - config_path = '/etc/redis' - manage_repo = false + config_path = '/etc/redis' redis_name = 'redis-server' else + config_path = '/etc' redis_name = 'redis' - config_path = '/etc' - manage_repo = true end - it 'should run successfully' do + it 'runs successfully' do pp = <<-EOS - Exec { - path => [ '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', ] - } + $listening_ports = #{instances} class { '::redis': - manage_repo => #{manage_repo}, default_install => false, + service_enable => false, + service_ensure => 'stopped', } - redis::instance {'redis1': - port => '7777', - } - - redis::instance {'redis2': - port => '8888', + $listening_ports.each |$port| { + $port_string = sprintf('%d',$port) + redis::instance { $port_string: + service_enable => true, + service_ensure => 'running', + port => $port, + bind => $facts['networking']['ip'], + dbfilename => "${port}-dump.rdb", + appendfilename => "${port}-appendonly.aof", + appendfsync => 'always', + require => Class['Redis'], + } } EOS # Apply twice to ensure no errors the second time. - apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) end describe package(redis_name) do - it { should be_installed } + it { is_expected.to be_installed } end - describe service('redis-server-redis1') do - it { should be_running } + describe service(redis_name) do + it { is_expected.not_to be_enabled } + it { is_expected.not_to be_running } end - describe service('redis-server-redis2') do - it { should be_running } - end + instances.each do |instance| + describe file("/etc/systemd/system/redis-server-#{instance}.service"), if: (fact('service_provider') == 'systemd') do + its(:content) { is_expected.to match %r{redis-server-#{instance}.conf} } + end - describe file("#{config_path}/redis-server-redis1.conf") do - its(:content) { should match /port 7777/ } - end + describe service("redis-server-#{instance}") do + it { is_expected.to be_enabled } + end - describe file("#{config_path}/redis-server-redis2.conf") do - its(:content) { should match /port 8888/ } - end + describe service("redis-server-#{instance}") do + it { is_expected.to be_running } + end - context 'redis should respond to ping command' do - describe command('redis-cli -h 127.0.0.1 -p 7777 ping') do - its(:stdout) { should match /PONG/ } + describe file("#{config_path}/redis-server-#{instance}.conf") do + its(:content) { is_expected.to match %r{port #{instance}} } end - describe command('redis-cli -h 127.0.0.1 -p 8888 ping') do - its(:stdout) { should match /PONG/ } + context "redis instance #{instance} should respond to ping command" do + describe command("redis-cli -h #{fact('networking.ip')} -p #{instance} ping") do + its(:stdout) { is_expected.to match %r{PONG} } + end end end end diff --git a/spec/acceptance/suites/default/redis_multi_node_spec.rb b/spec/acceptance/suites/default/redis_multi_node_spec.rb index 
fe5d01e..6a44c3d 100644 --- a/spec/acceptance/suites/default/redis_multi_node_spec.rb +++ b/spec/acceptance/suites/default/redis_multi_node_spec.rb @@ -1,78 +1,71 @@ require 'spec_helper_acceptance' if hosts.length >= 3 - describe "configuring master and slave redis hosts" do - + describe 'configuring master and slave redis hosts' do let(:master_ip_address) do # hosts_as('master').inject({}) do |memo,host| # memo[host] = fact_on host, "ipaddress_enp0s8" # end '10.255.33.129' # hardcoding as vagrant ip for now end hosts_as('master').each do |host| context "should be able to configure a host as master on #{host}" do - it 'should work idempotently with no errors' do + it 'works idempotently with no errors' do pp = <<-EOS # Stop firewall so we can easily connect service {'firewalld': ensure => 'stopped', } class { 'redis': - manage_repo => true, bind => '#{master_ip_address}', requirepass => 'foobared', } EOS - apply_manifest_on(host, pp, :catch_failures => true) + apply_manifest_on(host, pp, catch_failures: true) command_to_check = "redis-cli -h #{master_ip_address} -a foobared info replication" on host, command_to_check do - expect(stdout).to match(/^role:master/) + expect(stdout).to match(%r{^role:master}) end - end end end hosts_as('slave').each do |host| context "should be able to configure a host as master on #{host}" do - it 'should work idempotently with no errors' do + it 'works idempotently with no errors' do pp = <<-EOS class { 'redis': - manage_repo => true, bind => '127.0.0.1', masterauth => 'foobared', slaveof => '#{master_ip_address} 6379' } EOS - apply_manifest_on(host, pp, :catch_failures => true) + apply_manifest_on(host, pp, catch_failures: true) on host, 'redis-cli -h $(facter ipaddress_enp0s8) info replication' do - expect(stdout).to match(/^role:slave/) + expect(stdout).to match(%r{^role:slave}) end - end end end hosts_as('master').each do |host| context "should be able to configure a host as master on #{host}" do - it 'should work idempotently with no errors' do + it 'works idempotently with no errors' do command_to_check = "redis-cli -h #{master_ip_address} -a foobared info replication" sleep(5) on host, command_to_check do - expect(stdout).to match(/^connected_slaves:2/) + expect(stdout).to match(%r{^connected_slaves:2}) end - end end end - end end diff --git a/spec/acceptance/suites/default/redis_sentinel_one_node_spec.rb b/spec/acceptance/suites/default/redis_sentinel_one_node_spec.rb index 1e2351e..277242f 100644 --- a/spec/acceptance/suites/default/redis_sentinel_one_node_spec.rb +++ b/spec/acceptance/suites/default/redis_sentinel_one_node_spec.rb @@ -1,64 +1,54 @@ require 'spec_helper_acceptance' -# CentOS 6 Redis package is too old for Sentinel (2.4.10, needs 2.8+) -describe 'redis::sentinel', :unless => (fact('osfamily') == 'RedHat' && (fact('operatingsystemmajrelease') == '6')) do - case fact('osfamily') - when 'Debian' - redis_name = 'redis-server' - else - redis_name = 'redis' - end - - it 'should run successfully' do +describe 'redis::sentinel' do + redis_name = case fact('osfamily') + when 'Debian' + 'redis-server' + else + 'redis' + end + + it 'runs successfully' do pp = <<-EOS - - $master_name = 'mymaster' - $redis_master = '127.0.0.1' - $failover_timeout = '10000' - - class { 'redis': - manage_repo => true, - } - -> class { 'redis::sentinel': - master_name => $master_name, - redis_host => $redis_master, - failover_timeout => $failover_timeout, + master_name => 'mymaster', + redis_host => '127.0.0.1', + failover_timeout => 10000, } EOS - 
apply_manifest(pp, :catch_failures => true) + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) end describe package(redis_name) do - it { should be_installed } + it { is_expected.to be_installed } end describe service(redis_name) do - it { should be_running } + it { is_expected.to be_running } end describe service('redis-sentinel') do - it { should be_running } + it { is_expected.to be_running } end case fact('osfamily') when 'Debian' describe package('redis-sentinel') do - it { should be_installed } + it { is_expected.to be_installed } end end context 'redis should respond to ping command' do describe command('redis-cli ping') do - its(:stdout) { should match /PONG/ } + its(:stdout) { is_expected.to match %r{PONG} } end end context 'redis-sentinel should return correct sentinel master' do describe command('redis-cli -p 26379 SENTINEL masters') do - its(:stdout) { should match /^mymaster/ } + its(:stdout) { is_expected.to match %r{^mymaster} } end end - end diff --git a/spec/acceptance/suites/default/redis_spec.rb b/spec/acceptance/suites/default/redis_spec.rb index ef8c78e..5026973 100644 --- a/spec/acceptance/suites/default/redis_spec.rb +++ b/spec/acceptance/suites/default/redis_spec.rb @@ -1,53 +1,45 @@ require 'spec_helper_acceptance' describe 'redis' do - case fact('osfamily') - when 'Debian' - redis_name = 'redis-server' - manage_repo = false - else - redis_name = 'redis' - manage_repo = true - end - - it 'should run successfully' do + redis_name = case fact('osfamily') + when 'Debian' + 'redis-server' + else + 'redis' + end + + it 'runs successfully' do pp = <<-EOS - Exec { - path => [ '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', ] - } - - class { '::redis': - manage_repo => #{manage_repo}, - } + include redis EOS # Apply twice to ensure no errors the second time. 
- apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) end - it 'should return a fact' do + it 'returns a fact' do pp = <<-EOS notify{"Redis Version: ${::redis_server_version}":} EOS # Check output for fact string - apply_manifest(pp, :catch_failures => true) do |r| - expect(r.stdout).to match(/Redis Version: [\d+.]+/) + apply_manifest(pp, catch_failures: true) do |r| + expect(r.stdout).to match(%r{Redis Version: [\d+.]+}) end end describe package(redis_name) do - it { should be_installed } + it { is_expected.to be_installed } end describe service(redis_name) do - it { should be_running } + it { is_expected.to be_running } end context 'redis should respond to ping command' do describe command('redis-cli ping') do - its(:stdout) { should match /PONG/ } + its(:stdout) { is_expected.to match %r{PONG} } end end end diff --git a/spec/acceptance/suites/default/redisget_spec.rb b/spec/acceptance/suites/default/redisget_spec.rb index 429c341..5d9391c 100644 --- a/spec/acceptance/suites/default/redisget_spec.rb +++ b/spec/acceptance/suites/default/redisget_spec.rb @@ -1,118 +1,111 @@ require 'spec_helper_acceptance' -describe 'redisget() function' do - - it 'should run successfully' do +describe 'redis::get() function' do + it 'runs successfully' do pp = <<-EOS - Exec { - path => [ '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', ] - } - - class { '::redis': - manage_repo => true, - } + include redis package { 'redis-rubygem' : ensure => '3.3.3', name => 'redis', provider => 'puppet_gem', } - EOS # Apply twice to ensure no errors the second time. - apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) + end - shell('redis-cli SET mykey "Hello"') do |result| - expect(result.stdout).to match('OK') - end + describe command('redis-cli SET mykey "Hello"') do + its(:stdout) { is_expected.to match(%r{OK}) } + end - shell('redis-cli GET mykey') do |result| - expect(result.stdout).to match('Hello') - end + describe command('redis-cli GET mykey') do + its(:stdout) { is_expected.to match('Hello') } end - it 'should return a value from MyKey with the redisget() function' do - pp = <<-EOS - $mykey = redisget('mykey', 'redis://127.0.0.1:6379') + context 'with mykey set to Hello' do + it 'returns a value from MyKey with the redis::get() function' do + pp = <<-EOS + $mykey = redis::get('mykey', 'redis://127.0.0.1:6379') - notify{"mykey value: ${mykey}":} - EOS + notify{"mykey value: ${mykey}":} + EOS - # Check output for function return value - apply_manifest(pp, :catch_failures => true) do |r| - expect(r.stdout).to match(/mykey value: Hello/) + # Check output for function return value + apply_manifest(pp, catch_failures: true) do |r| + expect(r.stdout).to match(%r{mykey value: Hello}) + end end - end - it 'should return a value from valid MyKey with the redisget() function while specifying a default' do - pp = <<-EOS - $mykey = redisget('mykey', 'redis://127.0.0.1:6379', 'default_value') + it 'returns a value from valid MyKey with the redis::get() function while specifying a default' do + pp = <<-EOS + $mykey = redis::get('mykey', 'redis://127.0.0.1:6379', 'default_value') - notify{"mykey value: ${mykey}":} - EOS + notify{"mykey value: ${mykey}":} + EOS - # Check output for function return value - apply_manifest(pp, :catch_failures => true) do |r| - 
expect(r.stdout).to match(/mykey value: Hello/) + # Check output for function return value + apply_manifest(pp, catch_failures: true) do |r| + expect(r.stdout).to match(%r{mykey value: Hello}) + end end - end - it 'should return an empty string when value not present with redisget() function' do - pp = <<-EOS - $foo_key = redisget('foo', 'redis://127.0.0.1:6379') + it 'returns an empty string when value not present with redis::get() function' do + pp = <<-EOS + $foo_key = redis::get('foo', 'redis://127.0.0.1:6379') - if empty($foo_key){ - notify{"foo_key value was empty string":} - } - EOS + if empty($foo_key){ + notify{"foo_key value was empty string":} + } + EOS - # Check output for function return value - apply_manifest(pp, :catch_failures => true) do |r| - expect(r.stdout).to match(/foo_key value was empty string/) + # Check output for function return value + apply_manifest(pp, catch_failures: true) do |r| + expect(r.stdout).to match(%r{foo_key value was empty string}) + end end - end - it 'should return the specified default value when key not present with redisget() function' do - pp = <<-EOS - $foo_key = redisget('foo', 'redis://127.0.0.1:6379', 'default_value') + it 'returns the specified default value when key not present with redis::get() function' do + pp = <<-EOS + $foo_key = redis::get('foo', 'redis://127.0.0.1:6379', 'default_value') - notify { $foo_key: } - EOS + notify { $foo_key: } + EOS - # Check output for function return value - apply_manifest(pp, :catch_failures => true) do |r| - expect(r.stdout).to match(/default_value/) + # Check output for function return value + apply_manifest(pp, catch_failures: true) do |r| + expect(r.stdout).to match(%r{default_value}) + end end - end - it 'should return the specified default value when connection to redis server fails' do - pp = <<-EOS - # Bogus port for redis server - $foo_key = redisget('foo', 'redis://127.0.0.1:12345', 'default_value') + it 'returns the specified default value when connection to redis server fails' do + pp = <<-EOS + # Bogus port for redis server + $foo_key = redis::get('foo', 'redis://127.0.0.1:12345', 'default_value') - notify { $foo_key: } - EOS + notify { $foo_key: } + EOS - # Check output for function return value - apply_manifest(pp, :catch_failures => true) do |r| - expect(r.stdout).to match(/default_value/) + # Check output for function return value + apply_manifest(pp, catch_failures: true) do |r| + expect(r.stdout).to match(%r{default_value}) + end end - end - it 'should return an error when specifying a non connectable redis server' do - pp = <<-EOS - # Bogus port for redis server - $foo_key = redisget('foo', 'redis://127.0.0.1:12345') + it 'returns an error when specifying a non connectable redis server' do + pp = <<-EOS + # Bogus port for redis server + $foo_key = redis::get('foo', 'redis://127.0.0.1:12345') - notify { $foo_key: } - EOS + notify { $foo_key: } + EOS - # Check output for error when can't connect to bogus redis - apply_manifest(pp, :acceptable_exit_codes => [1]) do |r| - expect(r.stderr).to match(/Error connecting to Redis on 127.0.0.1:12345 \(Errno::ECONNREFUSED\)/) + # Check output for error when can't connect to bogus redis + apply_manifest(pp, acceptable_exit_codes: [1]) do |r| + expect(r.stderr).to match(%r{Error connecting to Redis on 127.0.0.1:12345 \(Errno::ECONNREFUSED\)}) + end end end - end diff --git a/spec/acceptance/suites/scl/redis5_spec.rb b/spec/acceptance/suites/scl/redis5_spec.rb new file mode 100644 index 0000000..4a19041 --- /dev/null +++ 
b/spec/acceptance/suites/scl/redis5_spec.rb @@ -0,0 +1,41 @@ +require 'spec_helper_acceptance' + +describe 'redis', if: %w[centos redhat].include?(os[:family]) && os[:release].to_i == 7 do + before(:all) do + on hosts, puppet_resource('service', 'redis', 'ensure=stopped', 'enable=false') + end + + after(:all) do + on hosts, puppet_resource('service', 'rh-redis5-redis', 'ensure=stopped', 'enable=false') + end + + it 'runs successfully' do + pp = <<-PUPPET + class { 'redis::globals': + scl => 'rh-redis5', + } + class { 'redis': + manage_repo => true, + } + PUPPET + + # Apply twice to ensure no errors the second time. + apply_manifest(pp, catch_failures: true) + apply_manifest(pp, catch_changes: true) + end + + describe package('rh-redis5-redis') do + it { is_expected.to be_installed } + end + + describe service('rh-redis5-redis') do + it { is_expected.to be_running } + it { is_expected.to be_enabled } + end + + context 'redis should respond to ping command' do + describe command('scl enable rh-redis5 -- redis-cli ping') do + its(:stdout) { is_expected.to match %r{PONG} } + end + end +end diff --git a/spec/classes/redis_administration_spec.rb b/spec/classes/redis_administration_spec.rb index 96fdc14..9f486fd 100644 --- a/spec/classes/redis_administration_spec.rb +++ b/spec/classes/redis_administration_spec.rb @@ -1,37 +1,28 @@ require 'spec_helper' describe 'redis::administration' do context 'should set kernel and system values' do - it do is_expected.to contain_sysctl('vm.overcommit_memory').with( - { - 'ensure'=>'present', - 'value'=>'1' - } + 'ensure' => 'present', + 'value' => '1' ) end it do - is_expected.to contain_exec("Disable Hugepages").with( - { - "command" => "echo never > /sys/kernel/mm/transparent_hugepage/enabled", - "path" => ["/sbin", "/usr/sbin", "/bin", "/usr/bin"], - "onlyif" => "test -f /sys/kernel/mm/transparent_hugepage/enabled", - "unless" => "cat /sys/kernel/mm/transparent_hugepage/enabled | grep \"\\[never\\]\"", - } + is_expected.to contain_exec('Disable Hugepages').with( + 'command' => 'echo never > /sys/kernel/mm/transparent_hugepage/enabled', + 'path' => ['/sbin', '/usr/sbin', '/bin', '/usr/bin'], + 'onlyif' => 'test -f /sys/kernel/mm/transparent_hugepage/enabled', + 'unless' => 'cat /sys/kernel/mm/transparent_hugepage/enabled | grep "\\[never\\]"' ) end it do - is_expected.to contain_sysctl("net.core.somaxconn").with( - { - "ensure" => "present", - "value" => "65535", - } + is_expected.to contain_sysctl('net.core.somaxconn').with( + 'ensure' => 'present', + 'value' => '65535' ) end - end - end diff --git a/spec/classes/redis_centos_6_spec.rb b/spec/classes/redis_centos_6_spec.rb deleted file mode 100644 index b78ee6b..0000000 --- a/spec/classes/redis_centos_6_spec.rb +++ /dev/null @@ -1,79 +0,0 @@ -require 'spec_helper' - -describe 'redis' do - context 'on CentOS 6' do - - let(:facts) { - centos_6_facts - } - - context 'should set CentOS specific values' do - - context 'when $::redis_server_version fact is not present and package_ensure is a newer version(3.2.1) (older features enabled)' do - - let(:facts) { - centos_6_facts.merge({ - :redis_server_version => nil, - :puppetversion => Puppet.version, - }) - } - let (:params) { { :package_ensure => '3.2.1' } } - - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^hash-max-zipmap-entries/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^protected-mode/) } - it { 
should contain_file('/etc/redis.conf.puppet').with('content' => /^tcp-backlog/) } - end - - context 'when $::redis_server_version fact is not present and package_ensure is a newer version(4.0-rc3) (older features enabled)' do - - let(:facts) { - centos_6_facts.merge({ - :redis_server_version => nil, - :puppetversion => Puppet.version, - }) - } - let (:params) { { :package_ensure => '4.0-rc3' } } - - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^hash-max-zipmap-entries/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^protected-mode/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^tcp-backlog/) } - end - - context 'when $::redis_server_version fact is present but the older version (older features not enabled)' do - - let(:facts) { - centos_6_facts.merge({ - :redis_server_version => '2.4.10', - :puppetversion => Puppet.version, - }) - } - - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^hash-max-zipmap-entries/) } - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^protected-mode/) } - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^tcp-backlog/) } - - end - - context 'when $::redis_server_version fact is present but a newer version (older features enabled)' do - - let(:facts) { - centos_6_facts.merge({ - :redis_server_version => '3.2.1', - :puppetversion => Puppet.version, - }) - } - - it { should contain_file('/etc/redis.conf.puppet').without('content' => /^hash-max-zipmap-entries/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^protected-mode/) } - it { should contain_file('/etc/redis.conf.puppet').with('content' => /^tcp-backlog/) } - - end - end - - end - -end diff --git a/spec/classes/redis_debian_wheezy_spec.rb b/spec/classes/redis_debian_wheezy_spec.rb deleted file mode 100644 index 5665b9f..0000000 --- a/spec/classes/redis_debian_wheezy_spec.rb +++ /dev/null @@ -1,19 +0,0 @@ -require 'spec_helper' - -describe 'redis' do - context 'on Debian Wheezy' do - - let(:facts) { - debian_wheezy_facts - } - - context 'should set Wheezy specific values' do - - context 'should set redis rundir correctly to Wheezy requirements' do - it { should contain_file('/var/run/redis').with('mode' => '2755') } - it { should contain_file('/var/run/redis').with('group' => 'redis') } - end - end - end - -end diff --git a/spec/classes/redis_freebsd_spec.rb b/spec/classes/redis_freebsd_spec.rb deleted file mode 100644 index d294e24..0000000 --- a/spec/classes/redis_freebsd_spec.rb +++ /dev/null @@ -1,15 +0,0 @@ -require 'spec_helper' - -describe 'redis' do - context 'on FreeBSD' do - let(:facts) { - freebsd_facts - } - - context 'should set FreeBSD specific values' do - it { should contain_file('/usr/local/etc/redis.conf.puppet').with('content' => /dir \/var\/db\/redis/) } - it { should contain_file('/usr/local/etc/redis.conf.puppet').with('content' => /pidfile \/var\/run\/redis\/redis.pid/) } - end - end - -end diff --git a/spec/classes/redis_sentinel_spec.rb b/spec/classes/redis_sentinel_spec.rb index 6d3108d..f906e4d 100644 --- a/spec/classes/redis_sentinel_spec.rb +++ b/spec/classes/redis_sentinel_spec.rb @@ -1,112 
+1,115 @@ require 'spec_helper' -$expected_noparams_content = < :class do - let (:facts) { debian_facts } - - let :pre_condition do - [ - 'class { redis: }' - ] - end - - describe 'without parameters' do - - it { should create_class('redis::sentinel') } - - it { should contain_file('/etc/redis/redis-sentinel.conf.puppet').with( - 'ensure' => 'present', - 'mode' => '0644', - 'owner' => 'redis', - 'content' => $expected_noparams_content - ) - } - - it { should contain_service('redis-sentinel').with( - 'ensure' => 'running', - 'enable' => 'true', - 'hasrestart' => 'true', - 'hasstatus' => 'true', - ) - } - - end - - describe 'with custom parameters' do - let (:params) { - { - :auth_pass => 'password', - :sentinel_bind => '1.2.3.4', - :master_name => 'cow', - :down_after => 6000, - :log_file => '/tmp/barn-sentinel.log', - :failover_timeout => 28000, - :notification_script => 'bar.sh', - :client_reconfig_script => 'foo.sh' - } - } - - it { should create_class('redis::sentinel') } - - it { should contain_file('/etc/redis/redis-sentinel.conf.puppet').with( - 'content' => $expected_params_content - ) - } - end - - describe 'on Debian Jessie' do - - let (:facts) { debian_facts.merge({ - :operatingsystemmajrelease => '8', - }) } - - it { should create_class('redis::sentinel') } - - it { should_not contain_package('redis-sentinel').with_ensure('present') } +CONFIG + end + + it { is_expected.to compile.with_all_deps } + it { is_expected.to create_class('redis::sentinel') } + it { is_expected.to contain_file(config_file_orig).with_content(expected_content) } + end + end end - - describe 'on Debian Stretch' do - - let (:facts) { debian_facts.merge({ - :operatingsystemmajrelease => '9', - }) } - - it { should create_class('redis::sentinel') } - - it { should contain_package('redis-sentinel').with_ensure('present') } - end - end diff --git a/spec/classes/redis_spec.rb b/spec/classes/redis_spec.rb index 6424525..d6890d6 100644 --- a/spec/classes/redis_spec.rb +++ b/spec/classes/redis_spec.rb @@ -1,1150 +1,1421 @@ require 'spec_helper' -describe 'redis', :type => :class do +describe 'redis' do + let(:service_file) do + if facts['service_provider'] == 'systemd' + "/etc/systemd/system/#{service_name}.service" + else + "/etc/init.d/#{service_name}" + end + end + let(:package_name) { manifest_vars[:package_name] } + let(:service_name) { manifest_vars[:service_name] } + let(:config_file) { manifest_vars[:config_file] } + let(:config_file_orig) { manifest_vars[:config_file_orig] } on_supported_os.each do |os, facts| context "on #{os}" do - let(:facts) { - facts.merge({ - :redis_server_version => '3.2.3', - }) - } + let(:facts) { facts } - let(:package_name) { manifest_vars[:package_name] } - let(:service_name) { manifest_vars[:service_name] } - let(:config_file_orig) { manifest_vars[:config_file_orig] } + if facts[:operatingsystem] == 'Ubuntu' && facts[:operatingsystemmajrelease] == '16.04' + let(:systemd) { '' } + let(:servicetype) { 'forking' } + else + let(:systemd) { ' --supervised systemd' } + let(:servicetype) { 'notify' } + end describe 'without parameters' do + it { is_expected.to compile.with_all_deps } it { is_expected.to create_class('redis') } it { is_expected.to contain_class('redis::preinstall') } it { is_expected.to contain_class('redis::install') } it { is_expected.to contain_class('redis::config') } it { is_expected.to contain_class('redis::service') } it { is_expected.to contain_package(package_name).with_ensure('present') } - it { is_expected.to 
contain_file(config_file_orig).with_ensure('file') } - - it { is_expected.to contain_file(config_file_orig).without_content(/undef/) } + it do + is_expected.to contain_file(config_file_orig). + with_ensure('file'). + without_content(%r{undef}) + + if facts[:osfamily] == 'FreeBSD' + is_expected.to contain_file(config_file_orig). + with_content(%r{dir /var/db/redis}). + with_content(%r{pidfile /var/run/redis/redis\.pid}) + end + end it do is_expected.to contain_service(service_name).with( 'ensure' => 'running', 'enable' => 'true', 'hasrestart' => 'true', 'hasstatus' => 'true' ) end - case facts[:operatingsystem] - when 'Debian' + context 'with SCL', if: facts[:osfamily] == 'RedHat' && facts[:operatingsystemmajrelease] < '8' do + let(:pre_condition) do + <<-PUPPET + class { 'redis::globals': + scl => 'rh-redis5', + } + PUPPET + end - context 'on Debian' do + it { is_expected.to compile.with_all_deps } + it do + is_expected.to create_class('redis'). + with_package_name('rh-redis5-redis'). + with_config_file('/etc/opt/rh/rh-redis5/redis.conf'). + with_service_name('rh-redis5-redis') + end - it do - is_expected.to contain_file('/var/run/redis').with({ - :ensure => 'directory', - :owner => 'redis', - :group => 'root', - :mode => '2775', - }) - end + context 'manage_repo => true', if: facts[:operatingsystem] == 'CentOS' do + let(:params) { { manage_repo: true } } + it { is_expected.to compile.with_all_deps } + it { is_expected.to contain_package('centos-release-scl-rh') } end + end + end - when 'Ubuntu' + context 'with managed_by_cluster_manager true' do + let(:params) { { managed_by_cluster_manager: true } } + it { is_expected.to compile.with_all_deps } + it do + is_expected.to contain_file('/etc/security/limits.d/redis.conf').with( + 'ensure' => 'file', + 'owner' => 'root', + 'group' => 'root', + 'mode' => '0644', + 'content' => "redis soft nofile 65536\nredis hard nofile 65536\n" + ) + end + + context 'when not managing service' do + let(:params) { super().merge(service_manage: false, notify_service: false) } + + it { is_expected.to compile.with_all_deps } it do - is_expected.to contain_file('/var/run/redis').with({ - :ensure => 'directory', - :owner => 'redis', - :group => 'redis', - :mode => '0755', - }) + is_expected.to contain_file('/etc/security/limits.d/redis.conf').with( + 'ensure' => 'file', + 'owner' => 'root', + 'group' => 'root', + 'mode' => '0644', + 'content' => "redis soft nofile 65536\nredis hard nofile 65536\n" + ) end + end + end + + context 'with ulimit' do + let(:params) { { ulimit: 7777 } } + it { is_expected.to compile.with_all_deps } + it do + if facts['service_provider'] == 'systemd' + is_expected.to contain_file("/etc/systemd/system/#{service_name}.service.d/limit.conf"). + with_ensure('file'). + with_owner('root'). + with_group('root'). + with_mode('0444') + # Only necessary for Puppet < 6.1.0, + # See https://github.com/puppetlabs/puppet/commit/f8d5c60ddb130c6429ff12736bfdb4ae669a9fd4 + if Puppet.version < '6.1' + is_expected.to contain_augeas('Systemd redis ulimit'). + with_incl("/etc/systemd/system/#{service_name}.service.d/limit.conf"). + with_lens('Systemd.lns'). + with_changes(['defnode nofile Service/LimitNOFILE ""', 'set $nofile/value "7777"']). + that_notifies('Class[systemd::systemctl::daemon_reload]') + else + is_expected.to contain_augeas('Systemd redis ulimit'). + with_incl("/etc/systemd/system/#{service_name}.service.d/limit.conf"). + with_lens('Systemd.lns'). 
+ with_changes(['defnode nofile Service/LimitNOFILE ""', 'set $nofile/value "7777"']) + end + else + is_expected.not_to contain_file('/etc/systemd/system/redis-server.service.d/limit.conf') + is_expected.not_to contain_augeas('Systemd redis ulimit') + if %w[Debian RedHat].include?(facts[:osfamily]) + ulimit_context = case facts[:osfamily] + when 'Debian' + '/files/etc/default/redis-server' + when 'RedHat' + '/files/etc/sysconfig/redis' + end + + if ulimit_context + is_expected.to contain_augeas('redis ulimit'). + with_changes('set ULIMIT 7777'). + with_context(ulimit_context) + else + is_expected.not_to contain_augeas('redis ulimit') + end + end + end end end describe 'with parameter activerehashing' do - let (:params) { + let(:params) do { - :activerehashing => true + activerehashing: true } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/activerehashing.*yes/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{activerehashing.*yes}) } end describe 'with parameter aof_load_truncated' do - let (:params) { + let(:params) do { - :aof_load_truncated => true + aof_load_truncated: true } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/aof-load-truncated.*yes/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{aof-load-truncated.*yes}) } end describe 'with parameter aof_rewrite_incremental_fsync' do - let (:params) { + let(:params) do { - :aof_rewrite_incremental_fsync => true + aof_rewrite_incremental_fsync: true } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/aof-rewrite-incremental-fsync.*yes/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{aof-rewrite-incremental-fsync.*yes}) } end describe 'with parameter appendfilename' do - let (:params) { + let(:params) do { - :appendfilename => '_VALUE_' + appendfilename: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/appendfilename.*_VALUE_/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{appendfilename.*_VALUE_}) } end describe 'with parameter appendfsync' do - let (:params) { + let(:params) do { - :appendfsync => '_VALUE_' + appendfsync: 'no' } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/appendfsync.*_VALUE_/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{^appendfsync no$}) } end describe 'with parameter appendonly' do - let (:params) { + let(:params) do { - :appendonly => true + appendonly: true } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/appendonly.*yes/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{appendonly.*yes}) } end describe 'with parameter auto_aof_rewrite_min_size' do - let (:params) { + let(:params) do { - :auto_aof_rewrite_min_size => '_VALUE_' + auto_aof_rewrite_min_size: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/auto-aof-rewrite-min-size.*_VALUE_/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{auto-aof-rewrite-min-size.*_VALUE_}) } end describe 'with parameter auto_aof_rewrite_percentage' do - let (:params) { + let(:params) do { - :auto_aof_rewrite_percentage => '_VALUE_' + auto_aof_rewrite_percentage: 75 } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/auto-aof-rewrite-percentage.*_VALUE_/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{auto-aof-rewrite-percentage 75}) } end - 
describe 'with parameter bind' do - let (:params) { - { - :bind => '_VALUE_' - } - } + describe 'parameter bind' do + context 'by default' do + it 'binds to localhost' do + is_expected.to contain_file(config_file_orig).with_content(%r{bind 127\.0\.0\.1$}) + end + end + context 'with a single IP address' do + let(:params) { { bind: '10.0.0.1' } } + + it { is_expected.to contain_file(config_file_orig).with_content(%r{bind 10\.0\.0\.1$}) } + end + context 'with array of IP addresses' do + let(:params) do + { + bind: ['127.0.0.1', '::1'] + } + end + + it { is_expected.to contain_file(config_file_orig).with_content(%r{bind 127\.0\.0\.1 ::1}) } + end + context 'with empty array' do + let(:params) { { bind: [] } } - it { is_expected.to contain_file(config_file_orig).with_content(/bind.*_VALUE_/) } + it { is_expected.not_to contain_file(config_file_orig).with_content(%r{^bind}) } + end end describe 'with parameter output_buffer_limit_slave' do - let (:params) { + let(:params) do { - :output_buffer_limit_slave => '_VALUE_' + output_buffer_limit_slave: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/client-output-buffer-limit slave.*_VALUE_/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{client-output-buffer-limit slave.*_VALUE_}) } end describe 'with parameter output_buffer_limit_pubsub' do - let (:params) { + let(:params) do { - :output_buffer_limit_pubsub => '_VALUE_' + output_buffer_limit_pubsub: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with_content(/client-output-buffer-limit pubsub.*_VALUE_/) } + it { is_expected.to contain_file(config_file_orig).with_content(%r{client-output-buffer-limit pubsub.*_VALUE_}) } end describe 'with parameter: config_dir' do - let (:params) { { :config_dir => '_VALUE_' } } + let(:params) { { config_dir: '/etc/config_dir' } } - it { is_expected.to contain_file('_VALUE_').with_ensure('directory') } + it { is_expected.to contain_file('/etc/config_dir').with_ensure('directory') } end describe 'with parameter: config_dir_mode' do - let (:params) { { :config_dir_mode => '_VALUE_' } } + let(:params) { { config_dir_mode: '0700' } } - it { is_expected.to contain_file('/etc/redis').with_mode('_VALUE_') } + it { is_expected.to contain_file('/etc/redis').with_mode('0700') } end describe 'with parameter: log_dir_mode' do - let (:params) { { :log_dir_mode => '_VALUE_' } } + let(:params) { { log_dir_mode: '0660' } } - it { is_expected.to contain_file('/var/log/redis').with_mode('_VALUE_') } + it { is_expected.to contain_file('/var/log/redis').with_mode('0660') } end describe 'with parameter: config_file_orig' do - let (:params) { { :config_file_orig => '_VALUE_' } } + let(:params) { { config_file_orig: '/path/to/orig' } } - it { is_expected.to contain_file('_VALUE_') } + it { is_expected.to contain_file('/path/to/orig') } end describe 'with parameter: config_file_mode' do - let (:params) { { :config_file_mode => '_VALUE_' } } + let(:params) { { config_file_mode: '0600' } } - it { is_expected.to contain_file(config_file_orig).with_mode('_VALUE_') } + it { is_expected.to contain_file(config_file_orig).with_mode('0600') } end describe 'with parameter: config_group' do - let (:params) { { :config_group => '_VALUE_' } } + let(:params) { { config_group: '_VALUE_' } } it { is_expected.to contain_file('/etc/redis').with_group('_VALUE_') } end describe 'with parameter: config_owner' do - let (:params) { { :config_owner => '_VALUE_' } } + let(:params) { { config_owner: '_VALUE_' } } it { 
is_expected.to contain_file('/etc/redis').with_owner('_VALUE_') } end describe 'with parameter daemonize' do - let (:params) { + let(:params) do { - :daemonize => true + daemonize: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /daemonize.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{daemonize.*yes} ) } end describe 'with parameter databases' do - let (:params) { + let(:params) do { - :databases => '_VALUE_' + databases: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /databases.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{databases 42} ) } end describe 'with parameter dbfilename' do - let (:params) { + let(:params) do { - :dbfilename => '_VALUE_' + dbfilename: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /dbfilename.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{dbfilename.*_VALUE_} ) } end describe 'without parameter dbfilename' do - let(:params) { + let(:params) do { - :dbfilename => false, + dbfilename: false } - } + end - it { is_expected.to contain_file(config_file_orig).without_content(/^dbfilename/) } - end + it { is_expected.to contain_file(config_file_orig).without_content(%r{^dbfilename}) } + end describe 'with parameter hash_max_ziplist_entries' do - let (:params) { + let(:params) do { - :hash_max_ziplist_entries => '_VALUE_' + hash_max_ziplist_entries: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /hash-max-ziplist-entries.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{hash-max-ziplist-entries 42} ) } end describe 'with parameter hash_max_ziplist_value' do - let (:params) { + let(:params) do { - :hash_max_ziplist_value => '_VALUE_' + hash_max_ziplist_value: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /hash-max-ziplist-value.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{hash-max-ziplist-value 42} ) } end + # TODO: Only present in 3.0 describe 'with parameter list_max_ziplist_entries' do - let (:params) { + let(:params) do { - :list_max_ziplist_entries => '_VALUE_' + list_max_ziplist_entries: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /list-max-ziplist-entries.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{list-max-ziplist-entries 42} ) } end describe 'with parameter list_max_ziplist_value' do - let (:params) { + let(:params) do { - :list_max_ziplist_value => '_VALUE_' + list_max_ziplist_value: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /list-max-ziplist-value.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{list-max-ziplist-value 42} ) } end describe 'with parameter log_dir' do - let (:params) { + let(:params) do { - :log_dir => '_VALUE_' + log_dir: '/var/log/redis' } - } + end - it { is_expected.to contain_file('_VALUE_').with( + it { + is_expected.to contain_file('/var/log/redis').with( 'ensure' => 'directory' ) } end describe 'with parameter log_file' do - let (:params) { + let(:params) do { - :log_file => '_VALUE_' + log_file: '/var/log/redis/redis.log' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /logfile.*_VALUE_/ + it { + 
is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^logfile /var/log/redis/redis\.log$} ) } end describe 'with parameter log_level' do - let (:params) { + let(:params) do { - :log_level => '_VALUE_' + log_level: 'debug' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /loglevel.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^loglevel debug$} ) } end describe 'with parameter: manage_repo' do - let (:params) { { :manage_repo => true } } + let(:params) { { manage_repo: true } } case facts[:operatingsystem] - when 'Debian' - context 'on Debian' do - it do - is_expected.to create_apt__source('dotdeb').with({ - :location => 'http://packages.dotdeb.org/', - :release => facts[:lsbdistcodename], - :repos => 'all', - :key => { - "id"=>"6572BBEF1B5FF28B28B706837E3F070089DF5277", - "source"=>"http://www.dotdeb.org/dotdeb.gpg" - }, - :include => { 'src' => true }, - }) + is_expected.to create_apt__source('dotdeb').with(location: 'http://packages.dotdeb.org/', + repos: 'all', + key: { + 'id' => '6572BBEF1B5FF28B28B706837E3F070089DF5277', + 'source' => 'http://www.dotdeb.org/dotdeb.gpg' + }, + include: { 'src' => true }) end - end - when 'Ubuntu' - - let(:ppa_repo) { manifest_vars[:ppa_repo] } - - it { is_expected.to contain_apt__ppa(ppa_repo) } - + it { is_expected.to contain_apt__ppa('ppa:chris-lea/redis-server') } when 'RedHat', 'CentOS', 'Scientific', 'OEL', 'Amazon' - it { is_expected.to contain_class('epel') } - end end describe 'with parameter unixsocket' do - let (:params) { - { - :unixsocket => '/tmp/redis.sock' - } - } + describe '/tmp/redis.sock' do + let(:params) { { unixsocket: '/tmp/redis.sock' } } - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /unixsocket.*\/tmp\/redis.sock/ - ) - } + it { is_expected.to contain_file(config_file_orig).with_content(%r{^unixsocket /tmp/redis\.sock$}) } + end + + describe 'empty string' do + let(:params) { { unixsocket: '' } } + + it { is_expected.to contain_file(config_file_orig).without_content(%r{^unixsocket }) } + end end describe 'with parameter unixsocketperm' do - let (:params) { - { - :unixsocketperm => '777' - } - } + describe '777' do + let(:params) { { unixsocketperm: '777' } } - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /unixsocketperm.*777/ - ) - } + it { is_expected.to contain_file(config_file_orig).with_content(%r{^unixsocketperm 777$}) } + end + + describe 'empty string' do + let(:params) { { unixsocketperm: '' } } + + it { is_expected.to contain_file(config_file_orig).without_content(%r{^unixsocketperm }) } + end end describe 'with parameter masterauth' do - let (:params) { + let(:params) do { - :masterauth => '_VALUE_' + masterauth: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /masterauth.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{masterauth.*_VALUE_} ) } end describe 'with parameter maxclients' do - let (:params) { + let(:params) do { - :maxclients => '_VALUE_' + maxclients: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /maxclients.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^maxclients 42$} ) } end describe 'with parameter maxmemory' do - let (:params) { + let(:params) do { - :maxmemory => '_VALUE_' + maxmemory: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => 
/maxmemory.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{maxmemory.*_VALUE_} ) } end describe 'with parameter maxmemory_policy' do - let (:params) { + let(:params) do { - :maxmemory_policy => '_VALUE_' + maxmemory_policy: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /maxmemory-policy.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{maxmemory-policy.*_VALUE_} ) } end describe 'with parameter maxmemory_samples' do - let (:params) { + let(:params) do { - :maxmemory_samples => '_VALUE_' + maxmemory_samples: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /maxmemory-samples.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{maxmemory-samples.*_VALUE_} ) } end describe 'with parameter min_slaves_max_lag' do - let (:params) { + let(:params) do { - :min_slaves_max_lag => '_VALUE_' + min_slaves_max_lag: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /min-slaves-max-lag.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^min-slaves-max-lag 42$} ) } end describe 'with parameter min_slaves_to_write' do - let (:params) { + let(:params) do { - :min_slaves_to_write => '_VALUE_' + min_slaves_to_write: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /min-slaves-to-write.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^min-slaves-to-write 42$} ) } end describe 'with parameter notify_keyspace_events' do - let (:params) { + let(:params) do { - :notify_keyspace_events => '_VALUE_' + notify_keyspace_events: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /notify-keyspace-events.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{notify-keyspace-events.*_VALUE_} ) } end describe 'with parameter notify_service' do - let (:params) { + let(:params) do { - :notify_service => true + notify_service: true } - } - - let(:service_name) { manifest_vars[:service_name] } + end it { is_expected.to contain_file(config_file_orig).that_notifies("Service[#{service_name}]") } end describe 'with parameter no_appendfsync_on_rewrite' do - let (:params) { + let(:params) do { - :no_appendfsync_on_rewrite => true + no_appendfsync_on_rewrite: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /no-appendfsync-on-rewrite.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{no-appendfsync-on-rewrite.*yes} ) } end describe 'with parameter: package_ensure' do - let (:params) { { :package_ensure => '_VALUE_' } } - let(:package_name) { manifest_vars[:package_name] } + let(:params) { { package_ensure: '_VALUE_' } } - it { is_expected.to contain_package(package_name).with( + it { + is_expected.to contain_package(package_name).with( 'ensure' => '_VALUE_' ) } end describe 'with parameter: package_name' do - let (:params) { { :package_name => '_VALUE_' } } + let(:params) { { package_name: '_VALUE_' } } it { is_expected.to contain_package('_VALUE_') } end describe 'with parameter pid_file' do - let (:params) { + let(:params) do { - :pid_file => '_VALUE_' + pid_file: '/path/to/redis.pid' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /pidfile.*_VALUE_/ + it { + is_expected.to 
contain_file(config_file_orig).with( + 'content' => %r{^pidfile /path/to/redis.pid$} ) } end describe 'with parameter port' do - let (:params) { + let(:params) do { - :port => '_VALUE_' + port: 6666 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /port.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^port 6666$} ) } end describe 'with parameter protected-mode' do - let (:params) { - { - :protected_mode => '_VALUE_' - } - } + let(:params) do + { + protected_mode: false + } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /protected-mode.*_VALUE_/ - ) - } - end + it do + if facts[:operatingsystem] == 'Ubuntu' && facts[:operatingsystemmajrelease] == '16.04' + is_expected.not_to contain_file(config_file_orig).with_content(%r{protected-mode}) + else + is_expected.to contain_file(config_file_orig).with_content(%r{^protected-mode no$}) + end + end + end describe 'with parameter hll_sparse_max_bytes' do - let (:params) { + let(:params) do { - :hll_sparse_max_bytes=> '_VALUE_' + hll_sparse_max_bytes: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /hll-sparse-max-bytes.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^hll-sparse-max-bytes 42$} ) } end describe 'with parameter hz' do - let (:params) { + let(:params) do { - :hz=> '_VALUE_' + hz: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /hz.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^hz 42$} ) } end describe 'with parameter latency_monitor_threshold' do - let (:params) { + let(:params) do { - :latency_monitor_threshold=> '_VALUE_' + latency_monitor_threshold: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /latency-monitor-threshold.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^latency-monitor-threshold 42$} ) } end describe 'with parameter rdbcompression' do - let (:params) { + let(:params) do { - :rdbcompression => true + rdbcompression: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /rdbcompression.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{rdbcompression.*yes} ) } end describe 'with parameter repl_backlog_size' do - let (:params) { + let(:params) do { - :repl_backlog_size => '_VALUE_' + repl_backlog_size: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /repl-backlog-size.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{repl-backlog-size.*_VALUE_} ) } end describe 'with parameter repl_backlog_ttl' do - let (:params) { + let(:params) do { - :repl_backlog_ttl => '_VALUE_' + repl_backlog_ttl: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /repl-backlog-ttl.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^repl-backlog-ttl 42$} ) } end describe 'with parameter repl_disable_tcp_nodelay' do - let (:params) { + let(:params) do { - :repl_disable_tcp_nodelay => true + repl_disable_tcp_nodelay: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /repl-disable-tcp-nodelay.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{repl-disable-tcp-nodelay.*yes} ) } end describe 
'with parameter repl_ping_slave_period' do - let (:params) { + let(:params) do { - :repl_ping_slave_period => 1 + repl_ping_slave_period: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /repl-ping-slave-period.*1/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^repl-ping-slave-period 42} ) } end describe 'with parameter repl_timeout' do - let (:params) { + let(:params) do { - :repl_timeout => 1 + repl_timeout: 1 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /repl-timeout.*1/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{repl-timeout.*1} ) } end describe 'with parameter requirepass' do - let (:params) { + let(:params) do { - :requirepass => '_VALUE_' + requirepass: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /requirepass.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{requirepass.*_VALUE_} ) } end describe 'with parameter save_db_to_disk' do context 'true' do - let (:params) { + let(:params) do { - :save_db_to_disk => true + save_db_to_disk: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /^save/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^save} ) } end context 'false' do - let (:params) { + let(:params) do { - :save_db_to_disk => false + save_db_to_disk: false } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /^(?!save)/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^(?!save)} ) } end end describe 'with parameter save_db_to_disk_interval' do context 'with save_db_to_disk true' do - context 'default' do - let (:params) { + let(:params) do { - :save_db_to_disk => true + save_db_to_disk: true } - } + end - it { is_expected.to contain_file(config_file_orig).with('content' => /save 900 1/)} - it { is_expected.to contain_file(config_file_orig).with('content' => /save 300 10/)} - it { is_expected.to contain_file(config_file_orig).with('content' => /save 60 10000/) + it { is_expected.to contain_file(config_file_orig).with('content' => %r{save 900 1}) } + it { is_expected.to contain_file(config_file_orig).with('content' => %r{save 300 10}) } + it { + is_expected.to contain_file(config_file_orig).with('content' => %r{save 60 10000}) } end context 'default' do - let (:params) { + let(:params) do { - :save_db_to_disk => true, - :save_db_to_disk_interval => {'900' =>'2', '300' => '11', '60' => '10011'} + save_db_to_disk: true, + save_db_to_disk_interval: { '900' => '2', '300' => '11', '60' => '10011' } } - } + end - it { is_expected.to contain_file(config_file_orig).with('content' => /save 900 2/)} - it { is_expected.to contain_file(config_file_orig).with('content' => /save 300 11/)} - it { is_expected.to contain_file(config_file_orig).with('content' => /save 60 10011/) + it { is_expected.to contain_file(config_file_orig).with('content' => %r{save 900 2}) } + it { is_expected.to contain_file(config_file_orig).with('content' => %r{save 300 11}) } + it { + is_expected.to contain_file(config_file_orig).with('content' => %r{save 60 10011}) } end - end context 'with save_db_to_disk false' do context 'default' do - let (:params) { + let(:params) do { - :save_db_to_disk => false + save_db_to_disk: false } - } + end - it { is_expected.to contain_file(config_file_orig).without('content' => /save 900 1/) } - it { is_expected.to 
contain_file(config_file_orig).without('content' => /save 300 10/) } - it { is_expected.to contain_file(config_file_orig).without('content' => /save 60 10000/) } + it { is_expected.to contain_file(config_file_orig).without('content' => %r{save 900 1}) } + it { is_expected.to contain_file(config_file_orig).without('content' => %r{save 300 10}) } + it { is_expected.to contain_file(config_file_orig).without('content' => %r{save 60 10000}) } end end end describe 'with parameter: service_manage (set to false)' do - let (:params) { { :service_manage => false } } - let(:package_name) { manifest_vars[:package_name] } + let(:params) { { service_manage: false } } - it { should_not contain_service(package_name) } + it { is_expected.not_to contain_service(package_name) } end describe 'with parameter: service_enable' do - let (:params) { { :service_enable => true } } - let(:package_name) { manifest_vars[:package_name] } + let(:params) { { service_enable: true } } it { is_expected.to contain_service(package_name).with_enable(true) } end describe 'with parameter: service_ensure' do - let (:params) { { :service_ensure => '_VALUE_' } } - let(:package_name) { manifest_vars[:package_name] } + let(:params) { { service_ensure: 'stopped' } } - it { is_expected.to contain_service(package_name).with_ensure('_VALUE_') } + it { is_expected.to contain_service(package_name).with_ensure('stopped') } end describe 'with parameter: service_group' do - let (:params) { { :service_group => '_VALUE_' } } + let(:params) { { service_group: '_VALUE_' } } it { is_expected.to contain_file('/var/log/redis').with_group('_VALUE_') } end describe 'with parameter: service_hasrestart' do - let (:params) { { :service_hasrestart => true } } - let(:package_name) { manifest_vars[:package_name] } + let(:params) { { service_hasrestart: true } } it { is_expected.to contain_service(package_name).with_hasrestart(true) } end describe 'with parameter: service_hasstatus' do - let (:params) { { :service_hasstatus => true } } - let(:package_name) { manifest_vars[:package_name] } + let(:params) { { service_hasstatus: true } } it { is_expected.to contain_service(package_name).with_hasstatus(true) } end describe 'with parameter: service_name' do - let (:params) { { :service_name => '_VALUE_' } } + let(:params) { { service_name: '_VALUE_' } } it { is_expected.to contain_service('_VALUE_').with_name('_VALUE_') } end describe 'with parameter: service_user' do - let (:params) { { :service_user => '_VALUE_' } } + let(:params) { { service_user: '_VALUE_' } } it { is_expected.to contain_file('/var/log/redis').with_owner('_VALUE_') } end describe 'with parameter set_max_intset_entries' do - let (:params) { + let(:params) do { - :set_max_intset_entries => '_VALUE_' + set_max_intset_entries: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /set-max-intset-entries.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^set-max-intset-entries 42$} ) } end describe 'with parameter slave_priority' do - let (:params) { + let(:params) do { - :slave_priority => '_VALUE_' + slave_priority: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /slave-priority.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^slave-priority 42$} ) } end describe 'with parameter slave_read_only' do - let (:params) { + let(:params) do { - :slave_read_only => true + slave_read_only: true } - } + end - it { is_expected.to 
contain_file(config_file_orig).with( - 'content' => /slave-read-only.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{slave-read-only.*yes} ) } end describe 'with parameter slave_serve_stale_data' do - let (:params) { + let(:params) do { - :slave_serve_stale_data => true + slave_serve_stale_data: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /slave-serve-stale-data.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{slave-serve-stale-data.*yes} ) } end describe 'with parameter: slaveof' do context 'binding to localhost' do - let (:params) { + let(:params) do { - :bind => '127.0.0.1', - :slaveof => '_VALUE_' + bind: '127.0.0.1', + slaveof: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /^slaveof _VALUE_/ - )} + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^slaveof _VALUE_} + ) + } end context 'binding to external ip' do - let (:params) { + let(:params) do { - :bind => '10.0.0.1', - :slaveof => '_VALUE_' + bind: '10.0.0.1', + slaveof: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /^slaveof _VALUE_/ - ) - } + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^slaveof _VALUE_} + ) + } end end describe 'with parameter slowlog_log_slower_than' do - let (:params) { + let(:params) do { - :slowlog_log_slower_than => '_VALUE_' + slowlog_log_slower_than: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /slowlog-log-slower-than.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^slowlog-log-slower-than 42$} ) } end describe 'with parameter slowlog_max_len' do - let (:params) { + let(:params) do { - :slowlog_max_len => '_VALUE_' + slowlog_max_len: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /slowlog-max-len.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^slowlog-max-len 42$} ) } end describe 'with parameter stop_writes_on_bgsave_error' do - let (:params) { + let(:params) do { - :stop_writes_on_bgsave_error => true + stop_writes_on_bgsave_error: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /stop-writes-on-bgsave-error.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{stop-writes-on-bgsave-error.*yes} ) } end describe 'with parameter syslog_enabled' do - let (:params) { + let(:params) do { - :syslog_enabled => true + syslog_enabled: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /syslog-enabled yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{syslog-enabled yes} ) } end describe 'with parameter syslog_facility' do - let (:params) { + let(:params) do { - :syslog_enabled => true, - :syslog_facility => '_VALUE_' + syslog_enabled: true, + syslog_facility: '_VALUE_' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /syslog-facility.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{syslog-facility.*_VALUE_} ) } end describe 'with parameter tcp_backlog' do - let (:params) { + let(:params) do { - :tcp_backlog=> '_VALUE_' + tcp_backlog: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /tcp-backlog.*_VALUE_/ + it { 
+ is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^tcp-backlog 42$} ) } end describe 'with parameter tcp_keepalive' do - let (:params) { + let(:params) do { - :tcp_keepalive => '_VALUE_' + tcp_keepalive: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /tcp-keepalive.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^tcp-keepalive 42$} ) } end describe 'with parameter timeout' do - let (:params) { + let(:params) do { - :timeout => '_VALUE_' + timeout: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /timeout.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^timeout 42$} ) } end describe 'with parameter workdir' do - let (:params) { + let(:params) do { - :workdir => '_VALUE_' + workdir: '/var/workdir' } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /dir.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{^dir /var/workdir$} ) } end describe 'with parameter zset_max_ziplist_entries' do - let (:params) { + let(:params) do { - :zset_max_ziplist_entries => '_VALUE_' + zset_max_ziplist_entries: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /zset-max-ziplist-entries.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{zset-max-ziplist-entries 42} ) } end describe 'with parameter zset_max_ziplist_value' do - let (:params) { + let(:params) do { - :zset_max_ziplist_value => '_VALUE_' + zset_max_ziplist_value: 42 } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /zset-max-ziplist-value.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{zset-max-ziplist-value 42} ) } end describe 'with parameter cluster_enabled-false' do - let (:params) { + let(:params) do { - :cluster_enabled => false + cluster_enabled: false } - } + end - it { should_not contain_file(config_file_orig).with( - 'content' => /cluster-enabled/ + it { + is_expected.not_to contain_file(config_file_orig).with( + 'content' => %r{cluster-enabled} ) } end describe 'with parameter cluster_enabled-true' do - let (:params) { + let(:params) do { - :cluster_enabled => true + cluster_enabled: true } - } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /cluster-enabled.*yes/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{cluster-enabled.*yes} ) } end describe 'with parameter cluster_config_file' do - let (:params) { + let(:params) do { - :cluster_enabled => true, - :cluster_config_file => '_VALUE_' + cluster_enabled: true, + cluster_config_file: '_VALUE_' } + end + + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{cluster-config-file.*_VALUE_} + ) } + end + + describe 'with parameter cluster_node_timeout' do + let(:params) do + { + cluster_enabled: true, + cluster_node_timeout: 42 + } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /cluster-config-file.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{cluster-node-timeout 42} ) } end describe 'with parameter cluster_config_file' do - let (:params) { + let(:params) do { - :cluster_enabled => true, - :cluster_node_timeout => '_VALUE_' + cluster_enabled: true, + cluster_slave_validity_factor: 1 } + end + + it { + is_expected.to 
contain_file(config_file_orig).with( + 'content' => %r{cluster-slave-validity-factor.*1} + ) } + end + + describe 'with parameter cluster_require_full_coverage' do + let(:params) do + { + cluster_enabled: true, + cluster_require_full_coverage: true + } + end - it { is_expected.to contain_file(config_file_orig).with( - 'content' => /cluster-node-timeout.*_VALUE_/ + it { + is_expected.to contain_file(config_file_orig).with( + 'content' => %r{cluster-require-full-coverage.*yes} ) } end + describe 'with parameter cluster_require_full_coverage' do + let(:params) do + { + cluster_enabled: true, + cluster_require_full_coverage: false + } + end + + it { is_expected.to contain_file(config_file_orig).with_content(%r{cluster-require-full-coverage.*no}) } + end + describe 'with parameter cluster_migration_barrier' do + let(:params) do + { + cluster_enabled: true, + cluster_migration_barrier: 1 + } + end + + it { is_expected.to contain_file(config_file_orig).with_content(%r{cluster-migration-barrier.*1}) } + end + + describe 'with parameter manage_service_file' do + let(:params) do + { + manage_service_file: true + } + end + + it { is_expected.to contain_file(service_file) } + + it do + content = <<-END.gsub(%r{^\s+\|}, '') + |[Unit] + |Description=Redis Advanced key-value store for instance default + |After=network.target + |After=network-online.target + |Wants=network-online.target + | + |[Service] + |RuntimeDirectory=redis + |RuntimeDirectoryMode=2755 + |Type=#{servicetype} + |ExecStart=/usr/bin/redis-server #{config_file}#{systemd} + |ExecStop=/usr/bin/redis-cli -p 6379 shutdown + |Restart=always + |User=redis + |Group=redis + |LimitNOFILE=65536 + | + |[Install] + |WantedBy=multi-user.target + END + + if facts['service_provider'] == 'systemd' + is_expected.to contain_file(service_file).with_content(content) + else + is_expected.to contain_file(service_file).with_content(%r{Required-Start:}) + end + end + end + + describe 'with parameter manage_service_file' do + let(:params) do + { + manage_service_file: false + } + end + + it { is_expected.not_to contain_file(service_file) } + end + + context 'when $::redis_server_version fact is not present' do + let(:facts) { super().merge(redis_server_version: nil) } + + context 'when package_ensure is version (3.2.1)' do + let(:params) { { package_ensure: '3.2.1' } } + + it { is_expected.to contain_file(config_file_orig).with_content(%r{^protected-mode}) } + end + + context 'when package_ensure is a newer version(4.0-rc3) (older features enabled)' do + let(:params) { { package_ensure: '4.0-rc3' } } + + it { is_expected.to contain_file(config_file_orig).with_content(%r{^protected-mode}) } + end + end + + context 'when $::redis_server_version fact is present but a newer version (older features enabled)' do + let(:facts) { super().merge(redis_server_version: '3.2.1') } + + it { is_expected.to contain_file(config_file_orig).with_content(%r{^protected-mode}) } + end end end - end - diff --git a/spec/classes/redis_ubuntu_1404_spec.rb b/spec/classes/redis_ubuntu_1404_spec.rb deleted file mode 100644 index 6244a27..0000000 --- a/spec/classes/redis_ubuntu_1404_spec.rb +++ /dev/null @@ -1,116 +0,0 @@ -require 'spec_helper' - -describe 'redis' do - context 'on Ubuntu 1404' do - - let(:facts) { - ubuntu_1404_facts - } - - context 'should set Ubuntu specific values' do - - context 'when $::redis_server_version fact is not present (older features not enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => nil, - }) - } - - it { should 
contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis/redis.conf.puppet').without('content' => /^tcp-backlog/) } - it { should contain_file('/etc/redis/redis.conf.puppet').without('content' => /^protected-mode/) } - - end - - context 'when $::redis_server_version fact is not present and package_ensure is a newer version(3.2.1) (older features enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => nil, - }) - } - let (:params) { { :package_ensure => '3.2.1' } } - - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^protected-mode/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^tcp-backlog/) } - - end - - context 'when $::redis_server_version fact is not present and package_ensure is a newer version(3:3.2.1) (older features enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => nil, - }) - } - let (:params) { { :package_ensure => '3:3.2.1' } } - - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^protected-mode/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^tcp-backlog/) } - - end - - context 'when $::redis_server_version fact is not present and package_ensure is a newer version(4:4.0-rc3) (older features enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => nil, - }) - } - let (:params) { { :package_ensure => '4:4.0-rc3' } } - - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis/redis.conf.puppet').without('content' => /^protected-mode/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^tcp-backlog/) } - - end - context 'when $::redis_server_version fact is not present and package_ensure is a newer version(4.0-rc3) (older features enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => nil, - }) - } - let (:params) { { :package_ensure => '4.0-rc3' } } - - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis/redis.conf.puppet').without('content' => /^protected-mode/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^tcp-backlog/) } - - end - - context 'when $::redis_server_version fact is present but the older version (older features not enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => '2.8.4', - }) - } - - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should contain_file('/etc/redis/redis.conf.puppet').without('content' => /^tcp-backlog/) } - it { should contain_file('/etc/redis/redis.conf.puppet').without('content' => /^protected-mode/) } - - end - - context 'when $::redis_server_version fact is present but a newer version (older features enabled)' do - - let(:facts) { - ubuntu_1404_facts.merge({ - :redis_server_version => '3.2.1', - }) - } - - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^hash-max-ziplist-entries/) } - it { should 
contain_file('/etc/redis/redis.conf.puppet').with('content' => /^tcp-backlog/) } - it { should contain_file('/etc/redis/redis.conf.puppet').with('content' => /^protected-mode/) } - - end - end - - end - -end diff --git a/spec/classes/redis_ulimit_spec.rb b/spec/classes/redis_ulimit_spec.rb deleted file mode 100644 index f8d7a94..0000000 --- a/spec/classes/redis_ulimit_spec.rb +++ /dev/null @@ -1,164 +0,0 @@ -require 'spec_helper' - -describe 'redis::ulimit' do - # add these two lines in a single test block to enable puppet and hiera debug mode - # Puppet::Util::Log.level = :debug - # Puppet::Util::Log.newdestination(:console) - - context 'with managed_by_cluster_manager true' do - let(:facts) { - debian_facts - } - let :pre_condition do - [ - 'class { redis: - managed_by_cluster_manager => true, - }' - ] - end - it { should compile.with_all_deps } - it do - is_expected.to contain_file("/etc/security/limits.d/redis.conf").with( - { - "ensure" => "file", - "owner" => "root", - "group" => "root", - "mode" => "0644", - "content" => "redis soft nofile 65536\nredis hard nofile 65536\n", - } - ) - end - end - - context 'with managed_by_cluster_manager true but not managing service' do - let(:facts) { - debian_facts.merge({ - :service_provider => 'systemd', - }) - } - let :pre_condition do - [ - 'class { "redis": - managed_by_cluster_manager => true, - service_manage => false, - notify_service => false, - }' - ] - end - it { should compile.with_all_deps } - it do - is_expected.to contain_file("/etc/security/limits.d/redis.conf").with( - { - "ensure" => "file", - "owner" => "root", - "group" => "root", - "mode" => "0644", - "content" => "redis soft nofile 65536\nredis hard nofile 65536\n", - } - ) - end - end - - context 'on a systemd system' do - let(:facts) { - debian_facts.merge({ - :service_provider => 'systemd', - }) - } - let :pre_condition do - [ - 'class { redis: - ulimit => "7777", - }' - ] - end - it { should compile.with_all_deps } - it do - is_expected.to contain_file("/etc/systemd/system/redis-server.service.d/limit.conf").with( - { - "ensure" => "file", - "owner" => "root", - "group" => "root", - "mode" => "0444", - } - ) - end - - it do - is_expected.to contain_augeas("Systemd redis ulimit").with( - { - 'incl' => '/etc/systemd/system/redis-server.service.d/limits.conf', - 'lens' => 'Systemd.lns', - 'context' => '/etc/systemd/system/redis-server.service.d/limits.conf', - 'changes' => [ - "defnode nofile Service/LimitNOFILE \"\"", - "set $nofile/value \"7777\"" - ], - 'notify' => [ - 'Exec[systemd-reload-redis]', - ], - } - ) - end - end - - context 'on a non-systemd system' do - context 'Ubuntu 1404 system' do - let(:facts) { - ubuntu_1404_facts.merge({ - :service_provider => 'debian', - }) - } - let :pre_condition do - [ - 'class { redis: - ulimit => "7777", - }' - ] - end - it { should compile.with_all_deps } - it do - is_expected.not_to contain_file('/etc/systemd/system/redis-server.service.d/limit.conf') - end - - it do - is_expected.not_to contain_augeas('Systemd redis ulimit') - end - - it do - is_expected.to contain_augeas('redis ulimit').with('changes' => 'set ULIMIT 7777') - is_expected.to contain_augeas('redis ulimit').with('context' => '/files/etc/default/redis-server') - end - end - - context 'CentOS 6 system' do - let(:facts) { - centos_6_facts.merge({ - :service_provider => 'redhat', - }) - } - let :pre_condition do - [ - 'class { redis: - ulimit => "7777", - }' - ] - end - it { should compile.with_all_deps } - it do - is_expected.not_to 
contain_file('/etc/systemd/system/redis-server.service.d/limit.conf') - end - - it do - is_expected.not_to contain_augeas('Systemd redis ulimit') - end - - it do - is_expected.to contain_augeas('redis ulimit').with('changes' => 'set ULIMIT 7777') - is_expected.to contain_augeas('redis ulimit').with('context' => '/files/etc/sysconfig/redis') - end - end - end - - -end diff --git a/spec/defines/instance_spec.rb b/spec/defines/instance_spec.rb index a1dd2cf..03647ff 100644 --- a/spec/defines/instance_spec.rb +++ b/spec/defines/instance_spec.rb @@ -1,76 +1,63 @@ require 'spec_helper' -describe 'redis::instance', :type => :define do +describe 'redis::instance' do let :pre_condition do - 'class { "redis": + <<-PUPPET + class { 'redis': default_install => false, - }' + } + PUPPET end - let :title do - 'app2' - end - describe 'os-dependent items' do - context "on Ubuntu systems" do - context '14.04' do - let(:facts) { - ubuntu_1404_facts - } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^bind 127.0.0.1/) } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^logfile \/var\/log\/redis\/redis-server-app2.log/) } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^dir \/var\/lib\/redis\/redis-server-app2/) } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^unixsocket \/var\/run\/redis\/redis-server-app2.sock/) } - it { should contain_file('/var/lib/redis/redis-server-app2') } - it { should contain_service('redis-server-app2').with_ensure('running') } - it { should contain_service('redis-server-app2').with_enable('true') } - it { should contain_file('/etc/init.d/redis-server-app2').with_content(/DAEMON_ARGS=\/etc\/redis\/redis-server-app2.conf/) } - it { should contain_file('/etc/init.d/redis-server-app2').with_content(/PIDFILE=\/var\/run\/redis\/redis-server-app2.pid/) } - end - context '16.04' do - let(:facts) { - ubuntu_1604_facts.merge({ - :service_provider => 'systemd', - }) - } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^bind 127.0.0.1/) } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^logfile \/var\/log\/redis\/redis-server-app2.log/) } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^dir \/var\/lib\/redis\/redis-server-app2/) } - it { should contain_file('/etc/redis/redis-server-app2.conf.puppet').with('content' => /^unixsocket \/var\/run\/redis\/redis-server-app2.sock/) } - it { should contain_file('/var/lib/redis/redis-server-app2') } - it { should contain_service('redis-server-app2').with_ensure('running') } - it { should contain_service('redis-server-app2').with_enable('true') } - it { should contain_file('/etc/systemd/system/redis-server-app2.service').with_content(/ExecStart=\/usr\/bin\/redis-server \/etc\/redis\/redis-server-app2.conf/) } - end - end - context "on CentOS systems" do - context '6' do - let(:facts) { - centos_6_facts - } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^bind 127.0.0.1/) } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^logfile \/var\/log\/redis\/redis-server-app2.log/) } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^dir \/var\/lib\/redis\/redis-server-app2/) } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^unixsocket 
\/var\/run\/redis\/redis-server-app2.sock/) } - it { should contain_file('/var/lib/redis/redis-server-app2') } - it { should contain_service('redis-server-app2').with_ensure('running') } - it { should contain_service('redis-server-app2').with_enable('true') } - it { should contain_file('/etc/init.d/redis-server-app2').with_content(/REDIS_CONFIG="\/etc\/redis-server-app2.conf"/) } - it { should contain_file('/etc/init.d/redis-server-app2').with_content(/pidfile="\/var\/run\/redis\/redis-server-app2.pid"/) } - end - context '7' do - let(:facts) { - centos_7_facts.merge({ - :service_provider => 'systemd', - }) - } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^bind 127.0.0.1/) } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^logfile \/var\/log\/redis\/redis-server-app2.log/) } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^dir \/var\/lib\/redis\/redis-server-app2/) } - it { should contain_file('/etc/redis-server-app2.conf.puppet').with('content' => /^unixsocket \/var\/run\/redis\/redis-server-app2.sock/) } - it { should contain_file('/var/lib/redis/redis-server-app2') } - it { should contain_service('redis-server-app2').with_ensure('running') } - it { should contain_service('redis-server-app2').with_enable('true') } - it { should contain_file('/etc/systemd/system/redis-server-app2.service').with_content(/ExecStart=\/usr\/bin\/redis-server \/etc\/redis-server-app2.conf/) } + + on_supported_os.each do |os, facts| + context "on #{os}" do + let(:facts) { facts } + + context 'with app2 title' do + let(:title) { 'app2' } + let(:config_file) do + case facts[:osfamily] + when 'RedHat' + '/etc/redis-server-app2.conf' + when 'FreeBSD' + '/usr/local/etc/redis/redis-server-app2.conf' + when 'Debian' + '/etc/redis/redis-server-app2.conf' + when 'Archlinux' + '/etc/redis/redis-server-app2.conf' + end + end + + it do + is_expected.to contain_file("#{config_file}.puppet"). + with_content(%r{^bind 127.0.0.1}). + with_content(%r{^logfile /var/log/redis/redis-server-app2\.log}). + with_content(%r{^dir /var/lib/redis/redis-server-app2}). + with_content(%r{^unixsocket /var/run/redis/redis-server-app2\.sock}) + end + it { is_expected.to contain_file('/var/lib/redis/redis-server-app2') } + + it do + if facts['service_provider'] == 'systemd' + is_expected.to contain_file('/etc/systemd/system/redis-server-app2.service').with_content(%r{ExecStart=/usr/bin/redis-server #{config_file}}) + else + case facts[:os]['family'] + when 'Debian' + is_expected.to contain_file('/etc/init.d/redis-server-app2'). + with_content(%r{DAEMON_ARGS=#{config_file}}). + with_content(%r{PIDFILE=/var/run/redis/redis-server-app2\.pid}) + when 'RedHat' + is_expected.to contain_file('/etc/init.d/redis-server-app2'). + with_content(%r{REDIS_CONFIG="#{config_file}"}). 
+ with_content(%r{pidfile="/var/run/redis/redis-server-app2\.pid"}) + else + is_expected.to contain_file('/etc/init.d/redis-server-app2') + end + end + end + + it { is_expected.to contain_service('redis-server-app2').with_ensure('running').with_enable('true') } end end end end diff --git a/spec/fixtures/facts/redis_server_2410_version b/spec/fixtures/facts/redis_server_2410_version deleted file mode 100644 index 3fd5831..0000000 --- a/spec/fixtures/facts/redis_server_2410_version +++ /dev/null @@ -1 +0,0 @@ -Redis server version 2.4.10 (00000000:0) diff --git a/spec/fixtures/facts/redis_server_2819_version b/spec/fixtures/facts/redis_server_2819_version deleted file mode 100644 index fa5c6b5..0000000 --- a/spec/fixtures/facts/redis_server_2819_version +++ /dev/null @@ -1 +0,0 @@ -Redis server v=2.8.19 sha=00000000:0 malloc=jemalloc-3.6.0 bits=64 build=c0359e7aa3798aa2 diff --git a/spec/fixtures/facts/redis_server_3209_version b/spec/fixtures/facts/redis_server_3209_version deleted file mode 100644 index 510d820..0000000 --- a/spec/fixtures/facts/redis_server_3209_version +++ /dev/null @@ -1 +0,0 @@ -Redis server v=3.2.9 sha=00000000:0 malloc=jemalloc-4.0.3 bits=64 build=67e0f9d6580364c0 \ No newline at end of file diff --git a/spec/functions/redisget_spec.rb b/spec/functions/redis/get_spec.rb similarity index 74% rename from spec/functions/redisget_spec.rb rename to spec/functions/redis/get_spec.rb index 99d2fb0..c774ba5 100644 --- a/spec/functions/redisget_spec.rb +++ b/spec/functions/redis/get_spec.rb @@ -1,93 +1,93 @@ require 'spec_helper' require 'mock_redis' require 'redis' -REDIS_URL='redis://localhost:6379' -LOCAL_BROKEN_URL='redis://localhost:1234' -REMOTE_BROKEN_URL='redis://redis.example.com:1234' +REDIS_URL = 'redis://localhost:6379'.freeze +LOCAL_BROKEN_URL = 'redis://localhost:1234'.freeze +REMOTE_BROKEN_URL = 'redis://redis.example.com:1234'.freeze -describe 'redisget' do +describe 'redis::get' do context 'should error if connection to remote redis server cannot be made and no default is specified' do - it { is_expected.to run.with_params('nonexistent_key', REMOTE_BROKEN_URL).and_raise_error(Puppet::Error, /connection to redis server failed - getaddrinfo/) } + it { is_expected.to run.with_params('nonexistent_key', REMOTE_BROKEN_URL).and_raise_error(Puppet::Error, %r{connection to redis server failed - Error connecting to Redis on redis.example.com:1234 \(SocketError\)}) } end context 'should return default value if connection to remote redis server cannot be made and default is specified' do it { is_expected.to run.with_params('nonexistent_key', REMOTE_BROKEN_URL, 'default_value').and_return('default_value') } end context 'should error if connection to local redis server cannot be made and no default is specified' do - it { is_expected.to run.with_params('nonexistent_key', LOCAL_BROKEN_URL).and_raise_error(Puppet::Error, /connection to redis server failed - Error connecting to Redis on localhost:1234/) } + it { is_expected.to run.with_params('nonexistent_key', LOCAL_BROKEN_URL).and_raise_error(Puppet::Error, %r{connection to redis server failed - Error connecting to Redis on localhost:1234}) } end context 'should return default value if connection to local redis server cannot be made and default is specified' do it { is_expected.to run.with_params('nonexistent_key', LOCAL_BROKEN_URL, 'default_value').and_return('default_value') } end context 'should return nil if key does not exist and no default is specified' do - before { + before do mr = MockRedis.new 
Redis.stubs(:new).returns(mr) - } + end it { is_expected.to run.with_params('nonexistent_key', REDIS_URL).and_return(nil) } end context 'should return the default value if specified and key does not exist' do - before { + before do mr = MockRedis.new Redis.stubs(:new).returns(mr) - } + end it { is_expected.to run.with_params('nonexistent_key', REDIS_URL, 'default_value').and_return('default_value') } end context 'should return the value of the specified key' do - before { + before do mr = MockRedis.new Redis.stubs(:new).returns(mr) mr.set('key', 'value') - } + end it { is_expected.to run.with_params('key', REDIS_URL).and_return('value') } end context 'should return the value of the specified key and not the default' do - before { + before do mr = MockRedis.new Redis.stubs(:new).returns(mr) mr.set('key', 'value') - } + end it { is_expected.to run.with_params('key', REDIS_URL, 'default_value').and_return('value') } end describe 'with incorrect arguments' do context 'with no argument specified' do - it { is_expected.to run.with_params().and_raise_error(Puppet::ParseError, /wrong number of arguments/i) } + it { is_expected.to run.with_params.and_raise_error(ArgumentError) } end context 'with only one argument specified' do - it { is_expected.to run.with_params('some_key').and_raise_error(Puppet::ParseError, /wrong number of arguments/i) } + it { is_expected.to run.with_params('some_key').and_raise_error(ArgumentError) } end context 'with more than three arguments specified' do - it { is_expected.to run.with_params('way', 'too', 'many', 'args').and_raise_error(Puppet::ParseError, /wrong number of arguments/i) } + it { is_expected.to run.with_params('way', 'too', 'many', 'args').and_raise_error(ArgumentError) } end end describe 'when an invalid type (non-string) is specified' do - before(:each) { + before do mr = MockRedis.new Redis.stubs(:new).returns(mr) - } - [{ 'ha' => 'sh'}, true, 1, ['an', 'array']].each do |p| + end + [{ 'ha' => 'sh' }, true, 1, %w[an array]].each do |p| context "specifing first parameter as <#{p}>" do - it { is_expected.to run.with_params(p, REDIS_URL).and_raise_error(Puppet::ParseError, /wrong argument type/i) } + it { is_expected.to run.with_params(p, REDIS_URL).and_raise_error(ArgumentError) } end context "specifing second parameter as <#{p}>" do - it { is_expected.to run.with_params('valid', p).and_raise_error(Puppet::ParseError, /wrong argument type/i) } + it { is_expected.to run.with_params('valid', p).and_raise_error(ArgumentError) } end context "specifing third parameter as <#{p}>" do - it { is_expected.to run.with_params('valid', p).and_raise_error(Puppet::ParseError, /wrong argument type/i) } + it { is_expected.to run.with_params('valid', p).and_raise_error(ArgumentError) } end end end end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb index 7b96623..8858124 100644 --- a/spec/spec_helper.rb +++ b/spec/spec_helper.rb @@ -1,163 +1,85 @@ -require 'rubygems' +RSpec.configure do |c| + c.mock_with :mocha +end + require 'puppetlabs_spec_helper/module_spec_helper' require 'rspec-puppet-facts' include RspecPuppetFacts -require 'puppet/indirector/catalog/compiler' - -# Magic to add a catalog.exported_resources accessor -class Puppet::Resource::Catalog::Compiler - alias_method :filter_exclude_exported_resources, :filter - def filter(catalog) - filter_exclude_exported_resources(catalog).tap do |filtered| - # Every time we filter a catalog, add a .exported_resources to it. 
- filtered.define_singleton_method(:exported_resources) do - # The block passed to filter returns `false` if it wants to keep a resource. Go figure. - catalog.filter { |r| !r.exported? } - end - end - end -end - -module Support - module ExportedResources - # Get exported resources as a catalog. Compatible with all catalog matchers, e.g. - # `expect(exported_resources).to contain_myexportedresource('name').with_param('value')` - def exported_resources - # Catalog matchers expect something that can receive .call - proc { subject.call.exported_resources } - end - end -end - -def get_spec_fixtures_dir - spec_dir = File.expand_path(File.dirname(__FILE__) + '/fixtures') - - raise "The directory #{spec_dir} does not exist" unless Dir.exists? spec_dir - - spec_dir -end - -def read_fixture_file filename - filename = get_spec_fixtures_dir + "/#{filename}" - - raise "The fixture file #{filename} doesn't exist" unless File.exists? filename - - File.read(filename) -end - def manifest_vars - vars = {} case facts[:osfamily].to_s when 'RedHat' vars[:package_name] = 'redis' vars[:service_name] = 'redis' + vars[:config_file] = '/etc/redis.conf' vars[:config_file_orig] = '/etc/redis.conf.puppet' - vars[:ppa_repo] = nil when 'FreeBSD', vars[:package_name] = 'redis' vars[:service_name] = 'redis' + vars[:config_file] = '/usr/local/etc/redis.conf' vars[:config_file_orig] = '/usr/local/etc/redis.conf.puppet' - vars[:ppa_repo] = nil when 'Debian' vars[:package_name] = 'redis-server' vars[:service_name] = 'redis-server' + vars[:config_file] = '/etc/redis/redis.conf' vars[:config_file_orig] = '/etc/redis/redis.conf.puppet' - vars[:ppa_repo] = 'ppa:chris-lea/redis-server' when 'Archlinux' vars[:package_name] = 'redis' vars[:service_name] = 'redis' vars[:config_file] = '/etc/redis/redis.conf' vars[:config_file_orig] = '/etc/redis/redis.conf.puppet' - vars[:ppa_repo] = nil end vars end -def centos_facts - { - :operatingsystem => 'CentOS', - :osfamily => 'RedHat', - :puppetversion => '4.5.2', - } -end - -def debian_facts - { - :operatingsystem => 'Debian', - :osfamily => 'Debian', - :operatingsystemmajrelease => '8', - :puppetversion => '4.5.2', - :lsbdistcodename => 'jessie', - } -end - -def freebsd_facts - { - :operatingsystem => 'FreeBSD', - :osfamily => 'FreeBSD', - :puppetversion => '4.5.2', - } -end - -def centos_6_facts - { - :operatingsystem => 'CentOS', - :osfamily => 'RedHat', - :operatingsystemmajrelease => '6', - :puppetversion => '4.5.2', - } -end - -def centos_7_facts - { - :operatingsystem => 'CentOS', - :osfamily => 'RedHat', - :operatingsystemmajrelease => '7', - :puppetversion => '4.5.2', - } +if ENV['DEBUG'] + Puppet::Util::Log.level = :debug + Puppet::Util::Log.newdestination(:console) end -def debian_wheezy_facts - { - :operatingsystem => 'Debian', - :osfamily => 'Debian', - :operatingsystemmajrelease => '8', - :puppetversion => '4.5.2', - :lsbdistcodename => 'wheezy', - } -end - -def ubuntu_1404_facts - { - :operatingsystem => 'Ubuntu', - :osfamily => 'Debian', - :operatingsystemmajrelease => '14.04', - :puppetversion => '4.5.2', - :lsbdistcodename => 'trusty', - } -end - -def ubuntu_1604_facts - { - :operatingsystem => 'Ubuntu', - :osfamily => 'Debian', - :operatingsystemmajrelease => '16.04', - :puppetversion => '4.5.2', - :lsbdistcodename => 'xenial', - } -end - -def archlinux_facts - { - :operatingsystem => 'Archlinux', - :osfamily => 'Archlinux', - :puppetversion => '4.5.2', - } +add_custom_fact :service_provider, (lambda do |_os, facts| + case facts[:osfamily].downcase + when 'archlinux' + 
'systemd' + when 'darwin' + 'launchd' + when 'debian' + 'systemd' + when 'freebsd' + 'freebsd' + when 'gentoo' + 'openrc' + when 'openbsd' + 'openbsd' + when 'redhat' + facts[:operatingsystemrelease].to_i >= 7 ? 'systemd' : 'redhat' + when 'suse' + facts[:operatingsystemmajrelease].to_i >= 12 ? 'systemd' : 'redhat' + when 'windows' + 'windows' + else + 'init' + end +end) + +RSpec.configure do |c| + # getting the correct facter version is tricky. We use facterdb as a source to mock facts + # see https://github.com/camptocamp/facterdb + # people might provide a specific facter version. In that case we use it. + # Otherwise we need to match the correct facter version to the used puppet version. + # as of 2019-10-31, puppet 5 ships facter 3.11 and puppet 6 ships facter 3.14 + # https://puppet.com/docs/puppet/5.5/about_agent.html + c.default_facter_version = if ENV['FACTERDB_FACTS_VERSION'] + ENV['FACTERDB_FACTS_VERSION'] + else + Gem::Dependency.new('', ENV['PUPPET_VERSION']).match?('', '5') ? '3.11.0' : '3.14.0' + end + + # Coverage generation + c.after(:suite) do + RSpec::Puppet::Coverage.report! + end end - -# Include code coverage report for all our specs -at_exit { RSpec::Puppet::Coverage.report! } diff --git a/spec/spec_helper_acceptance.rb b/spec/spec_helper_acceptance.rb index 21b3c51..8ef29a3 100644 --- a/spec/spec_helper_acceptance.rb +++ b/spec/spec_helper_acceptance.rb @@ -1,87 +1,25 @@ require 'beaker-rspec' require 'beaker/puppet_install_helper' require 'beaker/module_install_helper' -def change_root_password - on(hosts, 'echo "root:root" | chpasswd') -end - -def install_bolt_on(hosts) - on(hosts, "/opt/puppetlabs/puppet/bin/gem install bolt -v '0.5.1' --no-ri --no-rdoc", acceptable_exit_codes: [0]).stdout -end - run_puppet_install_helper unless ENV['BEAKER_provision'] == 'no' -change_root_password install_module_on(hosts) install_module_dependencies_on(hosts) - -UNSUPPORTED_PLATFORMS = %w[windows AIX Solaris].freeze - -DEFAULT_PASSWORD = if default[:hypervisor] == 'vagrant' - 'root' - elsif default[:hypervisor] == 'docker' - 'root' - end - -def run_task(task_name:, params: nil, password: DEFAULT_PASSWORD) - run_bolt_task(task_name: task_name, params: params, password: password) -end - -def run_bolt_task(task_name:, params: nil, password: DEFAULT_PASSWORD) - on(master, "/opt/puppetlabs/puppet/bin/bolt task run #{task_name} --modules /etc/puppetlabs/code/modules/ --nodes localhost --user root --password #{password} #{params}", acceptable_exit_codes: [0, 1]).stdout # rubocop:disable Metrics/LineLength -end - -def expect_multiple_regexes(result:, regexes:) - regexes.each do |regex| - expect(result).to match(regex) - end -end - -# This method allows a block to be passed in and if an exception is raised -# that matches the 'error_matcher' matcher, the block will wait a set number -# of seconds before retrying. -# Params: -# - max_retry_count - Max number of retries -# - retry_wait_interval_secs - Number of seconds to wait before retry -# - error_matcher - Matcher which the exception raised must match to allow retry -# Example Usage: -# retry_on_error_matching(3, 5, /OpenGPG Error/) do -# apply_manifest(pp, :catch_failures => true) -# end -def retry_on_error_matching(max_retry_count = 3, retry_wait_interval_secs = 5, error_matcher = nil) - try = 0 - begin - try += 1 - yield - rescue Exception => e - if try < max_retry_count && (error_matcher.nil? 
|| e.message =~ error_matcher) - sleep retry_wait_interval_secs - retry - else - raise - end - end -end +install_module_from_forge('camptocamp/systemd', '>= 2.0.0 < 3.0.0') RSpec.configure do |c| - # Project root - proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..')) - # Readable test descriptions c.formatter = :documentation c.before :suite do - hosts.each do |host| - if fact('osfamily') == 'Debian' - # These should be on all Deb-flavor machines by default... - # But Docker is often more slimline - shell('apt-get install apt-transport-https software-properties-common -y', acceptable_exit_codes: [0]) + case fact_on(host, 'operatingsystem') + when 'CentOS' + host.install_package('epel-release') + when 'Ubuntu' + host.install_package('software-properties-common') end - # Bolt requires gcc and make - install_package(host, 'gcc') - install_package(host, 'make') - install_bolt_on(host) + host.install_package('puppet-bolt') end end end diff --git a/spec/unit/redis_server_version_spec.rb b/spec/unit/redis_server_version_spec.rb index ec5e659..171e466 100644 --- a/spec/unit/redis_server_version_spec.rb +++ b/spec/unit/redis_server_version_spec.rb @@ -1,32 +1,18 @@ require 'spec_helper' describe 'redis_server_version', type: :fact do before { Facter.clear } after { Facter.clear } - it 'is 2.4.10 according to output' do - Facter::Util::Resolution.stubs(:which).with('redis-server').returns('/usr/bin/redis-server') - redis_server_2410_version = File.read(fixtures('facts', 'redis_server_2410_version')) - Facter::Util::Resolution.stubs(:exec).with('redis-server -v').returns(redis_server_2410_version) - expect(Facter.fact(:redis_server_version).value).to eq('2.4.10') - end - - it 'is 2.8.19 according to output' do - Facter::Util::Resolution.stubs(:which).with('redis-server').returns('/usr/bin/redis-server') - redis_server_2819_version = File.read(fixtures('facts', 'redis_server_2819_version')) - Facter::Util::Resolution.stubs(:exec).with('redis-server -v').returns(redis_server_2819_version) - expect(Facter.fact(:redis_server_version).value).to eq('2.8.19') - end - it 'is 3.2.9 according to output' do Facter::Util::Resolution.stubs(:which).with('redis-server').returns('/usr/bin/redis-server') - redis_server_3209_version = File.read(fixtures('facts', 'redis_server_3209_version')) + redis_server_3209_version = "Redis server v=3.2.9 sha=00000000:0 malloc=jemalloc-4.0.3 bits=64 build=67e0f9d6580364c0\n" Facter::Util::Resolution.stubs(:exec).with('redis-server -v').returns(redis_server_3209_version) expect(Facter.fact(:redis_server_version).value).to eq('3.2.9') end it 'is empty string if redis-server not installed' do Facter::Util::Resolution.stubs(:which).with('redis-server').returns(nil) expect(Facter.fact(:redis_server_version).value).to eq(nil) end end diff --git a/tasks/redis_cli.rb b/tasks/redis_cli.rb index 1d848a2..09be4e8 100755 --- a/tasks/redis_cli.rb +++ b/tasks/redis_cli.rb @@ -1,22 +1,22 @@ #!/opt/puppetlabs/puppet/bin/ruby require 'json' require 'open3' require 'puppet' def redis_cli(command) - stdout, stderr, status = Open3.capture3("redis-cli", command) + stdout, stderr, status = Open3.capture3('redis-cli', command) raise Puppet::Error, stderr if status != 0 { status: stdout.strip } end params = JSON.parse(STDIN.read) command = params['command'] begin result = redis_cli(command) puts result.to_json exit 0 rescue Puppet::Error => e puts({ status: 'failure', error: e.message }.to_json) exit 1 end diff --git a/templates/redis-sentinel.conf.erb b/templates/redis-sentinel.conf.erb 
index 662f8d6..b3d1f93 100644 --- a/templates/redis-sentinel.conf.erb +++ b/templates/redis-sentinel.conf.erb @@ -1,25 +1,25 @@ <% if @sentinel_bind -%> bind <%= @sentinel_bind %> <% end -%> port <%= @sentinel_port %> dir <%= @working_dir %> -<% if @daemonize -%>daemonize yes<% else -%>daemonize no<% end %> +daemonize <%= @daemonize ? 'yes' : 'no' %> pidfile <%= @pid_file %> -protected-mode <%= @protected_mode %> +protected-mode <%= @protected_mode ? 'yes' : 'no' %> sentinel monitor <%= @master_name %> <%= @redis_host %> <%= @redis_port %> <%= @quorum %> sentinel down-after-milliseconds <%= @master_name %> <%= @down_after %> sentinel parallel-syncs <%= @master_name %> <%= @parallel_sync %> sentinel failover-timeout <%= @master_name %> <%= @failover_timeout %> <% if @auth_pass -%> sentinel auth-pass <%= @master_name %> <%= @auth_pass %> <% end -%> <% if @notification_script -%> sentinel notification-script <%= @master_name %> <%= @notification_script %> <% end -%> <% if @client_reconfig_script -%> sentinel client-reconfig-script <%= @master_name %> <%= @client_reconfig_script %> <% end -%> loglevel <%= @log_level %> logfile <%= @log_file %> diff --git a/templates/redis.conf.2.4.10.erb b/templates/redis.conf.2.4.10.erb deleted file mode 100644 index 0219774..0000000 --- a/templates/redis.conf.2.4.10.erb +++ /dev/null @@ -1,505 +0,0 @@ -# Redis configuration file example - -# Note on units: when memory size is needed, it is possible to specifiy -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize <% if @daemonize -%>yes<% else -%>no<% end -%> - -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. -pidfile <%= @pid_file %> - -# Accept connections on the specified port, default is 6379. -# If port 0 is specified Redis will not listen on a TCP socket. -port <%= @port %> - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -bind <%= @bind %> - -# Specify the path for the unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -<% if @unixsocket %>unixsocket <%= @unixsocket %><% end %> -<% if @unixsocketperm %>unixsocketperm <%= @unixsocketperm %><% end %> - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout <%= @timeout %> - - -# Set server verbosity to 'debug' -# it can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel <%= @log_level %> - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. 
Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile <%= @_real_log_file %> - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -syslog-enabled <% if @syslog_enabled %>yes<% else %>no<% end %> - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -<% if @syslog_facility %>syslog-facility <%= @syslog_facility %><% else %># syslog-facility local0<% end %> - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases <%= @databases %> - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -<% if @save_db_to_disk %> -<%- @save_db_to_disk_interval.sort_by{|k,v|k}.each do |seconds, key_change| -%> -save <%= seconds -%> <%= key_change -%> <%= "\n" -%> -<%- end -%> -<% end %> - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes -rdbcompression <% if @rdbcompression -%>yes<% else -%>no<% end %> - - -# The filename where to dump the DB -<% if @dbfilename %>dbfilename <%= @dbfilename %><% else %># dbfilename dump.rdb<% end %> - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# Also the Append Only File will be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir <%= @workdir %> - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. -# -# slaveof -<% if @slaveof -%>slaveof <%= @slaveof %><% end -%> - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth -<% if @masterauth -%>masterauth <%= @masterauth %><% end -%> - -# When a slave lost the connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of data data, or the -# data set may just be empty if this is the first synchronization. 
-# -# 2) if slave-serve-stale data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data <% if @slave_serve_stale_data -%>yes<% else -%>no<% end %> - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -repl-timeout <%= @repl_timeout %> - - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -<% if @requirepass -%>requirepass <%= @requirepass %><% end -%> - -# Command renaming. -# -# It is possilbe to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# of hard to guess so that it will be still available for internal-use -# tools but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possilbe to completely kill a command renaming it into -# an empty string: -# -# rename-command CONFIG "" - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default there -# is no limit, and it's up to the number of file descriptors the Redis process -# is able to open. The special value '0' means no limits. -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -maxclients <%= @maxclients %> - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU cache, or to set -# an hard memory limit for an instance (using the 'noeviction' policy). 
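The memory limit described above and the eviction policy covered just below correspond to the `maxmemory` and `maxmemory_policy` parameters of the `redis` class. A minimal rspec-puppet sketch, not part of this change set, could assert that the consolidated template renders both directives; Debian-family facts and the `/etc/redis/redis.conf.puppet` path are assumed here, matching the class specs earlier in this diff.

require 'spec_helper'

describe 'redis' do
  # Debian-family facts assumed so the /etc/redis path below applies;
  # parameter names are taken from the class's public interface.
  let(:facts) { on_supported_os.select { |os, _f| os.start_with?('debian') }.values.first }
  let(:params) { { maxmemory: '1gb', maxmemory_policy: 'allkeys-lru' } }

  it 'renders the memory limit and eviction policy' do
    is_expected.to contain_file('/etc/redis/redis.conf.puppet').
      with_content(%r{^maxmemory 1gb}).
      with_content(%r{^maxmemory-policy allkeys-lru})
  end
end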
-# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory -<% if @maxmemory -%>maxmemory <%= @maxmemory %><% end -%> - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached? You can select among five behavior: -# -# volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# volatile-random -> remove a random key with an expire set -# allkeys->random -> remove a random key, any key -# volatile-ttl -> remove the key with the nearest expire time (minor TTL) -# noeviction -> don't expire at all, just return an error on write operations -# -# Note: with all the kind of policies, Redis will return an error on write -# operations, when there are not suitable keys for eviction. -# -# At the date of writing this commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy volatile-lru -<% if @maxmemory_policy -%>maxmemory-policy <%= @maxmemory_policy %><% end -%> - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default Redis will check three keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -# maxmemory-samples 3 -<% if @maxmemory_samples -%>maxmemory-samples <%= @maxmemory_samples %><% end -%> - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. If you can live -# with the idea that the latest records will be lost if something like a crash -# happens this is the preferred way to run Redis. If instead you care a lot -# about your data and don't want to that a single record can get lost you should -# enable the append only mode: when this mode is enabled Redis will append -# every write operation received in the file appendonly.aof. This file will -# be read on startup in order to rebuild the full dataset in memory. -# -# Note that you can have both the async dumps and the append only file if you -# like (you have to comment the "save" statements above to disable the dumps). -# Still if append only mode is enabled Redis will load the data from the -# log file at startup ignoring the dump.rdb file. -# -# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append -# log file in background when it gets too big. 
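The append-only mode introduced above is driven by the `appendonly` and `appendfsync` class parameters rendered just below. A hedged beaker-style acceptance sketch (not part of this change set; it assumes the usual `apply_manifest`/`shell` DSL provided by spec_helper_acceptance.rb) could exercise the setting end to end:

require 'spec_helper_acceptance'

describe 'redis with append-only persistence' do
  it 'applies idempotently and enables AOF' do
    pp = <<-PUPPET
      class { 'redis':
        appendonly  => true,
        appendfsync => 'everysec',
      }
    PUPPET

    apply_manifest(pp, catch_failures: true)
    apply_manifest(pp, catch_changes: true)

    # Inspect the live configuration the same way the redis_cli task does.
    shell('redis-cli CONFIG GET appendfsync') do |result|
      expect(result.stdout).to match(%r{everysec})
    end
  end
end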
- -appendonly <% if @appendonly -%>yes<% else -%>no<% end -%> - -# The name of the append only file (default: "appendonly.aof") -appendfilename <%= @appendfilename %> - -# The fsync() call tells the Operating System to actually write data on disk -# instead to wait for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log . Slow, Safest. -# everysec: fsync only if one second passed since the last fsync. Compromise. -# -# The default is "everysec" that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# If unsure, use "everysec". - -appendfsync <%= @appendfsync %> - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving the durability of Redis is -# the same as "appendfsync none", that in pratical terms means that it is -# possible to lost up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. -no-appendfsync-on-rewrite <% if @no_appendfsync_on_rewrite -%>yes<% else -%>no<% end -%> - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size will growth by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (or if no rewrite happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a precentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage <%= @auto_aof_rewrite_percentage %> -auto-aof-rewrite-min-size <%= @auto_aof_rewrite_min_size %> - - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. 
The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than <%= @slowlog_log_slower_than %> - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len <%= @slowlog_max_len %> - -################################ VIRTUAL MEMORY ############################### - -### WARNING! Virtual Memory is deprecated in Redis 2.4 -### The use of Virtual Memory is strongly discouraged. - -# Virtual Memory allows Redis to work with datasets bigger than the actual -# amount of RAM needed to hold the whole dataset in memory. -# In order to do so very used keys are taken in memory while the other keys -# are swapped into a swap file, similarly to what operating systems do -# with memory pages. -# -# To enable VM just set 'vm-enabled' to yes, and set the following three -# VM parameters accordingly to your needs. - -vm-enabled no -# vm-enabled yes - -# This is the path of the Redis swap file. As you can guess, swap files -# can't be shared by different Redis instances, so make sure to use a swap -# file for every redis process you are running. Redis will complain if the -# swap file is already in use. -# -# The best kind of storage for the Redis swap file (that's accessed at random) -# is a Solid State Disk (SSD). -# -# *** WARNING *** if you are using a shared hosting the default of putting -# the swap file under /tmp is not secure. Create a dir with access granted -# only to Redis user and configure Redis to create the swap file there. -vm-swap-file /tmp/redis.swap - -# vm-max-memory configures the VM to use at max the specified amount of -# RAM. Everything that deos not fit will be swapped on disk *if* possible, that -# is, if there is still enough contiguous space in the swap file. -# -# With vm-max-memory 0 the system will swap everything it can. Not a good -# default, just specify the max amount of RAM you can in bytes, but it's -# better to leave some margin. For instance specify an amount of RAM -# that's more or less between 60 and 80% of your free RAM. -vm-max-memory 0 - -# Redis swap files is split into pages. An object can be saved using multiple -# contiguous pages, but pages can't be shared between different objects. -# So if your page is too big, small objects swapped out on disk will waste -# a lot of space. If you page is too small, there is less space in the swap -# file (assuming you configured the same number of total swap file pages). -# -# If you use a lot of small objects, use a page size of 64 or 32 bytes. -# If you use a lot of big objects, use a bigger page size. -# If unsure, use the default :) -vm-page-size 32 - -# Number of total memory pages in the swap file. 
-# Given that the page table (a bitmap of free/used pages) is taken in memory, -# every 8 pages on disk will consume 1 byte of RAM. -# -# The total swap size is vm-page-size * vm-pages -# -# With the default of 32-bytes memory pages and 134217728 pages Redis will -# use a 4 GB swap file, that will use 16 MB of RAM for the page table. -# -# It's better to use the smallest acceptable value for your application, -# but the default is large in order to work in most conditions. -vm-pages 134217728 - -# Max number of VM I/O threads running at the same time. -# This threads are used to read/write data from/to swap file, since they -# also encode and decode objects from disk to memory or the reverse, a bigger -# number of threads can help with big objects even if they can't help with -# I/O itself as the physical device may not be able to couple with many -# reads/writes operations at the same time. -# -# The special value of 0 turn off threaded I/O and enables the blocking -# Virtual Memory implementation. -vm-max-threads 4 - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-zipmap-entries <%= @hash_max_ziplist_entries %> -hash-max-zipmap-value <%= @hash_max_ziplist_value %> - -# Similarly to hashes, small lists are also encoded in a special way in order -# to save a lot of space. The special representation is only used when -# you are under the following limits: -list-max-ziplist-entries <%= @list_max_ziplist_entries %> -list-max-ziplist-value <%= @list_max_ziplist_value %> - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happens to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries <%= @set_max_intset_entries %> - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries <%= @zset_max_ziplist_entries %> -zset-max-ziplist-value <%= @zset_max_ziplist_value %> - - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into an hash table -# that is rhashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# active rehashing the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply form time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. 
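Note that this 2.4-era template writes the old `hash-max-zipmap-*` directive names from the `hash_max_ziplist_*` parameters, while the newer templates use the `ziplist` names; directive availability shifting between releases is exactly why the config specs earlier in this diff gate their expectations on the detected fact or on `package_ensure`. A rough standalone sketch of that comparison (the helper name is illustrative only; it relies on `Puppet::Util::Package.versioncmp` from the puppet gem):

require 'puppet/util/package'

# Illustrative helper (not part of the module): strip a Debian epoch such as
# the "3:" in "3:3.2.1", then compare against the version that introduced a
# given directive, the same way the config specs above gate their expectations.
def directive_supported?(redis_version, introduced_in)
  plain = redis_version.to_s.sub(%r{\A\d+:}, '')
  Puppet::Util::Package.versioncmp(plain, introduced_in) >= 0
end

directive_supported?('3:3.2.1', '3.2.0') # => true  (protected-mode is rendered)
directive_supported?('2.8.4', '2.8.5')   # => false (tcp-backlog is omitted)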
-activerehashing <% if @activerehashing -%>yes<% else -%>no<% end -%> - - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all redis server but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# include /path/to/local.conf -# include /path/to/other.conf -<% if @extra_config_file -%> -include <%= @extra_config_file %> -<% end -%> diff --git a/templates/redis.conf.2.8.erb b/templates/redis.conf.2.8.erb deleted file mode 100644 index 37e974a..0000000 --- a/templates/redis.conf.2.8.erb +++ /dev/null @@ -1,740 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis server but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf -<% if @extra_config_file -%> -include <%= @extra_config_file %> -<% end -%> - -################################ GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize <% if @daemonize -%>yes<% else -%>no<% end -%> - -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. -pidfile <%= @pid_file %> - -# Accept connections on the specified port, default is 6379. -# If port 0 is specified Redis will not listen on a TCP socket. -port <%= @port %> - -<% if scope.lookupvar('redis_version_real') and (scope.function_versioncmp([scope.lookupvar('redis_version_real'), '2.8.5']) >= 0) -%> -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog <%= @tcp_backlog %> -<% end -%> - -# By default Redis listens for connections from all the network interfaces -# available on the server. 
It is possible to listen to just one or multiple -# interfaces using the "bind" configuration directive, followed by one or -# more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 -bind <%= @bind %> - -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 -<% if @unixsocket %>unixsocket <%= @unixsocket %><% end %> -<% if @unixsocketperm %>unixsocketperm <%= @unixsocketperm %><% end %> - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout <%= @timeout %> - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 60 seconds. -tcp-keepalive <%= @tcp_keepalive %> - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel <%= @log_level %> - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile <%= @_real_log_file %> - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -syslog-enabled <% if @syslog_enabled %>yes<% else %>no<% end %> - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -<% if @syslog_facility %>syslog-facility <%= @syslog_facility %><% else %># syslog-facility local0<% end %> - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases <%= @databases %> - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. 
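The save points described here come from the `save_db_to_disk_interval` hash (seconds mapped to a minimum number of key changes); the ERB loop a few lines below emits one `save <seconds> <changes>` line per entry. A standalone Ruby sketch of that rendering step, with hypothetical values shaped like the parameter:

# Hypothetical values only; the hash shape mirrors save_db_to_disk_interval.
save_db_to_disk_interval = { '900' => '1', '300' => '10', '60' => '10000' }

save_lines = save_db_to_disk_interval.
             sort_by { |seconds, _changes| seconds }.
             map { |seconds, changes| "save #{seconds} #{changes}" }

save_lines
# => ["save 300 10", "save 60 10000", "save 900 1"]
# The order is lexicographic because the keys are strings, matching the
# sort_by used in the template loop below.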
-# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" -<% if @save_db_to_disk %> -<%- @save_db_to_disk_interval.sort_by{|k,v|k}.each do |seconds, key_change| -%> -save <%= seconds -%> <%= key_change -%> <%= "\n" -%> -<%- end -%> -<% end %> -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error <% if @stop_writes_on_bgsave_error -%>yes<% else -%>no<% end %> - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression <% if @rdbcompression -%>yes<% else -%>no<% end %> - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -<% if @dbfilename %>dbfilename <%= @dbfilename %><% else %># dbfilename dump.rdb<% end %> - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir <%= @workdir %> - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. 
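Replication as summarised in the three points above is driven by the `slaveof` and `masterauth` parameters rendered just below. A hedged rspec-puppet sketch (not part of this change set; RedHat-family facts assumed so the flat `/etc/redis.conf.puppet` path applies, and the parameter names are taken from the class interface):

require 'spec_helper'

describe 'redis' do
  # CentOS facts assumed; slaveof and masterauth back the directives below.
  let(:facts) { on_supported_os.select { |os, _f| os.start_with?('centos') }.values.first }
  let(:params) { { slaveof: '192.0.2.10 6379', masterauth: 'a-very-strong-password' } }

  it 'configures the node as a replica of the given master' do
    is_expected.to contain_file('/etc/redis.conf.puppet').
      with_content(%r{^slaveof 192\.0\.2\.10 6379}).
      with_content(%r{^masterauth a-very-strong-password})
  end
end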
-# -# slaveof -<% if @slaveof -%>slaveof <%= @slaveof %><% end -%> - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth -<% if @masterauth -%>masterauth <%= @masterauth %><% end -%> - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data <% if @slave_serve_stale_data -%>yes<% else -%>no<% end %> - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only <% if @slave_read_only -%>yes<% else -%>no<% end %> - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -repl-timeout <%= @repl_timeout %> - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay <% if @repl_disable_tcp_nodelay -%>yes<% else -%>no<% end -%> - -# Set the replication backlog size. 
The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -repl-backlog-size <%= @repl_backlog_size %> - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# A value of 0 means to never release the backlog. -# -repl-backlog-ttl <%= @repl_backlog_ttl %> - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority <%= @slave_priority %> - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. -min-slaves-to-write <%= @min_slaves_to_write %> -min-slaves-max-lag <%= @min_slaves_max_lag %> - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -<% if @requirepass -%>requirepass <%= @requirepass %><% end -%> - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. 
For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -maxclients <%= @maxclients %> - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU cache, or to set -# a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory -<% if @maxmemory -%>maxmemory <%= @maxmemory %><% end -%> - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key according to the LRU algorithm -# volatile-random -> remove a random key with an expire set -# allkeys-random -> remove a random key, any key -# volatile-ttl -> remove the key with the nearest expire time (minor TTL) -# noeviction -> don't expire at all, just return an error on write operations -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. 
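Since this LIMITS block is where maxmemory and the eviction policy are templated (via @maxmemory here and @maxmemory_policy / @maxmemory_samples just below), a short hedged example of the corresponding parameters, with names again inferred from the template variables:

```puppet
# Hypothetical sketch for an LRU-cache style instance.
class { 'redis':
  maxmemory         => '2gb',          # usual Redis size suffixes apply
  maxmemory_policy  => 'allkeys-lru',  # evict any key, least-recently-used first
  maxmemory_samples => 5,              # keys sampled per eviction decision
}
# Rendered into redis.conf roughly as:
#   maxmemory 2gb
#   maxmemory-policy allkeys-lru
#   maxmemory-samples 5
```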
-# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy volatile-lru -<% if @maxmemory_policy -%>maxmemory-policy <%= @maxmemory_policy %><% end -%> - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default Redis will check three keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -# maxmemory-samples 3 -<% if @maxmemory_samples -%>maxmemory-samples <%= @maxmemory_samples %><% end -%> - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly <% if @appendonly -%>yes<% else -%>no<% end -%> - -# The name of the append only file (default: "appendonly.aof") -appendfilename <%= @appendfilename %> - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -appendfsync <%= @appendfsync %> - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. 
Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. -no-appendfsync-on-rewrite <% if @no_appendfsync_on_rewrite -%>yes<% else -%>no<% end -%> - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. -auto-aof-rewrite-percentage <%= @auto_aof_rewrite_percentage %> -auto-aof-rewrite-min-size <%= @auto_aof_rewrite_min_size %> - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. 
Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than <%= @slowlog_log_slower_than %> - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len <%= @slowlog_max_len %> - -############################# Event notification ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events <% if @notify_keyspace_events -%><%= @notify_keyspace_events %><% else -%>""<% end -%> - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries <%= @hash_max_ziplist_entries %> -hash-max-ziplist-value <%= @hash_max_ziplist_value %> - -# Similarly to hashes, small lists are also encoded in a special way in order -# to save a lot of space. The special representation is only used when -# you are under the following limits: -list-max-ziplist-entries <%= @list_max_ziplist_entries %> -list-max-ziplist-value <%= @list_max_ziplist_value %> - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries <%= @set_max_intset_entries %> - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. 
This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries <%= @zset_max_ziplist_entries %> -zset-max-ziplist-value <%= @zset_max_ziplist_value %> - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing <% if @activerehashing -%>yes<% else -%>no<% end -%> - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave <%= @output_buffer_limit_slave %> -client-output-buffer-limit pubsub <%= @output_buffer_limit_pubsub %> - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. 
Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz <%= @hz %> - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync <% if @aof_rewrite_incremental_fsync -%>yes<% else -%>no<% end -%> diff --git a/templates/redis.conf.3.2.erb b/templates/redis.conf.3.2.erb deleted file mode 100644 index 3c6a97c..0000000 --- a/templates/redis.conf.3.2.erb +++ /dev/null @@ -1,787 +0,0 @@ -# Redis configuration file example - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize <% if @daemonize -%>yes<% else -%>no<% end -%> - -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. -pidfile <%= @pid_file %> - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode <%= @protected_mode %> - -# Accept connections on the specified port, default is 6379. -# If port 0 is specified Redis will not listen on a TCP socket. -port <%= @port %> - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog <%= @tcp_backlog %> - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -bind <%= @bind %> - -# Specify the path for the unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. 
-# -<% if @unixsocket %>unixsocket <%= @unixsocket %><% end %> -<% if @unixsocketperm %>unixsocketperm <%= @unixsocketperm %><% end %> - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout <%= @timeout %> - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 60 seconds. -tcp-keepalive <%= @tcp_keepalive %> - -# Set server verbosity to 'debug' -# it can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel <%= @log_level %> - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile <%= @_real_log_file %> - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -syslog-enabled <% if @syslog_enabled %>yes<% else %>no<% end %> - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -<% if @syslog_facility %>syslog-facility <%= @syslog_facility %><% else %># syslog-facility local0<% end %> - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases <%= @databases %> - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" -<% if @save_db_to_disk %> -<%- @save_db_to_disk_interval.sort_by{|k,v|k}.each do |seconds, key_change| -%> -save <%= seconds -%> <%= key_change -%> <%= "\n" -%> -<%- end -%> -<% end %> -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error <% if @stop_writes_on_bgsave_error -%>yes<% else -%>no<% end %> - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression <% if @rdbcompression -%>yes<% else -%>no<% end %> - -# Since verison 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -<% if @dbfilename %>dbfilename <%= @dbfilename %><% else %># dbfilename dump.rdb<% end %> - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# Also the Append Only File will be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir <%= @workdir %> - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. -# -# slaveof -<% if @slaveof -%>slaveof <%= @slaveof %><% end -%> - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth -<% if @masterauth -%>masterauth <%= @masterauth %><% end -%> - -# When a slave loses the connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data <% if @slave_serve_stale_data -%>yes<% else -%>no<% end %> - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. 
-# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only <% if @slave_read_only -%>yes<% else -%>no<% end %> - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -repl-timeout <%= @repl_timeout %> - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay <% if @repl_disable_tcp_nodelay -%>yes<% else -%>no<% end -%> - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -repl-backlog-size <%= @repl_backlog_size %> - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# A value of 0 means to never release the backlog. -# -repl-backlog-ttl <%= @repl_backlog_ttl %> - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority <%= @slave_priority %> - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. 
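The min-slaves feature described at the end of this hunk pairs a replica count with a maximum acceptable lag, fed from @min_slaves_to_write and @min_slaves_max_lag a few lines below; @repl_backlog_size sizes the partial-resync buffer. A hedged sketch, assuming like-named parameters:

```puppet
# Hypothetical sketch: refuse writes unless at least 2 replicas have
# reported in within the last 10 seconds.
class { 'redis':
  min_slaves_to_write => 2,
  min_slaves_max_lag  => 10,
  repl_backlog_size   => '10mb',  # larger backlog allows a longer partial-resync window
}
# Rendered:
#   min-slaves-to-write 2
#   min-slaves-max-lag 10
#   repl-backlog-size 10mb
```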
-# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. -min-slaves-to-write <%= @min_slaves_to_write %> -min-slaves-max-lag <%= @min_slaves_max_lag %> - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -<% if @requirepass -%>requirepass <%= @requirepass %><% end -%> - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# of hard to guess so that it will be still available for internal-use -# tools but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command renaming it into -# an empty string: -# -# rename-command CONFIG "" - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able ot configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -maxclients <%= @maxclients %> - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU cache, or to set -# an hard memory limit for an instance (using the 'noeviction' policy). 
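Because the SECURITY block only emits requirepass when @requirepass is set, and the warning stresses how quickly passwords can be brute-forced, an illustrative (assumed) parameter setting would be:

```puppet
# Hypothetical sketch; use a long random secret, ideally pulled from Hiera
# with eyaml or another secret backend rather than hard-coded like this.
class { 'redis':
  requirepass => 'use-a-long-randomly-generated-string-here',
  maxclients  => 10000,
}
# Rendered:
#   requirepass use-a-long-randomly-generated-string-here
#   maxclients 10000
```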
-# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory -<% if @maxmemory -%>maxmemory <%= @maxmemory %><% end -%> - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached? You can select among five behavior: -# -# volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# volatile-random -> remove a random key with an expire set -# allkeys-random -> remove a random key, any key -# volatile-ttl -> remove the key with the nearest expire time (minor TTL) -# noeviction -> don't expire at all, just return an error on write operations -# -# Note: with all the kind of policies, Redis will return an error on write -# operations, when there are not suitable keys for eviction. -# -# At the date of writing this commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy volatile-lru -<% if @maxmemory_policy -%>maxmemory-policy <%= @maxmemory_policy %><% end -%> - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default Redis will check three keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -# maxmemory-samples 3 -<% if @maxmemory_samples -%>maxmemory-samples <%= @maxmemory_samples %><% end -%> - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. 
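The append-only-file discussion maps onto the @appendonly, @appendfilename and @appendfsync variables used a few lines below. A minimal sketch, assuming like-named parameters, for an instance that favours durability over RDB-only snapshots:

```puppet
# Hypothetical sketch: enable AOF with the usual once-per-second fsync.
class { 'redis':
  appendonly     => true,
  appendfilename => 'appendonly.aof',
  appendfsync    => 'everysec',  # 'always' is safer but much slower; 'no' defers to the OS
}
# Rendered:
#   appendonly yes
#   appendfilename appendonly.aof
#   appendfsync everysec
```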
- -appendonly <% if @appendonly -%>yes<% else -%>no<% end -%> - -# The name of the append only file (default: "appendonly.aof") -appendfilename <%= @appendfilename %> - -# The fsync() call tells the Operating System to actually write data on disk -# instead to wait for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log . Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec" that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -appendfsync <%= @appendfsync %> - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving the durability of Redis is -# the same as "appendfsync none", that in practical terms means that it is -# possible to lost up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. -no-appendfsync-on-rewrite <% if @no_appendfsync_on_rewrite -%>yes<% else -%>no<% end -%> - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size will growth by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (or if no rewrite happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage <%= @auto_aof_rewrite_percentage %> -auto-aof-rewrite-min-size <%= @auto_aof_rewrite_min_size %> - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. 
-# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated <% if @aof_load_truncated -%>yes<% else -%>no<% end -%> - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than <%= @slowlog_log_slower_than %> - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. 
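Both slow-log knobs come straight from @slowlog_log_slower_than (in microseconds) and @slowlog_max_len. A hedged example, assuming the class exposes them one-to-one:

```puppet
# Hypothetical sketch: log any command slower than 10 ms and keep the
# last 1024 entries (reclaim the memory later with SLOWLOG RESET).
class { 'redis':
  slowlog_log_slower_than => 10000,  # microseconds, so 10000 = 10 ms
  slowlog_max_len         => 1024,
}
# Rendered:
#   slowlog-log-slower-than 10000
#   slowlog-max-len 1024
```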
-slowlog-max-len <%= @slowlog_max_len %> - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enalbed at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold <%= @latency_monitor_threshold %> - -############################# Event notification ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events <% if @notify_keyspace_events -%><%= @notify_keyspace_events %><% else -%>""<% end -%> - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries <%= @hash_max_ziplist_entries %> -hash-max-ziplist-value <%= @hash_max_ziplist_value %> - -# Similarly to hashes, small lists are also encoded in a special way in order -# to save a lot of space. 
The special representation is only used when -# you are under the following limits: -list-max-ziplist-entries <%= @list_max_ziplist_entries %> -list-max-ziplist-value <%= @list_max_ziplist_value %> - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happens to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries <%= @set_max_intset_entries %> - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries <%= @zset_max_ziplist_entries %> -zset-max-ziplist-value <%= @zset_max_ziplist_value %> - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes <%= @hll_sparse_max_bytes %> - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into an hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# active rehashing the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply form time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing <% if @activerehashing -%>yes<% else -%>no<% end -%> - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients -# slave -> slave clients and MONITOR clients -# pubsub -> clients subcribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). 
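The hard-limit / soft-limit / soft-seconds triple described here is passed through verbatim via @output_buffer_limit_slave and @output_buffer_limit_pubsub, so each value is a single space-separated string. A sketch with the commonly quoted upstream defaults, assuming identically named parameters:

```puppet
# Hypothetical sketch: hard limit, soft limit, soft-limit seconds.
class { 'redis':
  output_buffer_limit_slave  => '256mb 64mb 60',
  output_buffer_limit_pubsub => '32mb 8mb 60',
}
# Rendered:
#   client-output-buffer-limit slave 256mb 64mb 60
#   client-output-buffer-limit pubsub 32mb 8mb 60
```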
-# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled just setting it to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave <%= @output_buffer_limit_slave %> -client-output-buffer-limit pubsub <%= @output_buffer_limit_pubsub %> - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform accordingly to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz <%= @hz %> - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync <% if @aof_rewrite_incremental_fsync -%>yes<% else -%>no<% end -%> - -# Redis Cluster Settings -<% if @cluster_enabled -%> -cluster-enabled yes -cluster-config-file <%= @cluster_config_file %> -cluster-node-timeout <%= @cluster_node_timeout %> -<% end -%> - - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis server but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# include /path/to/local.conf -# include /path/to/other.conf -<% if @extra_config_file -%> -include <%= @extra_config_file %> -<% end -%> diff --git a/templates/redis.conf.erb b/templates/redis.conf.erb index e06b7af..d395bb1 100644 --- a/templates/redis.conf.erb +++ b/templates/redis.conf.erb @@ -1,768 +1,797 @@ # Redis configuration file example # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: # # 1k => 1000 bytes # 1kb => 1024 bytes # 1m => 1000000 bytes # 1mb => 1024*1024 bytes # 1g => 1000000000 bytes # 1gb => 1024*1024*1024 bytes # # units are case insensitive so 1GB 1Gb 1gB are all the same. # By default Redis does not run as a daemon. Use 'yes' if you need it. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 
daemonize <% if @daemonize -%>yes<% else -%>no<% end -%> # When running daemonized, Redis writes a pid file in /var/run/redis.pid by # default. You can specify a custom pid file location here. pidfile <%= @pid_file %> +<% if @supports_protected_mode -%> +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode <%= @protected_mode ? 'yes' : 'no' %> + +<% end -%> # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. port <%= @port %> # TCP listen() backlog. # # In high requests-per-second environments you need an high backlog in order # to avoid slow clients connections issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. tcp-backlog <%= @tcp_backlog %> -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -bind <%= @bind %> +<% unless @bind_arr.empty? -%> +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +bind <%= @bind_arr.join(' ') %> +<% end -%> +<% unless @unixsocket.empty? -%> # Specify the path for the unix socket that will be used to listen for # incoming connections. There is no default, so Redis will not listen # on a unix socket when not specified. # -<% if @unixsocket %>unixsocket <%= @unixsocket %><% end %> -<% if @unixsocketperm %>unixsocketperm <%= @unixsocketperm %><% end %> +unixsocket <%= @unixsocket %> +<% unless @unixsocketperm.empty? %>unixsocketperm <%= @unixsocketperm %><% end -%> +<% end -%> # Close the connection after a client is idle for N seconds (0 to disable) timeout <%= @timeout %> # TCP keepalive. # # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence # of communication. This is useful for two reasons: # # 1) Detect dead peers. # 2) Take the connection alive from the point of view of network # equipment in the middle. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection the double of the time is needed. # On other kernels the period depends on the kernel configuration. # # A reasonable value for this option is 60 seconds. 
tcp-keepalive <%= @tcp_keepalive %> # Set server verbosity to 'debug' # it can be one of: # debug (a lot of information, useful for development/testing) # verbose (many rarely useful info, but not a mess like the debug level) # notice (moderately verbose, what you want in production probably) # warning (only very important / critical messages are logged) loglevel <%= @log_level %> # Specify the log file name. Also 'stdout' can be used to force # Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null logfile <%= @_real_log_file %> # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. syslog-enabled <% if @syslog_enabled %>yes<% else %>no<% end %> # Specify the syslog identity. # syslog-ident redis # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. <% if @syslog_facility %>syslog-facility <%= @syslog_facility %><% else %># syslog-facility local0<% end %> # Set the number of databases. The default database is DB 0, you can select # a different one on a per-connection basis using SELECT where # dbid is a number between 0 and 'databases'-1 databases <%= @databases %> ################################ SNAPSHOTTING ################################# # # Save the DB on disk: # # save # # Will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. # # In the example below the behaviour will be to save: # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed # # Note: you can disable saving at all commenting all the "save" lines. # # It is also possible to remove all the previously configured save # points by adding a save directive with a single empty string argument # like in the following example: # # save "" <% if @save_db_to_disk %> <%- @save_db_to_disk_interval.sort_by{|k,v|k}.each do |seconds, key_change| -%> save <%= seconds -%> <%= key_change -%> <%= "\n" -%> <%- end -%> <% end %> # By default Redis will stop accepting writes if RDB snapshots are enabled # (at least one save point) and the latest background save failed. # This will make the user aware (in a hard way) that data is not persisting # on disk properly, otherwise chances are that no one will notice and some # distater will happen. # # If the background saving process will start working again Redis will # automatically allow writes again. # # However if you have setup your proper monitoring of the Redis server # and persistence, you may want to disable this feature so that Redis will # continue to work as usual even if there are problems with disk, # permissions, and so forth. stop-writes-on-bgsave-error <% if @stop_writes_on_bgsave_error -%>yes<% else -%>no<% end %> # Compress string objects using LZF when dump .rdb databases? # For default that's set to 'yes' as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. rdbcompression <% if @rdbcompression -%>yes<% else -%>no<% end %> # Since verison 5 of RDB a CRC64 checksum is placed at the end of the file. # This makes the format more resistant to corruption but there is a performance # hit to pay (around 10%) when saving and loading RDB files, so you can disable it # for maximum performances. 
# # RDB files created with checksum disabled have a checksum of zero that will # tell the loading code to skip the check. rdbchecksum yes # The filename where to dump the DB <% if @dbfilename %>dbfilename <%= @dbfilename %><% else %># dbfilename dump.rdb<% end %> # The working directory. # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. # # Also the Append Only File will be created inside this directory. # # Note that you must specify a directory here, not a file name. dir <%= @workdir %> ################################# REPLICATION ################################# # Master-Slave replication. Use slaveof to make a Redis instance a copy of # another Redis server. Note that the configuration is local to the slave # so for example it is possible to configure the slave to save the DB with a # different interval, or to listen to another port, and so on. # # slaveof <% if @slaveof -%>slaveof <%= @slaveof %><% end -%> # If the master is password protected (using the "requirepass" configuration # directive below) it is possible to tell the slave to authenticate before # starting the replication synchronization process, otherwise the master will # refuse the slave request. # # masterauth <% if @masterauth -%>masterauth <%= @masterauth %><% end -%> # When a slave loses the connection with the master, or when the replication # is still in progress, the slave can act in two different ways: # # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # # 2) if slave-serve-stale data is set to 'no' the slave will reply with # an error "SYNC with master in progress" to all the kind of commands # but to INFO and SLAVEOF. # slave-serve-stale-data <% if @slave_serve_stale_data -%>yes<% else -%>no<% end %> # You can configure a slave instance to accept writes or not. Writing against # a slave instance may be useful to store some ephemeral data (because data # written on a slave will be easily deleted after resync with the master) but # may also cause problems if clients are writing to it because of a # misconfiguration. # # Since Redis 2.6 by default slaves are read-only. # # Note: read only slaves are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. # Still a read only slave exports by default all the administrative commands # such as CONFIG, DEBUG, and so forth. To a limited extend you can improve # security of read only slaves using 'rename-command' to shadow all the # administrative / dangerous commands. slave-read-only <% if @slave_read_only -%>yes<% else -%>no<% end %> # Slaves send PINGs to server in a predefined interval. It's possible to change # this interval with the repl_ping_slave_period option. The default value is 10 # seconds. # -# repl-ping-slave-period 10 +repl-ping-slave-period <%= @repl_ping_slave_period %> # The following option sets a timeout for both Bulk transfer I/O timeout and # master data or ping response timeout. The default value is 60 seconds. # # It is important to make sure that this value is greater than the value # specified for repl-ping-slave-period otherwise a timeout will be detected # every time there is low traffic between the master and the slave. # repl-timeout <%= @repl_timeout %> # Disable TCP_NODELAY on the slave socket after SYNC? 
# # If you select "yes" Redis will use a smaller number of TCP packets and # less bandwidth to send data to slaves. But this can add a delay for # the data to appear on the slave side, up to 40 milliseconds with # Linux kernels using a default configuration. # # If you select "no" the delay for data to appear on the slave side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions # or when the master and slaves are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay <% if @repl_disable_tcp_nodelay -%>yes<% else -%>no<% end -%> # Set the replication backlog size. The backlog is a buffer that accumulates # slave data when slaves are disconnected for some time, so that when a slave # wants to reconnect again, often a full resync is not needed, but a partial # resync is enough, just passing the portion of data the slave missed while # disconnected. # # The bigger the replication backlog, the longer the time the slave can be # disconnected and later be able to perform a partial resynchronization. # # The backlog is only allocated once there is at least a slave connected. # repl-backlog-size <%= @repl_backlog_size %> # After a master has no longer connected slaves for some time, the backlog # will be freed. The following option configures the amount of seconds that # need to elapse, starting from the time the last slave disconnected, for # the backlog buffer to be freed. # # A value of 0 means to never release the backlog. # repl-backlog-ttl <%= @repl_backlog_ttl %> # The slave priority is an integer number published by Redis in the INFO output. # It is used by Redis Sentinel in order to select a slave to promote into a # master if the master is no longer working correctly. # # A slave with a low priority number is considered better for promotion, so # for instance if there are three slaves with priority 10, 100, 25 Sentinel will # pick the one wtih priority 10, that is the lowest. # # However a special priority of 0 marks the slave as not able to perform the # role of master, so a slave with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. slave-priority <%= @slave_priority %> # It is possible for a master to stop accepting writes if there are less than # N slaves connected, having a lag less or equal than M seconds. # # The N slaves need to be in "online" state. # # The lag in seconds, that must be <= the specified value, is calculated from # the last ping received from the slave, that is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but # will limit the window of exposure for lost writes in case not enough slaves # are available, to the specified number of seconds. # # For example to require at least 3 slaves with a lag <= 10 seconds use: # # min-slaves-to-write 3 # min-slaves-max-lag 10 # # Setting one or the other to 0 disables the feature. # # By default min-slaves-to-write is set to 0 (feature disabled) and # min-slaves-max-lag is set to 10. min-slaves-to-write <%= @min_slaves_to_write %> min-slaves-max-lag <%= @min_slaves_max_lag %> ################################## SECURITY ################################### # Require clients to issue AUTH before processing any other # commands. This might be useful in environments in which you do not trust # others with access to the host running redis-server. 
# # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). # # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. # <% if @requirepass -%>requirepass <%= @requirepass %><% end -%> # Command renaming. # # It is possible to change the name of dangerous commands in a shared # environment. For instance the CONFIG command may be renamed into something # of hard to guess so that it will be still available for internal-use # tools but not available for general clients. # # Example: # # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 # # It is also possible to completely kill a command renaming it into # an empty string: # # rename-command CONFIG "" ################################### LIMITS #################################### # Set the max number of connected clients at the same time. By default # this limit is set to 10000 clients, however if the Redis server is not # able ot configure the process file limit to allow for the specified limit # the max number of allowed clients is set to the current file limit # minus 32 (as Redis reserves a few file descriptors for internal uses). # # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # maxclients <%= @maxclients %> # Don't use more memory than the specified amount of bytes. # When the memory limit is reached Redis will try to remove keys # accordingly to the eviction policy selected (see maxmemmory-policy). # # If Redis can't remove keys according to the policy, or if the policy is # set to 'noeviction', Redis will start to reply with errors to commands # that would use more memory, like SET, LPUSH, and so on, and will continue # to reply to read-only commands like GET. # # This option is usually useful when using Redis as an LRU cache, or to set # an hard memory limit for an instance (using the 'noeviction' policy). # # WARNING: If you have slaves attached to an instance with maxmemory on, # the size of the output buffers needed to feed the slaves are subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output # buffer of slaves is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # # In short... if you have slaves attached it is suggested that you set a lower # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # # maxmemory <% if @maxmemory -%>maxmemory <%= @maxmemory %><% end -%> # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached? You can select among five behavior: # # volatile-lru -> remove the key with an expire set using an LRU algorithm # allkeys-lru -> remove any key accordingly to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations # # Note: with all the kind of policies, Redis will return an error on write # operations, when there are not suitable keys for eviction. 
# # At the date of writing this commands are: set setnx setex append # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby # getset mset msetnx exec sort # # The default is: # # maxmemory-policy volatile-lru <% if @maxmemory_policy -%>maxmemory-policy <%= @maxmemory_policy %><% end -%> # LRU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can select as well the sample # size to check. For instance for default Redis will check three keys and # pick the one that was used less recently, you can change the sample size # using the following configuration directive. # # maxmemory-samples 3 <% if @maxmemory_samples -%>maxmemory-samples <%= @maxmemory_samples %><% end -%> ############################## APPEND ONLY MODE ############################### # By default Redis asynchronously dumps the dataset on disk. This mode is # good enough in many applications, but an issue with the Redis process or # a power outage may result into a few minutes of writes lost (depending on # the configured save points). # # The Append Only File is an alternative persistence mode that provides # much better durability. For instance using the default data fsync policy # (see later in the config file) Redis can lose just one second of writes in a # dramatic event like a server power outage, or a single write if something # wrong with the Redis process itself happens, but the operating system is # still running correctly. # # AOF and RDB persistence can be enabled at the same time without problems. # If the AOF is enabled on startup Redis will load the AOF, that is the file # with the better durability guarantees. # # Please check http://redis.io/topics/persistence for more information. appendonly <% if @appendonly -%>yes<% else -%>no<% end -%> # The name of the append only file (default: "appendonly.aof") appendfilename <%= @appendfilename %> # The fsync() call tells the Operating System to actually write data on disk # instead to wait for more data in the output buffer. Some OS will really flush # data on disk, some other OS will just try to do it ASAP. # # Redis supports three different modes: # # no: don't fsync, just let the OS flush the data when it wants. Faster. # always: fsync after every write to the append only log . Slow, Safest. # everysec: fsync only one time every second. Compromise. # # The default is "everysec" that's usually the right compromise between # speed and data safety. It's up to you to understand if you can relax this to # "no" that will let the operating system flush the output buffer when # it wants, for better performances (but if you can live with the idea of # some data loss consider the default persistence mode that's snapshotting), # or on the contrary, use "always" that's very slow but a bit safer than # everysec. # # More details please check the following article: # http://antirez.com/post/redis-persistence-demystified.html # # If unsure, use "everysec". appendfsync <%= @appendfsync %> # When the AOF fsync policy is set to always or everysec, and a background # saving process (a background save or AOF log background rewriting) is # performing a lot of I/O against the disk, in some Linux configurations # Redis may block too long on the fsync() call. 
Note that there is no fix for # this currently, as even performing fsync in a different thread will block # our synchronous write(2) call. # # In order to mitigate this problem it's possible to use the following option # that will prevent fsync() from being called in the main process while a # BGSAVE or BGREWRITEAOF is in progress. # # This means that while another child is saving the durability of Redis is # the same as "appendfsync none", that in practical terms means that it is # possible to lost up to 30 seconds of log in the worst scenario (with the # default Linux settings). # # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. no-appendfsync-on-rewrite <% if @no_appendfsync_on_rewrite -%>yes<% else -%>no<% end -%> # Automatic rewrite of the append only file. # Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size will growth by the specified percentage. # # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (or if no rewrite happened since the restart, the size of # the AOF at startup is used). # # This base size is compared to the current size. If the current size is # bigger than the specified percentage, the rewrite is triggered. Also # you need to specify a minimal size for the AOF file to be rewritten, this # is useful to avoid rewriting the AOF file even if the percentage increase # is reached but it is still pretty small. # # Specify a percentage of zero in order to disable the automatic AOF # rewrite feature. auto-aof-rewrite-percentage <%= @auto_aof_rewrite_percentage %> auto-aof-rewrite-min-size <%= @auto_aof_rewrite_min_size %> # An AOF file may be found to be truncated at the end during the Redis # startup process, when the AOF data gets loaded back into memory. # This may happen when the system where Redis is running # crashes, especially when an ext4 filesystem is mounted without the # data=ordered option (however this can't happen when Redis itself # crashes or aborts but the operating system still works correctly). # # Redis can either exit with an error when this happens, or load as much # data as possible (the default now) and start if the AOF file is found # to be truncated at the end. The following option controls this behavior. # # If aof-load-truncated is set to yes, a truncated AOF file is loaded and # the Redis server starts emitting a log to inform the user of the event. # Otherwise if the option is set to no, the server aborts with an error # and refuses to start. When the option is set to no, the user requires # to fix the AOF file using the "redis-check-aof" utility before to restart # the server. # # Note that if the AOF file will be found to be corrupted in the middle # the server will still exit with an error. This option only applies when # Redis will try to read more data from the AOF file but not enough bytes # will be found. aof-load-truncated <% if @aof_load_truncated -%>yes<% else -%>no<% end -%> ################################ LUA SCRIPTING ############################### # Max execution time of a Lua script in milliseconds. # # If the maximum execution time is reached Redis will log that a script is # still in execution after the maximum allowed time and will start to # reply to queries with an error. # # When a long running script exceed the maximum execution time only the # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. 
The first can be # used to stop a script that did not yet called write commands. The second # is the only way to shut down the server in the case a write commands was # already issue by the script but the user don't want to wait for the natural # termination of the script. # # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified # execution time. The execution time does not include the I/O operations # like talking with the client, sending the reply and so forth, # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). # # You can configure the slow log with two parameters: one tells Redis # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the # slow log. When a new command is logged the oldest one is removed from the # queue of logged commands. # The following time is expressed in microseconds, so 1000000 is equivalent # to one second. Note that a negative number disables the slow log, while # a value of zero forces the logging of every command. slowlog-log-slower-than <%= @slowlog_log_slower_than %> # There is no limit to this length. Just be aware that it will consume memory. # You can reclaim memory used by the slow log with SLOWLOG RESET. slowlog-max-len <%= @slowlog_max_len %> ################################ LATENCY MONITOR ############################## # The Redis latency monitoring subsystem samples different operations # at runtime in order to collect data related to possible sources of # latency of a Redis instance. # # Via the LATENCY command this information is available to the user that can # print graphs and obtain reports. # # The system only logs operations that were performed in a time equal or # greater than the amount of milliseconds specified via the # latency-monitor-threshold configuration directive. When its value is set # to zero, the latency monitor is turned off. # # By default latency monitoring is disabled since it is mostly not needed # if you don't have latency issues, and collecting data has a performance # impact, that while very small, can be measured under big load. Latency # monitoring can easily be enalbed at runtime using the command # "CONFIG SET latency-monitor-threshold " if needed. latency-monitor-threshold <%= @latency_monitor_threshold %> ############################# Event notification ############################## # Redis can notify Pub/Sub clients about events happening in the key space. # This feature is documented at http://redis.io/topics/notifications # # For instance if keyspace events notification is enabled, and a client # performs a DEL operation on key "foo" stored in the Database 0, two # messages will be published via Pub/Sub: # # PUBLISH __keyspace@0__:foo del # PUBLISH __keyevent@0__:del foo # # It is possible to select the events that Redis will notify among a set # of classes. Every class is identified by a single character: # # K Keyspace events, published with __keyspace@__ prefix. # E Keyevent events, published with __keyevent@__ prefix. # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 
# $ String commands # l List commands # s Set commands # h Hash commands # z Sorted set commands # x Expired events (events generated every time a key expires) # e Evicted events (events generated when a key is evicted for maxmemory) # A Alias for g$lshzxe, so that the "AKE" string means all the events. # # The "notify-keyspace-events" takes as argument a string that is composed # of zero or multiple characters. The empty string means that notifications # are disabled. # # Example: to enable list and generic events, from the point of view of the # event name, use: # # notify-keyspace-events Elg # # Example 2: to get the stream of the expired keys subscribing to channel # name __keyevent@0__:expired use: # # notify-keyspace-events Ex # # By default all notifications are disabled because most users don't need # this feature and the feature has some overhead. Note that if you don't # specify at least one of K or E, no events will be delivered. notify-keyspace-events <% if @notify_keyspace_events -%><%= @notify_keyspace_events %><% else -%>""<% end -%> ############################### ADVANCED CONFIG ############################### # Hashes are encoded using a memory efficient data structure when they have a # small number of entries, and the biggest entry does not exceed a given # threshold. These thresholds can be configured using the following directives. hash-max-ziplist-entries <%= @hash_max_ziplist_entries %> hash-max-ziplist-value <%= @hash_max_ziplist_value %> # Similarly to hashes, small lists are also encoded in a special way in order # to save a lot of space. The special representation is only used when # you are under the following limits: list-max-ziplist-entries <%= @list_max_ziplist_entries %> list-max-ziplist-value <%= @list_max_ziplist_value %> # Sets have a special encoding in just one case: when a set is composed # of just strings that happens to be integers in radix 10 in the range # of 64 bit signed integers. # The following configuration setting sets the limit in the size of the # set in order to use this special memory saving encoding. set-max-intset-entries <%= @set_max_intset_entries %> # Similarly to hashes and lists, sorted sets are also specially encoded in # order to save a lot of space. This encoding is only used when the length and # elements of a sorted set are below the following limits: zset-max-ziplist-entries <%= @zset_max_ziplist_entries %> zset-max-ziplist-value <%= @zset_max_ziplist_value %> # HyperLogLog sparse representation bytes limit. The limit includes the # 16 bytes header. When an HyperLogLog using the sparse representation crosses # this limit, it is converted into the dense representation. # # A value greater than 16000 is totally useless, since at that point the # dense representation is more memory efficient. # # The suggested value is ~ 3000 in order to have the benefits of # the space efficient encoding without slowing down too much PFADD, # which is O(N) with the sparse encoding. The value can be raised to # ~ 10000 when CPU is not a concern, but space is, and the data set is # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. hll-sparse-max-bytes <%= @hll_sparse_max_bytes %> # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in # order to help rehashing the main Redis hash table (the one mapping top-level # keys to values). 
The hash table implementation Redis uses (see dict.c) # performs a lazy rehashing: the more operation you run into an hash table # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table. # # The default is to use this millisecond 10 times every second in order to # active rehashing the main dictionaries, freeing memory when possible. # # If unsure: # use "activerehashing no" if you have hard latency requirements and it is # not a good thing in your environment that Redis can reply form time to time # to queries with 2 milliseconds delay. # # use "activerehashing yes" if you don't have such hard requirements but # want to free memory asap when possible. activerehashing <% if @activerehashing -%>yes<% else -%>no<% end -%> # The client output buffer limits can be used to force disconnection of clients # that are not reading data from the server fast enough for some reason (a # common reason is that a Pub/Sub client can't consume messages as fast as the # publisher can produce them). # # The limit can be set differently for the three different classes of clients: # # normal -> normal clients # slave -> slave clients and MONITOR clients # pubsub -> clients subcribed to at least one pubsub channel or pattern # # The syntax of every client-output-buffer-limit directive is the following: # # client-output-buffer-limit # # A client is immediately disconnected once the hard limit is reached, or if # the soft limit is reached and remains reached for the specified number of # seconds (continuously). # So for instance if the hard limit is 32 megabytes and the soft limit is # 16 megabytes / 10 seconds, the client will get disconnected immediately # if the size of the output buffers reach 32 megabytes, but will also get # disconnected if the client reaches 16 megabytes and continuously overcomes # the limit for 10 seconds. # # By default normal clients are not limited because they don't receive data # without asking (in a push way), but just after a request, so only # asynchronous clients may create a scenario where data is requested faster # than it can read. # # Instead there is a default limit for pubsub and slave clients, since # subscribers and slaves receive data in a push fashion. # # Both the hard or the soft limit can be disabled just setting it to zero. client-output-buffer-limit normal 0 0 0 client-output-buffer-limit slave <%= @output_buffer_limit_slave %> client-output-buffer-limit pubsub <%= @output_buffer_limit_pubsub %> # Redis calls an internal function to perform many background tasks, like # closing connections of clients in timeout, purging expired keys that are # never requested, and so forth. # # Not all tasks are performed with the same frequency, but Redis checks for # tasks to perform accordingly to the specified "hz" value. # # By default "hz" is set to 10. Raising the value will use more CPU when # Redis is idle, but at the same time will make Redis more responsive when # there are many keys expiring at the same time, and timeouts may be # handled with more precision. # # The range is between 1 and 500, however a value over 100 is usually not # a good idea. Most users should use the default of 10 and raise this up to # 100 only in environments where very low latency is required. hz <%= @hz %> # When a child rewrites the AOF file, if the following option is enabled # the file will be fsync-ed every 32 MB of data generated. 
This is useful # in order to commit the file to the disk more incrementally and avoid # big latency spikes. aof-rewrite-incremental-fsync <% if @aof_rewrite_incremental_fsync -%>yes<% else -%>no<% end -%> # Redis Cluster Settings <% if @cluster_enabled -%> cluster-enabled yes cluster-config-file <%= @cluster_config_file %> cluster-node-timeout <%= @cluster_node_timeout %> +cluster-slave-validity-factor <%= @cluster_slave_validity_factor %> +cluster-require-full-coverage <% if @cluster_require_full_coverage -%>yes<% else -%>no<% end %> +cluster-migration-barrier <%= @cluster_migration_barrier %> <% end -%> ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you # have a standard template that goes to all Redis server but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # include /path/to/local.conf # include /path/to/other.conf <% if @extra_config_file -%> include <%= @extra_config_file %> <% end -%> diff --git a/templates/service_templates/redis.service.erb b/templates/service_templates/redis.service.erb index 1898e35..88c7470 100644 --- a/templates/service_templates/redis.service.erb +++ b/templates/service_templates/redis.service.erb @@ -1,15 +1,25 @@ [Unit] -Description=Advanced key-value store for <%= @title %> +Description=Redis Advanced key-value store for instance <%= @title %> After=network.target +After=network-online.target +Wants=network-online.target [Service] +RuntimeDirectory=redis +RuntimeDirectoryMode=2755 +<%# Redis on Xenial is too old for systemd integration -%> +<% if @facts['os']['name'] == 'Ubuntu' and @facts['os']['release']['major'] == '16.04' -%> Type=forking ExecStart=/usr/bin/redis-server <%= @redis_file_name %> -ExecStop=/usr/bin/redis-server -p <%= @port %> shutdown -TimeoutStopSec=0 +<% else -%> +Type=notify +ExecStart=/usr/bin/redis-server <%= @redis_file_name %> --supervised systemd +<% end -%> +ExecStop=/usr/bin/redis-cli -p <%= @port %> shutdown Restart=always User=<%= @service_user %> Group=<%= @service_user %> +LimitNOFILE=<%= @ulimit %> [Install] WantedBy=multi-user.target diff --git a/types/loglevel.pp b/types/loglevel.pp new file mode 100644 index 0000000..49fd38e --- /dev/null +++ b/types/loglevel.pp @@ -0,0 +1,7 @@ +# @summary Specify the server verbosity level. +# This can be one of: +# * debug (a lot of information, useful for development/testing) +# * verbose (many rarely useful info, but not a mess like the debug level) +# * notice (moderately verbose, what you want in production probably) +# * warning (only very important / critical messages are logged) +type Redis::LogLevel = Enum['debug', 'verbose', 'notice', 'warning'] diff --git a/types/redisurl.pp b/types/redisurl.pp new file mode 100644 index 0000000..9bd4fff --- /dev/null +++ b/types/redisurl.pp @@ -0,0 +1 @@ +type Redis::RedisUrl = Pattern[/(^redis:\/\/)/]
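The two type aliases above are the only new public API surface in this change; the rest is template and service plumbing. As a rough illustration of how they might be consumed, here is a minimal Puppet sketch. It assumes the redis class exposes parameters matching the variables referenced in templates/redis.conf.erb (log_level, protected_mode, bind); those parameter names, and the profile::redis wrapper class, are illustrative assumptions rather than part of this change.

# Illustrative only: a wrapper profile exercising the new type aliases.
# Redis::LogLevel and Redis::RedisUrl come from types/loglevel.pp and
# types/redisurl.pp above; the redis class parameter names are assumptions
# inferred from the variables used in templates/redis.conf.erb.
class profile::redis (
  # 'debug', 'verbose', 'notice' or 'warning' pass the type check; 'trace' would not.
  Redis::LogLevel $log_level = 'notice',
  # Must match Pattern[/(^redis:\/\/)/]; a bare 'localhost:6379' would be rejected.
  Redis::RedisUrl $announce_url = 'redis://127.0.0.1:6379',
) {
  class { 'redis':
    log_level      => $log_level,
    protected_mode => true,
    bind           => ['127.0.0.1'],
  }
}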