diff --git a/data/defaults.yaml b/data/defaults.yaml
index c770bd3b..8e18dfdc 100644
--- a/data/defaults.yaml
+++ b/data/defaults.yaml
@@ -1,4266 +1,4268 @@
---
### See also defaults_security.yaml for public key/cert fingerprint blocks
###
dns::local_cache: true
dns::nameservers:
- 127.0.0.1
dns::search_domains:
- internal.softwareheritage.org
- softwareheritage.org
dns::forward_zones:
'internal.softwareheritage.org.': "%{alias('dns::local_nameservers')}"
'100.168.192.in-addr.arpa.': "%{alias('dns::local_nameservers')}"
'101.168.192.in-addr.arpa.': "%{alias('dns::local_nameservers')}"
'200.168.192.in-addr.arpa.': "%{alias('dns::local_nameservers')}"
'internal.staging.swh.network.': "%{alias('dns::local_nameservers')}"
'128.168.192.in-addr.arpa.': "%{alias('dns::local_nameservers')}"
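# Note on the interpolation used above: "%{alias('key')}" substitutes the
# referenced key's full value (here, the array of local nameservers) with
# its data type preserved, whereas "%{hiera('key')}" would stringify it.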
# dns::forwarders is per-location. No default value
# dns::local_nameservers is per-location. No default value
# ntp::servers is per-location. Default value:
ntp::servers:
- 0.debian.pool.ntp.org
- 1.debian.pool.ntp.org
- 2.debian.pool.ntp.org
- 3.debian.pool.ntp.org
sudo::configs: {}
# smtp::relay_hostname is per-location. Default value:
smtp::relay_hostname: 'pergamon.internal.softwareheritage.org'
smtp::relayhost: "[%{lookup('smtp::relay_hostname')}]"
smtp::mydestination:
- "%{::fqdn}"
smtp::mynetworks:
- 127.0.0.0/8
- "[::ffff:127.0.0.0]/104"
- "[::1]/128"
smtp::relay_destinations: []
smtp::virtual_aliases: []
smtp::mail_aliases:
- user: anlambert
aliases:
- antoine.lambert33@gmail.com
- user: ardumont
aliases:
- antoine.romain.dumont@gmail.com
- user: ddouard
aliases:
- david.douard@sdfa3.org
- user: olasd
aliases:
- nicolas+swhinfra@dandrimont.eu
- user: morane
aliases:
- morane.gg@gmail.com
- user: postgres
aliases:
- root
- user: rdicosmo
aliases:
- roberto@dicosmo.org
- user: root
aliases:
- olasd
- zack
- ardumont
- user: seirl
aliases:
- antoine.pietri1@gmail.com
- user: swhstorage
aliases:
- root
- user: swhworker
aliases:
- zack
- olasd
- ardumont
- user: swhdeposit
aliases:
- ardumont
- user: zack
aliases:
- zack@upsilon.cc
- user: vlorentz
aliases:
- valentin.lorentz@inria.fr
- user: haltode
aliases:
- haltode@gmail.com
- user: danseraf
aliases:
- me@danieleserafini.eu
locales::default_locale: C.UTF-8
locales::installed_locales:
- C.UTF-8 UTF-8
- en_US.UTF-8 UTF-8
- fr_FR.UTF-8 UTF-8
- it_IT.UTF-8 UTF-8
timezone: Etc/UTC
packages:
- acl
- etckeeper
- git
- htop
- ipython3
- molly-guard
- moreutils
- ncdu
- nfs-common
- python3
- ruby-filesystem
- strace
- tmux
- vim
- zsh
- fish
- zstd
packages::desktop:
- autojump
- chromium
- curl
- emacs
- ethtool
- gnome
- i3
- ii
- libx11-dev
- mosh
- myrepos
- net-tools
- ruby-dev
- rxvt-unicode-256color
- screen
- scrot
- tcpdump
- tree
- vim-nox
- weechat
- weechat-scripts
packages::devel:
- arcanist
- elpa-magit
- git-email
- gitg
- gitk
- ltrace
- perl-doc
packages::devel::debian:
- devscripts
- dpkg-dev
- reprepro
- sbuild
packages::devel::python:
- graphviz
- make
- python3-arrow
- python3-azure-storage
- python3-blinker
- python3-celery
- python3-cffi
- python3-click
- python3-dateutil
- python3-dev
- python3-dulwich
- python3-flake8
- python3-flask
- python3-flask-api
- python3-flask-limiter
- python3-flask-testing
- python3-libcloud
- python3-msgpack
- python3-nose
- python3-psycopg2
- python3-pygit2
- python3-requests
- python3-retrying
- python3-sphinx
- python3-subvertpy
- python3-vcversioner
- python3-venv
- python3-wheel
packages::devel::broker:
- rabbitmq-server
packages::devel::postgres:
- apgdiff
- barman
- check-postgres
- libpq-dev
- postgresql
- postgresql-autodoc
- postgresql-client
- postgresql-contrib
- postgresql-doc
- postgresql-plpython3-11
users:
root:
uid: 0
full_name:
shell: /bin/bash
groups: []
authorized_keys:
root@louvre:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDMLEWHlUQldlvZs5rg0y42lRNAfOhD+6pmO8a73DzpJWHTqvAlfteLpU78IPjSacB4dO5ish1E/1RX/HC+Bt8p2v4RBqbCnVLx2w+Hx4ahWu6qbeTVmTz+U++1SQrHnL08fSlhT0OekCw0lRZM2sQq21FZi6+vul97Ecikag4Xaw6Qfumylu94pM3t05uzTUlKk1+6VMCjhT8dlSe8VS8OirVQpE/OqYtTMAWtQaMXGHPCsqDdYRAKzkJ8GjH7ydZmX5VCRyqS0RvPKAlcJfLCs5HBtv0u5rbeGtiHhuzhj/j3YgS/6NJOC2mUfcetcDOMPLnhkKpnF0vUAzTsJ7aR
root@banco:
type: ssh-ed25519
key: AAAAC3NzaC1lZDI1NTE5AAAAIDcljv9eR52wJsu9yYan6/riIQw70lQuyz+Qt0XpGXMs
zack:
uid: 1000
full_name: Stefano Zacchiroli
shell: /usr/bin/zsh
groups:
- adm
- swhdev
- swhstorage
- swhscheduler
- swhdeploy
- sudo
- gitorious
- swhteam
authorized_keys:
zack-software-heritage:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAACAQDU0O8tkUqtQCelLEatOGfGpx1sIwHPSMA+7OdXoZjZG5pT9Sfgf3ITsNgo1iYWge5bpH/TKhhvf20B05fa8cCEE5ULaD+xdV9eTIvBEaCiP36HH33WNl/UV8T8klTG2sqBXUgLMJuinfGkuRJ977ndm7mjNwzl3Ghf6JwKfpHrvob4GLc0hm54yzcnNEzQZLcdxmOCWdwTINKnL+W/DDM8NR3vNF6T5+xaiLJzsS0IGcTubklugD3m05qbswS/uACWys3FzRM8tttw/0wCRrC9SCSKoDLonab5y3Ld6vCj1k12J2RAHSqJYwVCm70JRPWZcmU67Udi6kbqkJMftp04K0pplu8V7RLPrpwLyH4sPx7Kkhslvxqj0rerLPOkoDkqneFgxNoMcxN5ayod7fBJAq5jQUmGozeTtgPLKybnxRDhsYpkEH9paZroQ3CqDsA0dptOpedVpcQUSbiLMaYd8kgCPkVIdKANnTGGXDcTfWv21IvFx6sKm1kld2Me3ExVMq7JFcmXutF/IQom9F4vj/xd/7Lt4KmqZKyiAq4n5iaPIRUbZvmwd2D6umOHpMGlqKwtsiWRUYnAVvhRfuSZmgrGgliYiYr+vU2xeWe+XXQhP9vt3eItmdSp/8/+a2lqaIE9slE75hEI2n8in7DeSn6QhFDbyUKwZz5OwK7QVw==
olasd:
uid: 1001
full_name: Nicolas Dandrimont
shell: /bin/bash
groups:
- adm
- swhdev
- swhstorage
- swhscheduler
- swhdeploy
- sudo
- gitorious
- swhteam
authorized_keys:
nicolasd@darboux:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ1TCpfzrvxLhEMhxjbxqPDCwY0nazIr1cyIbhGD2bUdAbZqVMdNtr7MeDnlLIKrIPJWuvltauvLNkYU0iLc1jMntdBCBM3hgXjmTyDtc8XvXseeBp5tDqccYNR/cnDUuweNcL5tfeu5kzaAg3DFi5Dsncs5hQK5KQ8CPKWcacPjEk4ir9gdFrtKG1rZmg/wi7YbfxrJYWzb171hdV13gSgyXdsG5UAFsNyxsKSztulcLKxvbmDgYbzytr38FK2udRk7WuqPbtEAW1zV4yrBXBSB/uw8EAMi+wwvLTwyUcEl4u0CTlhREljUx8LhYrsQUCrBcmoPAmlnLCD5Q9XrGH
ardumont:
uid: 1003
full_name: Antoine R. Dumont
shell: /usr/bin/zsh
groups:
- adm
- swhdev
- swhstorage
- swhscheduler
- swhdeploy
- sudo
- gitorious
- swhteam
authorized_keys:
eniotna.t@gmail.com:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDZarzgHrzUYspvrgSI6fszrALo92BDys7QOkJgUfZa9t9m4g7dUANNtwBiqIbqijAQPmB1zKgG6QTZC5rJkRy6KqXCW/+Qeedw/FWIbuI7jOD5WxnglbEQgvPkkB8kf1xIF7icRfWcQmK2je/3sFd9yS4/+jftNMPPXkBCxYm74onMenyllA1akA8FLyujLu6MNA1D8iLLXvz6pBDTT4GZ5/bm3vSE6Go8Xbuyu4SCtYZSHaHC2lXZ6Hhi6dbli4d3OwkUWz+YhFGaEra5Fx45Iig4UCL6kXPkvL/oSc9KGerpT//Xj9qz1K7p/IrBS8+eA4X69bHYYV0UZKDADZSn
ardumont@louvre:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQC0Xj8nwGWTb6VGFNIrlhVTLX6VFTlvpirjdgOTOz8riRxBTS9ra35g3cz8zfDl0iVyE455GXzxlm33w/uu3DX0jQOIzkcoEBRw+T33EK89lo6tCCd9xQrteWCTNR1ZBFloHSnYk2m7kw9kyrisziyAdULsCrXmMd3BH1oJyEpISA+sv/dtVpIOWdEQmkbLmdHl2uEdjBLjqb3BtAp2oJZMmppE5YjAx0Aa1+7uSnURf7NnwMx+0wTDMdfqn8z4wqI8eQny+B+bqLH9kY++52FfMVALuErGh5+75/vtd2xzRQamjKsBlTGjFFbMRagZiVNLDX2wtdudhNmnQDIKA+rH
swhworker:
uid: 1004
full_name: SWH Worker Account
shell: /bin/bash
groups:
- swhdeploy
- gitorious
swhstorage:
uid: 1005
full_name: SWH Storage Account
shell: /bin/bash
groups:
- swhdeploy
- swhstorage
swhwebapp:
uid: 1006
full_name: SWH Web App Account
shell: /bin/bash
groups: []
swhbackup:
uid: 1007
full_name: SWH Backup Account
shell: /bin/bash
groups: []
rdicosmo:
uid: 1008
full_name: Roberto Di Cosmo
shell: /bin/bash
groups:
- swhteam
authorized_keys:
dicosmo@voyager:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAACAQC5aS/3Cps2Ru9EW+nIF9Z9o6/xq1thwtCgpIjSPgcrm2BVisj6xbD5OOapS3U6BpLKjWZG8sMGBCsJJ3S1cP0s2I+xHFToqCcbfOxIe/tq/UgTtxGJ0+TfUKNoD+QJjIKnjyC+HVEQm5bSm8mJv0vptj4On8yNopytSGuLcFHHnMB2t+IOkHnTW7n3emhh3SZKAcpI1h7WvPqsqBobMFDMeqvGeHaH2AM2OSoUi7AY+MmcVL0Je6QtJqpz60QI5dvaM4AsobC12AZSJKXnuqQTY6nJy4r9jPRK8RUqo5PuAAsNtlxf5xA4s1LrDR5PxBDpYz47Pq2LHtI9Hgf/SFB3IqZeBKqquMI1xThRBwP307/vOtTiwJr4ZKcpOH+SbU7Tnde4n8siM719QZM8VITtrbwm/VBiEwvhGC/23npX4S55W7Et/l9gmeP3Q+lSw50vBuQhBSn7BzedPM1CqbTN/zqM8TCDUtPVIo+6b2s5ao/Vcq9vBXm5bP0xZeNsqsCl05zpCShudKpT6AlMGAaRTd6NUHHsf4D1JjNx3v42R3vQr6OgHELVMGECuyPs3zWHOS/P6AdD0yJTSOMaklRh2HGN8uj0+aQ7RhnrkYqRfhN+6UkrTANuxdb44AGdLmBAKIYglVrAJe+DEji/LzJdZ22baAWg4ar/WikpFJtxkw==
swhteamannex:
uid: 1009
full_name: SWH Team Git Annex Account
shell: /bin/bash
groups:
- swhteam
authorized_keys:
swhteamannex@louvre:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAACAQDL/Ef9kktq/QkJ0lohan8ObQ3o7hMf7EOQPjO+u7UhIMjBNufJnaftQRGIA6N1/wEsDvxxNNz75/oJECJHgZs2OOTJJPsKfYeybmSBocSa/sn6IKK7/b/qlwHJlSGWPGVgbtfP0KexlSAKAmKZuJyqjES5igTLV5w4wTjvUUen9QyefuUehnCX3MJhTpoyixp7icXE80aNMaCPwHZppKb/28sNlPX3MbSONGM45wSFRXNuj0mAAjrgojkhAqFNnI9oKNAt9mDcw1hV0n86VvrDhEbMCJP/z58ecn376BgyXQ8zNUPIr2g0SrSPUNjfxZHfJ0XYpF7624wOMOmZE3fsQzZh+FeMF0IFRPvcG34RaelV9dXiy+/q45oqwbMF464gFSbyt++4jpgqHdsARM4zR//jBhyLvFXR+GaKC3hFENno5W5Raff4XE5rzN/q9jVJBNfvfuEPWrapyM3A/ePeuK3SyNJwyIx+bOEQXsRdxEWKszTeJO2SLPWtCrKrC+G4/HktQSQOj5S9a+N6HoKD8E889eBEYoeZGPIuzMot4cuUlyPt3P99z4oRIaeC6XwUCvZCD2DaTAkQWQMsmOn+soaeZ1zBHbsCBbV0mBMRx7K4Vjs62vhSelryQAXW+cBgd6+f5XBjOnNhHQhsNsDfYP4Kmztn58faQV2TzGG5ow==
swhscheduler:
uid: 1010
full_name: SWH Scheduler Account
shell: /bin/bash
groups:
- swhscheduler
jbertran:
uid: 2001
full_name: Jordi Bertran de Balanda
shell: /bin/false
groups: []
password: "!"
qcampos:
uid: 2002
full_name: Quentin Campos
shell: /bin/false
groups: []
password: "!"
gitorious:
uid: 5000
full_name: Gitorious System User
shell: /bin/false
groups:
- gitorious
fiendish:
uid: 1011
full_name: Avi Kelman
shell: /bin/false
groups: []
password: "!"
morane:
uid: 1012
full_name: Morane Otilia Gruenpeter
shell: /bin/bash
groups:
- swhdev
- swhstorage
- swhteam
authorized_keys:
morane.gg@gmail.com:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDm8kH1pP+4ENKmpkTCkL2ashxxnOFVndGrcvfX05lV1hOo2NdItpdoR9txIgFEs3d7v73mtH4nWciUyaK7FIByrtvsR2TIhdVgEcb0Xai8viV+sDMTndpiNlWNilbfxm0K70tgpG4BeSWRJy8cPxnCR9CWoB2Vo9Df7lDKz1LXDgfY4VLJd69ahf1DPFUDjpWIEQdPFX2ZyGUYM+0yPXIoyYW/qreDt1JkYZXXVbRAV8j44/TVgTRYJLgYb9ThW6WzlGM1S4uP7GQdAuROCcspqW3ahV/UmV4Z9SM6S34NN182KvM0Ve7uxAPQz+IdWOgZTK0pvd+hfjHKbLSTA6I3
seirl:
uid: 1013
full_name: Antoine Pietri
shell: /usr/bin/zsh
groups:
- swhdev
- swhstorage
- swhteam
- swhdeploy
authorized_keys:
seirl:
type: ssh-ed25519
key: AAAAC3NzaC1lZDI1NTE5AAAAILiua8eEg+nU0XSbYPTgnOMftzvpbN+u7v5jDabeO/0E
ssushant:
uid: 1014
full_name: Sushant
shell: /bin/false
groups: []
password: "!"
anlambert:
uid: 1015
full_name: Antoine Lambert
shell: /bin/bash
groups:
- swhdev
- swhstorage
- swhteam
- swhdeploy
- swhwebapp
authorized_keys:
antoine.lambert@inria.fr:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAACAQDLWPcZnSUszEedMa39dT3ZCHpRod3NTs6WT4OfMMRVahrhTtWYdSiNGy8U3kEQveTZvMrb9WLtLPB3K8o7Xrf8WCI8iTOl9eb9DVjE9XL+zS0ZAcEmoZ5YH8e3gEDoDm8ZrMxF+V5XSlvhNi6kbWzJdqhXu++bJHHqGrKUHeTCQCfpYYMrsnvhPjtxe+90BK7e+IGm1Ha8LZMCCmOtz0XggxD8d2mFBaP2p8v9xsM48KfwFvsRMb3TZIaO/+NcsRSTe7wfFAR1pb14pi5LZAHeb2tpWfVH2vQGaE7Rej+Ycf4UOeaRmFGpimw7u7fugvDvKfZ/vs7w7Qs2RtxNdqJf9JM+vvi78OQbloufot1Tz2r19aDbhM9nsCn+Uo3rNfkmD+UcSMKrRJCMEXVBbaY/bgzs7XoqCJ8ODE2U/dF3NtHBZr+CB52iilUtemXy+Xwqw4TSs/r9vW7/XueTdb0Yp/cUs5uLCqCwlMpGS5okorpdJextp5gRuN6EMlUo6PffRiz5T0CqKm1xJu0NeT0EaacAXoGTDQaS4pIQGglqWfAOmjej9dM8gxAF6rgrx70uJt6Hy18tvzdB5iwJ4F2LUjcZhFnrxjUDzhjPoDBiRtPNgEKrCc30OHsveqXwMPo3v/d3np1Vpkum0JEwmp83q92P5T2rbf+wiruxZhhtww==
grouss:
uid: 1016
full_name: Guillaume Rousseau
shell: /bin/bash
groups:
- swhteam
authorized_keys:
guillaume.rousseau@univ-paris-diderot.fr:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Akcdxrod/MFcHg53dCf7iZY/ph9MR0tWU08pjMDfU04j1nAgmHmvumYbxBtFNnd0nu4A9YY4nT79273PCE3c6ba3zSGS9DBYhrASGDqHBECrgEREM3YPXpA2NI0FKEZ878Ic3CQlYaOmRoe/QkFpm2j8CMoG4VdKp0EcvV1RCTgWqJY1P4KC30CJUg+OdGRaaqHEoSskjstU5yjbZCC9M90Hz0xO+MsMl/xKdcDmvwbLDMtp/3SKDQeyN4Q7Uu/zZwoZ8FmgEU4Xp7nKN3yCiEB9rqMkP/lLY71hTPHn/GiZnPo4rWL13w3unuI3X0GDpqxPxjt0LZN4xQEGEn+1
ftigeot:
uid: 1017
full_name: Francois Tigeot
shell: /bin/false
password: "!"
groups: []
swhdeposit:
uid: 1018
full_name: SWH Deposit App Account
shell: /bin/bash
groups:
- swhscheduler
swhvault:
uid: 1019
full_name: SWH Vault Account
shell: /bin/bash
groups:
- swhdeploy
- swhstorage
- swhvault
ddouard:
uid: 1020
full_name: David Douard
shell: /bin/bash
groups:
- swhdev
- swhteam
- swhscheduler
authorized_keys:
david.douard@sdfa3.org:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAACAQCoON7De2Bx03owpZfzbOyucZTmyQdm7F+LP4D4H9EyOFxtyMpjH2S9Ve/JvMoFIWGQQlXSkYzRv63Z0BzPLKD2NsYgomcjOLdw1Baxnv8VOH+Q01g4B3cabcP2LMVjerHt/KRkY3E6dnKLQGE5UiER/taQ7KazAwvu89nUd4BJsV43rJ3X3DtFEfH3lR4ZEIgFyPUkVemQAjBhueFmN3w8debOdr7t9cBpnYvYKzLQN+G/kQVFc+fgs+fFOtOv+Az9kTXChfLs5pKPBm+MuGxz4gS3fPiAjY9cN6vGzr7ZNkCRUSUjJ10Hlm7Gf2EN8f+k6iSR4CPeixDcZ+scbCg4dCORqTsliSQzUORIJED9fbUR6bBjF4rRwm5GvnXx5ZTToWDJu0PSHYOkomqffp30wqvAvs6gLb+bG1daYsOLp+wYru3q09J9zUAA8vNXoWYaERFxgwsmsf57t8+JevUuePJGUC45asHjQh/ON1H5PDXtULmeD1GKkjqyaS7SBNbpOWgQb21l3pwhLet3Mq3TJmxVqzGMDnYvQMUCkiPdZq2pDplzfpDpOKLaDg8q82rR5+/tAfB4P2Z9RCOqnMLRcQk9AluTyO1D472Mkp+v5VA4di0eTWZ0tuzwYJEft0OVo+QOVTslCGsyGiEUoOcHzkrdgsT5uQziyAfgTMSuiw==
vlorentz:
uid: 1021
full_name: Valentin Lorentz
shell: /usr/bin/zsh
groups:
- swhdev
- swhteam
authorized_keys:
valentin.lorentz@inria.fr:
type: ssh-ed25519
key: AAAAC3NzaC1lZDI1NTE5AAAAILsRMQjrrfUjX1ka9e6YlyMyDvTC+qk5a21Fp9yXYI7p
vlorentz@softwareheritage.org:
type: ssh-ed25519
key: AAAAC3NzaC1lZDI1NTE5AAAAIIjJoY4XBTTNsxLVF/sUKBI4WGR2AIiR9qfMdspnsRfJ
haltode:
uid: 1022
full_name: Thibault Allancon
shell: /usr/bin/zsh
groups:
- swhdev
- swhteam
authorized_keys:
haltode@gmail.com:
type: ssh-ed25519
key: AAAAC3NzaC1lZDI1NTE5AAAAIORGwY56PpvgwMWqDei718PPriV6U7LL5JMPJWS7zTcg
danseraf:
uid: 1023
full_name: Daniel Serafini
groups:
- swhdev
shell: /usr/bin/fish
authorized_keys:
me@danieleserafini.eu:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABgQDsQ1QxD/xHOOhCAarH9o3oAfT7YCwxXCAVaazxC1ZMygWZUE95oMu6D2Wib5Q4GBKD35ddclY/l5GsYB5uXbl1UxAFUPe4COqTEt+7deMSWakv46ceb5oHxOkMAF4w400FV1Pi1usk2TpArarOPuxN7yKu54sBACI5HEezn3KOvxPYt/DUAt+XdrfLsiZPyzjOYYezocCV+O1PkivhC99cXHtlwTBRntWTjlyUt9p46U6Uf2G9u88v4v2KopH0sQG7nAYNXN7W14pB925fnDYDHFYUoKDCBbJiQMMKKlQxJZLSfh/KX6kX7OIXOiMclIfmBYAUwDHU3MevdczAIj8JWspUVyRf32B1jnD+H8UnDTCwApCFytcvzDuoYCgiUVk4bDpJOHeb8V4dh6UZt0DFa4iiLsVIX8MjcqaI5TmmvaYgjGPSTLlUsasUJnVqCweQNTiZPhDiqrs258aQvSGf634k10lxmhAAB0xAGVO6Zj1mBjqS+XDJiLzf4mWwm4M=
groups:
adm:
gid: 4 # assigned from base-files
zack:
gid: 1000
olasd:
gid: 1001
ardumont:
gid: 1003
ddouard:
gid: 1020
swhworker:
gid: 1004
swhdev:
gid: 1002
swhstorage:
gid: 1005
swhdeploy:
gid: 1006
swhbackup:
gid: 1007
swhwebapp:
gid: 1008
swhteam:
gid: 1009
swhscheduler:
gid: 1010
sudo:
gid: 27 # assigned from base-files
gitorious:
gid: 5000
swhdeposit:
gid: 1018
swhvault:
gid: 1019
gunicorn::statsd::host: 127.0.0.1:8125
munin::master::hostname: munin.internal.softwareheritage.org
rabbitmq::monitoring::user: swhdev
# The following password keys are in private data:
# - rabbitmq::monitoring::password
# - swh::deploy::worker::task_broker::password
# - swh::deploy::scheduler::task_broker::password
rabbitmq::server::users:
- name: "%{hiera('rabbitmq::monitoring::user')}"
is_admin: true
password: "%{hiera('rabbitmq::monitoring::password')}"
tags: []
- name: swhconsumer
is_admin: false
password: "%{hiera('swh::deploy::worker::task_broker::password')}"
tags: []
- name: swhproducer
is_admin: false
password: "%{hiera('swh::deploy::scheduler::task_broker::password')}"
tags:
- management
puppet::master::hostname: pergamon.internal.softwareheritage.org
puppet::master::puppetdb: pergamon.internal.softwareheritage.org
puppetdb::master::config::terminus_package: puppet-terminus-puppetdb
strict_transport_security::max_age: 15768000 # 6 months (182.5 days)
php::version: '7.3'
# Those variables get picked up by 'include ::php::fpm::daemon'
php::fpm::daemon::log_owner: www-data
php::fpm::daemon::log_group: adm
php::fpm::daemon::log_dir_mode: '0750'
# Those variables get picked up by 'include ::apache'
apache::server_tokens: 'Prod'
apache::server_signature: 'Off'
apache::trace_enable: 'Off'
# Those variables get picked up by 'include ::apache::mod::passenger'
apache::mod::passenger::passenger_root: /usr/lib/ruby/vendor_ruby/phusion_passenger/locations.ini
# Those variables need to be set manually in the SSL vhosts.
apache::ssl_protocol: all -SSLv2 -SSLv3 -TLSv1 -TLSv1.1
apache::ssl_honorcipherorder: 'On'
apache::ssl_cipher: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
apache::hsts_header: "set Strict-Transport-Security \"max-age=%{hiera('strict_transport_security::max_age')}\""
# Those variables need to be set manually for all vhosts
apache::http_port: 80
apache::https_port: 443
# Hitch TLS proxy configuration
hitch::frontend: "[*]:10443"
hitch::proxy_support: false
hitch::http2_support: false
# Varnish configuration
varnish::http_port: 10080
varnish::proxy_port: 6081
varnish::http2_support: false
varnish::listen:
- ":%{hiera('varnish::http_port')}"
- "[::1]:%{hiera('varnish::proxy_port')},PROXY"
varnish::backend_http_port: "%{hiera('apache::http_port')}"
varnish::admin_listen: 127.0.0.1
varnish::admin_port: 6082
varnish::storage_type: malloc
varnish::storage_size: 256m
varnish::storage_file: /var/lib/varnish/varnish_storage.bin
# varnish::secret in private data
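# Implied request flow, pieced together from the values above (a sketch,
# not a configuration key): TLS clients reach hitch on [*]:10443, which
# presumably hands off over the PROXY protocol to varnish on [::1]:6081;
# plain HTTP goes straight to varnish on :10080; varnish then proxies to
# the Apache backend on port 80 (varnish::backend_http_port).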
letsencrypt::account_email: sysop+letsencrypt@softwareheritage.org
letsencrypt::server: https://acme-v02.api.letsencrypt.org/directory
letsencrypt::gandi_livedns_hook::config:
gandi_api: https://dns.api.gandi.net/api/v5/
zones:
softwareheritage.org:
api_key: "%{alias('gandi::softwareheritage_org::api_key')}"
sharing_id: "%{alias('gandi::softwareheritage_org::sharing_id')}"
swh.network:
api_key: "%{alias('gandi::softwareheritage_org::api_key')}"
sharing_id: "%{alias('gandi::swh_network::sharing_id')}"
letsencrypt::gandi_paas_hook::config:
gandi_xmlrpc: https://rpc.gandi.net/xmlrpc/
zone_keys:
softwareheritage.org: "%{alias('gandi::softwareheritage_org::xmlrpc_key')}"
letsencrypt::certificates::exported_directory: "%{::puppet_vardir}/letsencrypt_exports"
letsencrypt::certificates::directory: /etc/ssl/certs/letsencrypt
letsencrypt::certificates:
archive_production:
domains:
- archive.softwareheritage.org
- base.softwareheritage.org
- archive.internal.softwareheritage.org
- webapp0.softwareheritage.org
archive_staging:
domains:
- webapp.staging.swh.network
- webapp.internal.staging.swh.network
deposit_production:
domains:
- deposit.softwareheritage.org
- deposit.internal.softwareheritage.org
deposit_staging:
domains:
- deposit.staging.swh.network
- deposit.internal.staging.swh.network
stats_export:
domains:
- stats.export.softwareheritage.org
- pergamon.softwareheritage.org
jenkins:
domains:
- jenkins.softwareheritage.org
sentry:
domains:
- sentry.softwareheritage.org
keycloak:
domains:
- idp.softwareheritage.org
kafka01.euwest.azure.internal.softwareheritage.org:
domains:
- kafka01.euwest.azure.internal.softwareheritage.org
- kafka01.euwest.azure.softwareheritage.org
kafka02.euwest.azure.internal.softwareheritage.org:
domains:
- kafka02.euwest.azure.internal.softwareheritage.org
- kafka02.euwest.azure.softwareheritage.org
kafka03.euwest.azure.internal.softwareheritage.org:
domains:
- kafka03.euwest.azure.internal.softwareheritage.org
- kafka03.euwest.azure.softwareheritage.org
kafka04.euwest.azure.internal.softwareheritage.org:
domains:
- kafka04.euwest.azure.internal.softwareheritage.org
- kafka04.euwest.azure.softwareheritage.org
kafka05.euwest.azure.internal.softwareheritage.org:
domains:
- kafka05.euwest.azure.internal.softwareheritage.org
- kafka05.euwest.azure.softwareheritage.org
kafka06.euwest.azure.internal.softwareheritage.org:
domains:
- kafka06.euwest.azure.internal.softwareheritage.org
- kafka06.euwest.azure.softwareheritage.org
www-dev:
domains:
- www-dev.softwareheritage.org
deploy_hook: gandi_paas
www:
domains:
- softwareheritage.org
- www.softwareheritage.org
deploy_hook: gandi_paas
gandi-redirects:
domains:
- softwareheritage.org
- sponsors.softwareheritage.org
- sponsorship.softwareheritage.org
- testimonials.softwareheritage.org
deploy_hook: gandi_paas
bind::update_key: local-update
bind::zones:
internal.softwareheritage.org:
domain: internal.softwareheritage.org
100.168.192.in-addr.arpa:
domain: 100.168.192.in-addr.arpa
101.168.192.in-addr.arpa:
domain: 101.168.192.in-addr.arpa
internal.staging.swh.network:
domain: internal.staging.swh.network
128.168.192.in-addr.arpa:
domain: 128.168.192.in-addr.arpa
200.168.192.in-addr.arpa:
domain: 200.168.192.in-addr.arpa
201.168.192.in-addr.arpa:
domain: 201.168.192.in-addr.arpa
202.168.192.in-addr.arpa:
domain: 202.168.192.in-addr.arpa
203.168.192.in-addr.arpa:
domain: 203.168.192.in-addr.arpa
204.168.192.in-addr.arpa:
domain: 204.168.192.in-addr.arpa
205.168.192.in-addr.arpa:
domain: 205.168.192.in-addr.arpa
206.168.192.in-addr.arpa:
domain: 206.168.192.in-addr.arpa
207.168.192.in-addr.arpa:
domain: 207.168.192.in-addr.arpa
# Defaults for secondary bind server
bind::zones::type: slave
bind::zones::masters:
- 192.168.100.29
bind::zones::allow_transfers:
- 192.168.100.0/24
- 192.168.101.0/24
- 192.168.200.22
bind::zones::default_data:
zone_type: "%{alias('bind::zones::type')}"
dynamic: true
masters: "%{alias('bind::zones::masters')}"
transfer_source: ''
allow_updates: []
update_policies: ''
allow_transfers: "%{alias('bind::zones::allow_transfers')}"
dnssec: false
key_directory: ''
ns_notify: true
also_notify: ''
allow_notify: ''
forwarders: ''
forward: ''
source: ''
ns_records:
- pergamon.internal.softwareheritage.org.
- ns0.euwest.azure.internal.softwareheritage.org.
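# Sketch of the secondary setup above: slave zones are transferred from
# the master at 192.168.100.29 (presumably pergamon, the first NS record),
# and zone transfers are only allowed to the listed internal networks and
# the 192.168.200.22 host.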
bind::resource_records:
archive/CNAME:
type: CNAME
record: archive.internal.softwareheritage.org
data: moma.internal.softwareheritage.org.
db/CNAME:
type: CNAME
record: db.internal.softwareheritage.org
data: belvedere.internal.softwareheritage.org.
debian/CNAME:
type: CNAME
record: debian.internal.softwareheritage.org
data: pergamon.internal.softwareheritage.org.
backup/CNAME:
type: CNAME
record: backup.internal.softwareheritage.org
data: banco.internal.softwareheritage.org.
icinga/CNAME:
type: CNAME
record: icinga.internal.softwareheritage.org
data: pergamon.internal.softwareheritage.org.
faitout/CNAME:
type: CNAME
record: faitout.internal.softwareheritage.org
data: prado.internal.softwareheritage.org.
graph/CNAME:
type: CNAME
record: graph.internal.softwareheritage.org
data: granet.internal.softwareheritage.org.
logstash/CNAME:
type: CNAME
record: logstash.internal.softwareheritage.org
data: logstash0.internal.softwareheritage.org.
kibana/CNAME:
type: CNAME
record: kibana.internal.softwareheritage.org
data: banco.internal.softwareheritage.org.
rabbitmq/CNAME:
type: CNAME
record: rabbitmq.internal.softwareheritage.org
data: saatchi.internal.softwareheritage.org.
staging-gateway/A: # Cannot autogenerate due to limitations in $::swh_hostname fact
record: staging-gateway.internal.softwareheritage.org
data: 192.168.100.125
# VPN hosts
zack/A:
record: zack.internal.softwareheritage.org
data: 192.168.101.6
olasd/A:
record: olasd.internal.softwareheritage.org
data: 192.168.101.10
ardumont/A:
record: ardumont.internal.softwareheritage.org
data: 192.168.101.14
ardumont-desktop/A:
record: ardumont-desktop.internal.softwareheritage.org
data: 192.168.101.158
rdicosmo/A:
record: rdicosmo.internal.softwareheritage.org
data: 192.168.101.38
petitpalais/A:
record: petitpalais.internal.softwareheritage.org
data: 192.168.101.154
grand-palais/A:
record: grand-palais.internal.softwareheritage.org
data: 192.168.101.62
grandpalais/CNAME:
type: CNAME
record: grandpalais.internal.softwareheritage.org
data: grand-palais.internal.softwareheritage.org.
giverny/A:
type: A
record: giverny.internal.softwareheritage.org
data: 192.168.101.118
orangeriedev/A:
type: A
record: orangeriedev.internal.softwareheritage.org
data: 192.168.101.130
orangerie/A:
type: A
record: orangerie.internal.softwareheritage.org
data: 192.168.101.142
ddouard-desktop/A:
record: ddouard-desktop.internal.softwareheritage.org
data: 192.168.101.162
vlorentz-desktop/A:
record: vlorentz-desktop.internal.softwareheritage.org
data: 192.168.101.166
bind::resource_records::default_data:
type: A
bind::clients:
- 192.168.100.0/24
- 192.168.101.0/24
- 192.168.200.0/21
- 127.0.0.0/8
- '::1/128'
bind::autogenerate:
192.168.100.0/24: .internal.softwareheritage.org
192.168.200.0/21: .internal.softwareheritage.org
192.168.128.0/24: .internal.staging.swh.network
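# Illustrative effect of the autogeneration above (hypothetical host, not
# part of this file): a machine at 192.168.100.42 named "worker42" would
# get worker42.internal.softwareheritage.org -> 192.168.100.42 plus the
# matching PTR record; hosts that cannot be autogenerated (cf. the
# staging-gateway/A comment above) are declared explicitly in
# bind::resource_records.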
backups::legacy_storage: /srv/backups
backups::enable: true
backups::base: /
backups::exclude:
- dev
- proc
- run
- srv/backups
- srv/db-backups
- srv/elasticsearch
- srv/remote-backups
- srv/softwareheritage/objects
- srv/softwareheritage/postgres
- srv/softwareheritage/scratch
- srv/softwareheritage/scratch.2TB
- srv/storage
- sys
- tmp
- var/cache
- var/lib/mysql
- var/log/journal
- var/run
- var/tmp
phabricator::basepath: /srv/phabricator
phabricator::user: phabricator
phabricator::vcs_user: git
phabricator::notification::client_host: 127.0.0.1
phabricator::notification::client_port: 22280
phabricator::notification::listen: "%{hiera('phabricator::notification::client_host')}:%{hiera('phabricator::notification::client_port')}"
phabricator::mysql::database_prefix: phabricator
phabricator::mysql::username: phabricator
phabricator::mysql::conf::max_allowed_packet: 33554432
phabricator::mysql::conf::sql_mode: STRICT_ALL_TABLES
phabricator::mysql::conf::ft_stopword_file: "%{hiera('phabricator::basepath')}/phabricator/resources/sql/stopwords.txt"
phabricator::mysql::conf::ft_min_word_len: 3
phabricator::mysql::conf::ft_boolean_syntax: "' |-><()~*:\"\"&^'"
phabricator::mysql::conf::innodb_buffer_pool_size: 4G
phabricator::mysql::conf::innodb_file_per_table: TRUE
phabricator::mysql::conf::innodb_flush_method: O_DIRECT
phabricator::mysql::conf::innodb_log_file_size: 1G
phabricator::mysql::conf::max_connections: 16384
phabricator::php::fpm_listen: 127.0.0.1:9001
phabricator::php::max_file_size: 128M
phabricator::php::opcache_validate_timestamps: 0
phabricator::vhost::name: forge.softwareheritage.org
phabricator::vhost::docroot: "%{hiera('phabricator::basepath')}/phabricator/webroot"
phabricator::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
phabricator::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
phabricator::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
phabricator::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
mediawiki::php::fpm_listen: 127.0.0.1:9002
mediawiki::vhosts:
intranet.softwareheritage.org:
swh_logo: /images/9/99/Swh-intranet-logo.png
mysql:
username: mw_intranet
dbname: mediawiki_intranet
aliases: []
site_name: Software Heritage Intranet
wiki.softwareheritage.org:
swh_logo: /images/b/b2/Swh-logo.png
mysql:
username: mw_public
dbname: mediawiki_public
aliases: []
site_name: Software Heritage Wiki
mediawiki::vhost::docroot: /var/lib/mediawiki
mediawiki::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
mediawiki::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
mediawiki::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
mediawiki::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
annex::basepath: /srv/softwareheritage/annex
annex::vhost::name: annex.softwareheritage.org
annex::vhost::docroot: "%{hiera('annex::basepath')}/webroot"
annex::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
annex::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
annex::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
annex::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
docs::basepath: /srv/softwareheritage/docs
docs::vhost::name: docs.softwareheritage.org
docs::vhost::docroot: "%{hiera('docs::basepath')}/webroot"
docs::vhost::docroot_owner: "jenkins-push-docs"
docs::vhost::docroot_group: "www-data"
docs::vhost::docroot_mode: "2755"
docs::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
docs::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
docs::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
docs::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
ssh::port: 22
ssh::permitrootlogin: without-password
swh::base_directory: /srv/softwareheritage
swh::conf_directory: /etc/softwareheritage
swh::log_directory: /var/log/softwareheritage
swh::global_conf::file: "%{hiera('swh::conf_directory')}/global.ini"
swh::global_conf::contents: |
# Managed by puppet (class profile::swh) - modifications will be overwritten
[main]
log_db =
swh::apt_config::swh_repository::hostname: debian.softwareheritage.org
swh::apt_config::swh_repository: "https://%{hiera('swh::apt_config::swh_repository::hostname')}/"
swh::apt_config::enable_non_free: false
swh::apt_config::backported_packages:
stretch:
# For swh.scheduler
- python3-msgpack
# T1609
- python3-urllib3
- python3-requests
- python3-chardet
- python3-idna
debian_repository::basepath: "%{hiera('swh::base_directory')}/repository"
debian_repository::owner: swhdebianrepo
debian_repository::owner::homedir: /home/swhdebianrepo
debian_repository::group: swhdev
debian_repository::mode: "02775"
debian_repository::ssh_authorized_keys:
nicolasd@darboux:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ1TCpfzrvxLhEMhxjbxqPDCwY0nazIr1cyIbhGD2bUdAbZqVMdNtr7MeDnlLIKrIPJWuvltauvLNkYU0iLc1jMntdBCBM3hgXjmTyDtc8XvXseeBp5tDqccYNR/cnDUuweNcL5tfeu5kzaAg3DFi5Dsncs5hQK5KQ8CPKWcacPjEk4ir9gdFrtKG1rZmg/wi7YbfxrJYWzb171hdV13gSgyXdsG5UAFsNyxsKSztulcLKxvbmDgYbzytr38FK2udRk7WuqPbtEAW1zV4yrBXBSB/uw8EAMi+wwvLTwyUcEl4u0CTlhREljUx8LhYrsQUCrBcmoPAmlnLCD5Q9XrGH
jenkins@thyssen:
type: ssh-rsa
key: AAAAB3NzaC1yc2EAAAADAQABAAABAQCrfYnl8v4QK1ClkPMHO4WiPqgLVoOGpOPFUvg3WehMo8xMQ9e/EeZddQn96mhHkbbC5HCWEVK1VwafpIeadaMHnypdGhpapncYPpoKItxmf1IwVtlt/h8OYai5pTMCgkuOHjhnQdO20Amr9WMkoRZ/K7v/GijIZ6svvgWiYKfDnu0s1ziFYIT5rEA5hL9SqNJTlKdy2H68/7mmTii9NpBsGWQYDOjcrwELNOI5EUgQSOzmeKxecPkABfh/dezp6jmrv/2x7bm7LT46d+rnVDqVRiUrLVnLhrZCmZDxXfbEmftTdAoK8U/wjLreanRxKOc7arYRyKu0RbAaejPejzgR
debian_repository::gpg_keys:
# olasd
- 791F12396630DD71FD364375B8E5087766475AAF
# zack
- 4900707DDC5C07F2DECB02839C31503C6D866396
# ardumont
- BF00203D741AC9D546A8BE0752E2E9840D10C3B8
# anlambert
- 91FAF3F5CDE011E4FDF4CBF2D026E5C2F802586D
# seirl
- 225CD9E3FA9374BDF6E057042F8984858B1A9945
# vlorentz
- 379043E3DF96D3237E6782AC0E082B40E4376B1E
# ddouard
- 7DC7325EF1A6226AB6C3D7E32388A3BF6F0A6938
# jenkins-debian1
- 1F4BDC445E30C7066324D7B3D7D3329147AE3148
debian_repository::vhost::name: "%{hiera('swh::apt_config::swh_repository::hostname')}"
debian_repository::vhost::aliases:
- debian.internal.softwareheritage.org
debian_repository::vhost::docroot: "%{hiera('debian_repository::basepath')}"
debian_repository::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
debian_repository::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
debian_repository::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
debian_repository::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
swh::apt_config::debian_mirror::hostname: deb.debian.org
swh::apt_config::debian_mirror: "http://%{hiera('swh::apt_config::debian_mirror::hostname')}/debian/"
swh::apt_config::debian_security_mirror::hostname: "%{hiera('swh::apt_config::debian_mirror::hostname')}"
swh::apt_config::debian_security_mirror: "http://%{hiera('swh::apt_config::debian_mirror::hostname')}/debian-security/"
swh::apt_config::azure_repository::hostname: debian-archive.trafficmanager.net
swh::apt_config::azure_repository: "http://%{hiera('swh::apt_config::azure_repository::hostname')}/debian-azure/"
swh::apt_config::unattended_upgrades: true
swh::apt_config::unattended_upgrades::origins:
- "o=Debian,n=%{::lsbdistcodename}" # main Debian archive
- "o=Debian,n=%{::lsbdistcodename}-updates" # stable-updates (ex-volatile)
- "o=Debian,n=%{::lsbdistcodename},l=Debian-Security" # security updates
- "o=debian icinga-%{::lsbdistcodename},n=icinga-%{::lsbdistcodename}" # Icinga2 repository
- "o=Debian Azure,n=%{::lsbdistcodename}" # Debian Azure
- "o=Proxmox,n=%{::lsbdistcodename}" # Proxmox repository
- "o=packages.sury.org" # PHP backports (tate)
#####################################################################################################
# Remote service configurations
# Default ports
swh::remote_service::storage::port: 5002
swh::remote_service::objstorage::port: 5003
swh::remote_service::webapp::port: 5004
swh::remote_service::vault::port: 5005
swh::remote_service::deposit::port: 5006
swh::remote_service::indexer::port: 5007
swh::remote_service::scheduler::port: 5008
swh::remote_service::search::port: 5010
# Default backend services. Override in specific sites if needed. Configurations
# are split between read-only (the default) and writable storages. In most cases
# overrides should only happen for read-only services.
swh::remote_service::objstorage::config: "%{alias('swh::remote_service::objstorage::config::azure_readonly_with_fallback')}"
swh::remote_service::objstorage::config::writable: "%{alias('swh::remote_service::objstorage::config::uffizi')}"
swh::remote_service::objstorage::config_as_dict:
banco: "%{alias('swh::remote_service::objstorage::config::banco')}"
uffizi: "%{alias('swh::remote_service::objstorage::config::uffizi')}"
azure: "%{alias('swh::remote_service::objstorage::config::azure')}"
swh::remote_service::storage::config: "%{alias('swh::remote_service::storage::config::uffizi')}"
swh::remote_service::storage::config::writable: &swh_remote_service_storage_config_writable
"%{alias('swh::remote_service::storage::config::uffizi')}"
swh::remote_service::indexer::config: "%{alias('swh::remote_service::indexer::config::uffizi')}"
swh::remote_service::indexer::config::writable: "%{alias('swh::remote_service::indexer::config::uffizi')}"
swh::remote_service::scheduler::config: "%{alias('swh::remote_service::scheduler::config::saatchi')}"
swh::remote_service::scheduler::config::writable: "%{alias('swh::remote_service::scheduler::config::saatchi')}"
swh::remote_service::vault::config: "%{alias('swh::remote_service::vault::config::azure')}"
swh::remote_service::vault::config::writable: "%{alias('swh::remote_service::vault::config::azure')}"
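# A hypothetical site-level override (sketch, not active configuration):
# a hiera layer for Azure-hosted nodes could point reads at the Azure
# storage backend declared below, e.g.:
# swh::remote_service::storage::config: "%{alias('swh::remote_service::storage::config::azure')}"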
# Pipeline storage with retry, filter, buffer and finally writable storage
swh::deploy::worker::storage::pipeline:
cls: retry
args:
storage:
cls: filter
args:
storage:
cls: buffer
args:
storage: "%{alias('swh::remote_service::storage::config::writable')}"
min_batch_size:
content: 1000
content_bytes: 52428800 # 50 MiB
directory: 1000
revision: 1000
release: 1000
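# Read the pipeline above from the outside in: retry wraps filter, filter
# wraps buffer, and buffer flushes into the writable storage, so a write
# is retried on failure and batched per min_batch_size (up to 1000 objects
# per type, or 50 MiB of content data, per flush) before reaching the
# backend. (The filter proxy's exact behaviour is not spelled out here;
# see swh.storage.)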
# Objstorage backend configurations
swh::remote_service::objstorage::config::azure: &swh_objstorage_config_azure
cls: azure-prefixed
args:
accounts:
"0":
account_name: 0euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::0euwestswh')}"
container_name: contents
"1":
account_name: 1euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::1euwestswh')}"
container_name: contents
"2":
account_name: 2euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::2euwestswh')}"
container_name: contents
"3":
account_name: 3euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::3euwestswh')}"
container_name: contents
"4":
account_name: 4euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::4euwestswh')}"
container_name: contents
"5":
account_name: 5euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::5euwestswh')}"
container_name: contents
"6":
account_name: 6euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::6euwestswh')}"
container_name: contents
"7":
account_name: 7euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::7euwestswh')}"
container_name: contents
"8":
account_name: 8euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::8euwestswh')}"
container_name: contents
"9":
account_name: 9euwestswh
api_secret_key: "%{hiera('swh::azure::credentials::9euwestswh')}"
container_name: contents
"a":
account_name: aeuwestswh
api_secret_key: "%{hiera('swh::azure::credentials::aeuwestswh')}"
container_name: contents
"b":
account_name: beuwestswh
api_secret_key: "%{hiera('swh::azure::credentials::beuwestswh')}"
container_name: contents
"c":
account_name: ceuwestswh
api_secret_key: "%{hiera('swh::azure::credentials::ceuwestswh')}"
container_name: contents
"d":
account_name: deuwestswh
api_secret_key: "%{hiera('swh::azure::credentials::deuwestswh')}"
container_name: contents
"e":
account_name: eeuwestswh
api_secret_key: "%{hiera('swh::azure::credentials::eeuwestswh')}"
container_name: contents
"f":
account_name: feuwestswh
api_secret_key: "%{hiera('swh::azure::credentials::feuwestswh')}"
container_name: contents
swh::remote_service::objstorage::config::azure::readonly:
cls: filtered
args:
storage_conf: "%{alias('swh::remote_service::objstorage::config::azure')}"
filters_conf:
- type: readonly
swh::remote_service::objstorage::config::uffizi: &swh_objstorage_config_uffizi
cls: remote
args:
url: "http://uffizi.internal.softwareheritage.org:%{hiera('swh::remote_service::objstorage::port')}/"
swh::remote_service::objstorage::config::uffizi::readonly:
cls: filtered
args:
storage_conf: "%{alias('swh::remote_service::objstorage::config::uffizi')}"
filters_conf:
- type: readonly
swh::remote_service::objstorage::config::banco: &swh_objstorage_config_banco
cls: remote
args:
url: "http://banco.internal.softwareheritage.org:%{hiera('swh::remote_service::objstorage::port')}/"
swh::remote_service::objstorage::config::banco::readonly:
cls: filtered
args:
storage_conf: "%{alias('swh::remote_service::objstorage::config::banco')}"
filters_conf:
- type: readonly
swh::remote_service::objstorage::config::azure_readonly_with_fallback: &swh_azure_readonly_with_fallback
cls: multiplexer
args:
objstorages:
- "%{alias('swh::remote_service::objstorage::config::azure::readonly')}"
- "%{alias('swh::remote_service::objstorage::config::banco::readonly')}"
- "%{alias('swh::remote_service::objstorage::config::uffizi::readonly')}"
swh::remote_service::objstorage::config::localhost:
cls: remote
args:
url: "http://127.0.0.1:%{hiera('swh::remote_service::objstorage::port')}/"
# Storage backend configurations
swh::remote_service::storage::config::uffizi:
cls: remote
args:
url: "http://uffizi.internal.softwareheritage.org:%{hiera('swh::remote_service::storage::port')}/"
swh::remote_service::storage::config::azure:
cls: remote
args:
url: "http://storage01.euwest.azure.internal.softwareheritage.org:%{hiera('swh::remote_service::storage::port')}/"
swh::remote_service::storage::config::cassandra:
cls: remote
args:
url: "http://storage02.euwest.azure.internal.softwareheritage.org:%{hiera('swh::remote_service::storage::port')}/"
swh::remote_service::storage::config::localhost:
cls: remote
args:
url: "http://localhost:%{hiera('swh::remote_service::storage::port')}/"
swh::remote_service::search::config::empty: {}
swh::remote_service::search::config::storage0:
cls: remote
args:
url: "http://storage01.euwest.azure.internal.softwareheritage.org:%{hiera('swh::remote_service::search::port')}/"
swh::remote_service::search::config: "%{alias('swh::remote_service::search::config::empty')}"
# Indexer backend configurations
swh::remote_service::indexer::config::uffizi:
cls: remote
args:
url: "http://uffizi.internal.softwareheritage.org:%{hiera('swh::remote_service::indexer::port')}/"
swh::remote_service::indexer::config::azure:
cls: remote
args:
url: "http://storage01.euwest.azure.internal.softwareheritage.org:%{hiera('swh::remote_service::indexer::port')}/"
# Scheduler backend configurations
swh::remote_service::scheduler::config::saatchi:
cls: remote
args:
url: "http://saatchi.internal.softwareheritage.org:%{hiera('swh::remote_service::scheduler::port')}/"
# Vault backend configurations
swh::remote_service::vault::config::azure:
cls: remote
args:
url: "http://vangogh.euwest.azure.internal.softwareheritage.org:%{hiera('swh::remote_service::vault::port')}/"
# End remote service configurations
#####################################################################################################
swh::deploy::db::pgbouncer::port: 5432
swh::deploy::db::main::port: 5433
swh::deploy::db::secondary::port: 5434
swh::deploy::db::hdd::port: 5435
swh::deploy::db::pgbouncer::user::login: postgres
pgbouncer::config_params:
logfile: /var/log/postgresql/pgbouncer.log
pidfile: /var/run/postgresql/pgbouncer.pid
unix_socket_dir: /var/run/postgresql
client_tls_sslmode: allow
client_tls_ca_file: /etc/ssl/certs/ssl-cert-snakeoil.pem
client_tls_key_file: /etc/ssl/private/ssl-cert-snakeoil.key
client_tls_cert_file: /etc/ssl/certs/ssl-cert-snakeoil.pem
server_tls_sslmode: allow
listen_port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
listen_addr:
- 127.0.0.1
- 127.0.1.1
- "%{hiera('pgbouncer::listen_addr')}"
auth_type: "hba"
auth_file: /etc/pgbouncer/userlist.txt
auth_hba_file: "%{hiera('pgbouncer::auth_hba_file')}"
admin_users:
- "%{hiera('swh::deploy::db::pgbouncer::user::login')}"
- olasd
pool_mode: session
ignore_startup_parameters: extra_float_digits
server_reset_query: DISCARD ALL
max_client_conn: 2000
default_pool_size: 2000
max_db_connections: 2000
max_user_connections: 2000
log_connections: 0
log_disconnections: 0
pgbouncer::user: postgres
pgbouncer::group: postgres
# swh::deploy::db::pgbouncer::user::password in private data
pgbouncer::userlist:
- user: "%{hiera('swh::deploy::db::pgbouncer::user::login')}"
password: "%{hiera('swh::deploy::db::pgbouncer::user::password')}"
pgbouncer::databases: []
swh::deploy::directory: "%{hiera('swh::conf_directory')}/deploy"
swh::deploy::group: swhdeploy
swh::deploy::public_key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWrJX/uUss/EYZaTp2EIsZgg3ZSH8JcNZV5gBdNZ7EHcQcqxYUCqmwv9Ss3xT8n9kIrH6iz/vquqf84XR+keoZK3bsp50tMOY8LJWpcl/JK2XD6ovoJrHPu+iAroLkE59RdTa1Vz+jF67Q2UuG9f0nKwL4rnkeWTyuK/zAbyHyYKFQntkkwMr5/YTU8sjl/4aNF/2Ww8hitdi2GORlCjav2bB0wyPBA2e8sMt8Hp9O4TIWg/RD6vPX+ZvuFaB/Lw/Hv21622QGTHoZiO92/8/W9/t24il6SU4z96ZGfXqdUZkpPYKBGwyIkZkS4dN6jb4CcRlyXTObphyu3dAlABRt swhworker@worker01'
swh::deploy::storage::sentry_swh_package: swh.storage
swh::deploy::storage::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::storage::conf_directory: "%{hiera('swh::conf_directory')}/storage"
swh::deploy::storage::conf_file: "%{hiera('swh::deploy::storage::conf_directory')}/storage.yml"
swh::deploy::storage::user: swhstorage
swh::deploy::storage::group: swhstorage
swh::deploy::storage::db::host: db.internal.softwareheritage.org
swh::deploy::storage::db::port: "%{alias('swh::deploy::db::pgbouncer::port')}"
swh::deploy::storage::db::user: swhstorage
swh::deploy::storage::db::dbname: softwareheritage
swh::deploy::storage::directory: "%{hiera('swh::base_directory')}/objects"
swh::deploy::storage::backend::listen::host: 127.0.0.1
swh::deploy::storage::backend::listen::port: "%{alias('swh::remote_service::storage::port')}"
swh::deploy::storage::backend::workers: 4
swh::deploy::storage::backend::reload_mercy: 3600
swh::deploy::storage::backend::http_keepalive: 5
swh::deploy::storage::backend::http_timeout: 3600
swh::deploy::storage::backend::max_requests: 10000
swh::deploy::storage::backend::max_requests_jitter: 1000
swh::deploy::storage::backend::server_names:
- "%{::swh_hostname.internal_fqdn}"
- "%{::hostname}"
- 127.0.0.1
- localhost
- "::1"
swh::deploy::storage::config:
storage:
cls: local
args:
db: "host=%{hiera('swh::deploy::storage::db::host')} port=%{hiera('swh::deploy::storage::db::port')} user=%{hiera('swh::deploy::storage::db::user')} dbname=%{hiera('swh::deploy::storage::db::dbname')} password=%{hiera('swh::deploy::storage::db::password')}"
objstorage: "%{alias('swh::remote_service::objstorage::config')}"
swh::deploy::indexer::storage::sentry_swh_package: swh.indexer
swh::deploy::indexer::storage::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::indexer::storage::conf_file: "%{hiera('swh::deploy::storage::conf_directory')}/indexer.yml"
swh::deploy::indexer::storage::user: swhstorage
swh::deploy::indexer::storage::group: swhstorage
swh::deploy::indexer::storage::db::host: somerset.internal.softwareheritage.org
swh::deploy::indexer::storage::db::port: "%{alias('swh::deploy::db::pgbouncer::port')}"
swh::deploy::indexer::storage::db::user: swhstorage
swh::deploy::indexer::storage::db::dbname: softwareheritage-indexer
swh::deploy::indexer::storage::backend::listen::host: 127.0.0.1
swh::deploy::indexer::storage::backend::listen::port: "%{alias('swh::remote_service::indexer::port')}"
swh::deploy::indexer::storage::backend::workers: 4
swh::deploy::indexer::storage::backend::reload_mercy: 3600
swh::deploy::indexer::storage::backend::http_keepalive: 5
swh::deploy::indexer::storage::backend::http_timeout: 3600
swh::deploy::indexer::storage::backend::max_requests: 10000
swh::deploy::indexer::storage::backend::max_requests_jitter: 1000
swh::deploy::indexer::storage::backend::server_names:
- "%{::swh_hostname.internal_fqdn}"
- "%{::hostname}"
- 127.0.0.1
- localhost
- "::1"
swh::deploy::indexer::storage::config:
indexer_storage:
cls: local
args:
db: "host=%{hiera('swh::deploy::indexer::storage::db::host')} port=%{hiera('swh::deploy::indexer::storage::db::port')} user=%{hiera('swh::deploy::indexer::storage::db::user')} dbname=%{hiera('swh::deploy::indexer::storage::db::dbname')} password=%{hiera('swh::deploy::indexer::storage::db::password')}"
swh::deploy::vault::cache: "%{hiera('swh::base_directory')}/vault_cache"
# Default cache (orangerie/orangeriedev) is a pathslicing objstorage
swh::deploy::vault::config::cache:
cls: pathslicing
args:
root: "%{hiera('swh::deploy::vault::cache')}"
slicing: "0:1/1:5"
swh::deploy::vault::sentry_swh_package: swh.vault
swh::deploy::vault::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::vault::conf_directory: "%{hiera('swh::conf_directory')}/vault"
swh::deploy::vault::conf_file: "%{hiera('swh::deploy::vault::conf_directory')}/server.yml"
swh::deploy::vault::user: swhvault
swh::deploy::vault::group: swhvault
swh::deploy::vault::db::host: db.internal.softwareheritage.org
swh::deploy::vault::db::port: "%{alias('swh::deploy::db::pgbouncer::port')}"
swh::deploy::vault::db::user: swh-vault
swh::deploy::vault::db::dbname: swh-vault
swh::deploy::vault::backend::listen::host: 127.0.0.1
swh::deploy::vault::backend::listen::port: "%{alias('swh::remote_service::vault::port')}"
swh::deploy::vault::backend::workers: 4
swh::deploy::vault::backend::reload_mercy: 3600
swh::deploy::vault::backend::http_keepalive: 5
swh::deploy::vault::backend::http_timeout: 3600
swh::deploy::vault::backend::max_requests: 10000
swh::deploy::vault::backend::max_requests_jitter: 1000
swh::deploy::vault::backend::server_names:
- "%{::swh_hostname.internal_fqdn}"
- "%{::hostname}"
- 127.0.0.1
- localhost
- "::1"
swh::deploy::vault::config:
storage: "%{alias('swh::remote_service::storage::config')}"
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
cache: "%{alias('swh::deploy::vault::config::cache')}"
vault:
cls: local
args:
db: "host=%{hiera('swh::deploy::vault::db::host')} port=%{hiera('swh::deploy::vault::db::port')} user=%{hiera('swh::deploy::vault::db::user')} dbname=%{hiera('swh::deploy::vault::db::dbname')} password=%{hiera('swh::deploy::vault::db::password')}"
swh::deploy::journal::conf_directory: "%{hiera('swh::conf_directory')}/journal"
swh::deploy::journal::brokers:
- esnode1.internal.softwareheritage.org
- esnode2.internal.softwareheritage.org
- esnode3.internal.softwareheritage.org
swh::deploy::journal::prefix: swh.journal.objects
swh::deploy::journal_simple_checker_producer::conf_file: "%{hiera('swh::deploy::journal::conf_directory')}/checker.yml"
swh::deploy::journal_simple_checker_producer::user: swhstorage
swh::deploy::journal_simple_checker_producer::group: swhstorage
swh::deploy::journal_simple_checker_producer::config:
brokers: "%{alias('swh::deploy::journal::brokers')}"
temporary_prefix: swh.tmp_journal.new
storage_dbconn: "host=%{hiera('swh::deploy::storage::db::host')} port=%{hiera('swh::deploy::storage::db::port')} user=%{hiera('swh::deploy::storage::db::user')} dbname=%{hiera('swh::deploy::storage::db::dbname')} password=%{hiera('swh::deploy::storage::db::password')}"
object_types:
- content
- directory
- revision
- release
- origin
- origin_visit
swh::deploy::objstorage::sentry_swh_package: swh.objstorage
swh::deploy::objstorage::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::objstorage::conf_directory: "%{hiera('swh::conf_directory')}/objstorage"
swh::deploy::objstorage::conf_file: "%{hiera('swh::deploy::objstorage::conf_directory')}/server.yml"
swh::deploy::objstorage::user: "%{hiera('swh::deploy::storage::user')}"
swh::deploy::objstorage::group: "%{hiera('swh::deploy::storage::group')}"
swh::deploy::objstorage::directory: "%{hiera('swh::deploy::storage::directory')}"
swh::deploy::objstorage::slicing: 0:2/2:4/4:6
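# Same pathslicing scheme as the vault cache, with deeper fan-out: under
# slicing "0:2/2:4/4:6", a hypothetical object id beginning "abcdef01..."
# lands under <root>/ab/cd/ef/.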
swh::deploy::objstorage::config:
objstorage:
cls: pathslicing
args:
root: "%{hiera('swh::deploy::objstorage::directory')}"
slicing: "%{hiera('swh::deploy::objstorage::slicing')}"
client_max_size: 1073741824 # 1 GiB
swh::deploy::objstorage::backend::listen::host: 127.0.0.1
swh::deploy::objstorage::backend::listen::port: "%{alias('swh::remote_service::objstorage::port')}"
swh::deploy::objstorage::backend::workers: 4
swh::deploy::objstorage::backend::reload_mercy: 3600
swh::deploy::objstorage::backend::http_workers: 1
swh::deploy::objstorage::backend::http_keepalive: 5
swh::deploy::objstorage::backend::http_timeout: 3600
swh::deploy::objstorage::backend::max_requests: 0
swh::deploy::objstorage::backend::max_requests_jitter: 0
swh::deploy::objstorage::backend::server_names:
- "%{::swh_hostname.internal_fqdn}"
- "%{::hostname}"
- 127.0.0.1
- localhost
- "::1"
# aliases are pulled from letsencrypt::certificates[$swh::deploy::deposit::vhost::letsencrypt_cert]
swh::deploy::deposit::vhost::letsencrypt_cert: deposit_production
swh::deploy::deposit::url: https://deposit.softwareheritage.org
swh::deploy::deposit::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
swh::deploy::deposit::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
swh::deploy::deposit::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
swh::deploy::deposit::locked_endpoints:
- /1/private/[^/]+/[^/]+/[^/]+
- /1/private/deposits/
# e2e checks on deposit
swh::deploy::deposit::e2e::server: "%{hiera('swh::deploy::deposit::url')}/1"
swh::deploy::deposit::e2e::user: swh
swh::deploy::deposit::e2e::collection: swh
swh::deploy::deposit::e2e::poll_interval: 1
swh::deploy::deposit::e2e::archive: /usr/share/swh/icinga-plugins/data/deposit/jesuisgpl.tgz
swh::deploy::deposit::e2e::metadata: /usr/share/swh/icinga-plugins/data/deposit/jesuisgpl.tgz.xml
# e2e checks on vault
swh::deploy::vault::e2e::storage: "http://uffizi.internal.softwareheritage.org:%{hiera('swh::remote_service::storage::port')}"
swh::deploy::vault::e2e::webapp: "https://archive.softwareheritage.org"
swh::deploy::deposit::sentry_swh_package: swh.deposit
swh::deploy::deposit::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::deposit::config_directory: "%{hiera('swh::conf_directory')}/deposit"
swh::deploy::deposit::config_file: "%{hiera('swh::deploy::deposit::config_directory')}/server.yml"
swh::deploy::deposit::user: swhdeposit
swh::deploy::deposit::group: swhdeposit
swh::deploy::deposit::media_root_directory: /srv/storage/space/swh-deposit/uploads/
swh::deploy::deposit::db::host: db.internal.softwareheritage.org
swh::deploy::deposit::db::port: "%{alias('swh::deploy::db::pgbouncer::port')}"
swh::deploy::deposit::db::dbname: softwareheritage-deposit
swh::deploy::deposit::db::dbuser: swhstorage
# swh::deploy::deposit::db::password: in private data
# swh::deploy::deposit::runtime_secret_key in private data
swh::deploy::deposit::config:
max_upload_size: 209715200 # 200 MiB
tool:
name: 'swh-deposit'
version: '0.0.1'
configuration:
sword_version: 2
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
private:
secret_key: "%{hiera('swh::deploy::deposit::runtime_secret_key')}"
db:
host: "%{hiera('swh::deploy::deposit::db::host')}"
port: "%{hiera('swh::deploy::deposit::db::port')}"
name: "%{hiera('swh::deploy::deposit::db::dbname')}"
user: "%{hiera('swh::deploy::deposit::db::dbuser')}"
password: "%{hiera('swh::deploy::deposit::db::password')}"
media_root: "%{hiera('swh::deploy::deposit::media_root_directory')}"
loader-version: 2
swh::deploy::worker::loader_deposit::config_file: "%{hiera('swh::conf_directory')}/loader_deposit.yml"
swh::deploy::worker::loader_deposit::concurrency: 1
swh::deploy::worker::loader_deposit::private_tmp: true
swh::deploy::worker::loader_deposit::loglevel: info
# deposit_basic_auth_swhworker_{username|password} in private_data
swh::deploy::worker::loader_deposit::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600 # 100 MiB
extraction_dir: /tmp/swh.loader.deposit/
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.package.deposit.tasks.LoadDeposit
deposit:
url: "%{alias('swh::deploy::webapp::deposit::private::url')}"
auth:
username: "%{hiera('deposit_basic_auth_swhworker_username')}"
password: "%{hiera('deposit_basic_auth_swhworker_password')}"
swh::deploy::checker_deposit::sentry_swh_package: swh.deposit.loader
swh::deploy::checker_deposit::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::checker_deposit::config_file: "%{hiera('swh::conf_directory')}/checker_deposit.yml"
swh::deploy::worker::checker_deposit::concurrency: 1
swh::deploy::worker::checker_deposit::private_tmp: true
swh::deploy::worker::checker_deposit::loglevel: info
# deposit_basic_auth_swhworker_{username|password} in private_data
swh::deploy::worker::checker_deposit::config:
storage: "%{alias('swh::remote_service::storage::config::writable')}"
extraction_dir: /tmp/swh.checker.deposit/
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_modules:
- swh.deposit.loader.tasks
task_queues:
- swh.deposit.loader.tasks.ChecksDepositTsk
url: "%{alias('swh::deploy::deposit::url')}"
auth:
username: "%{hiera('deposit_basic_auth_swhworker_username')}"
password: "%{hiera('deposit_basic_auth_swhworker_password')}"
swh::deploy::deposit::backend::listen::host: 127.0.0.1
swh::deploy::deposit::backend::listen::port: "%{alias('swh::remote_service::deposit::port')}"
swh::deploy::deposit::backend::workers: 8
swh::deploy::deposit::backend::reload_mercy: 3600
swh::deploy::deposit::backend::http_keepalive: 5
swh::deploy::deposit::backend::http_timeout: 3600
swh::deploy::objstorage_log_checker::conf_directory: "%{hiera('swh::deploy::objstorage::conf_directory')}"
swh::deploy::objstorage_log_checker::conf_file: "%{hiera('swh::deploy::objstorage_log_checker::conf_directory')}/log_checker.yml"
swh::deploy::objstorage_log_checker::user: "%{hiera('swh::deploy::objstorage::user')}"
swh::deploy::objstorage_log_checker::group: "%{hiera('swh::deploy::objstorage::group')}"
swh::deploy::objstorage_log_checker::config:
storage:
cls: pathslicing
args:
root: "%{hiera('swh::deploy::objstorage::directory')}"
slicing: "%{hiera('swh::deploy::objstorage::slicing')}"
batch_size: 1000
log_tag: objstorage.checker.log
swh::deploy::objstorage_repair_checker::conf_directory: "%{hiera('swh::deploy::objstorage::conf_directory')}"
swh::deploy::objstorage_repair_checker::conf_file: "%{hiera('swh::deploy::objstorage_repair_checker::conf_directory')}/repair_checker.yml"
swh::deploy::objstorage_repair_checker::user: "%{hiera('swh::deploy::objstorage::user')}"
swh::deploy::objstorage_repair_checker::group: "%{hiera('swh::deploy::objstorage::group')}"
swh::deploy::objstorage_repair_checker::config:
storage:
cls: pathslicing
args:
root: "%{hiera('swh::deploy::objstorage::directory')}"
slicing: "%{hiera('swh::deploy::objstorage::slicing')}"
batch_size: 1000
log_tag: objstorage.checker.repair
backup_storages: "%{alias('swh::remote_service::objstorage::config_as_dict')}"
swh::deploy::webapp::backported_packages:
stretch:
- python3-django
- python-django-common
swh::deploy::deposit::backported_packages: "%{alias('swh::deploy::webapp::backported_packages')}"
swh::deploy::webapp::sentry_swh_package: swh.web
swh::deploy::webapp::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::webapp::conf_directory: "%{hiera('swh::conf_directory')}/web"
swh::deploy::webapp::conf_file: "%{hiera('swh::deploy::webapp::conf_directory')}/web.yml"
swh::deploy::webapp::user: swhwebapp
swh::deploy::webapp::group: swhwebapp
swh::deploy::webapp::conf::log_dir: "%{hiera('swh::log_directory')}/webapp"
swh::deploy::webapp::backend::listen::host: 127.0.0.1
swh::deploy::webapp::backend::listen::port: "%{alias('swh::remote_service::webapp::port')}"
swh::deploy::webapp::backend::workers: 32
swh::deploy::webapp::backend::http_keepalive: 5
swh::deploy::webapp::backend::http_timeout: 3600
swh::deploy::webapp::backend::reload_mercy: 3600
# aliases are pulled from letsencrypt::certificates[$swh::deploy::webapp::vhost::letsencrypt_cert]
swh::deploy::webapp::vhost::letsencrypt_cert: archive_production
swh::deploy::webapp::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
swh::deploy::webapp::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
swh::deploy::webapp::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
swh::deploy::webapp::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
swh::deploy::webapp::config::es_workers_index_url: http://esnode1.internal.softwareheritage.org:9200/swh_workers-*
swh::deploy::webapp::production_db_dir: /var/lib/swh
swh::deploy::webapp::production_db: "%{hiera('swh::deploy::webapp::production_db_dir')}/web.sqlite3"
swh::deploy::webapp::deposit::private::url: "%{hiera('swh::deploy::deposit::url')}/1/private/"
swh::deploy::webapp::config::throttling:
cache_uri: "%{hiera('memcached::server::bind')}:%{hiera('memcached::server::port')}"
scopes:
swh_api:
limiter_rate:
default: 120/h
exempted_networks:
- 127.0.0.0/8
- 192.168.100.0/23
- 128.93.193.29
- 131.107.174.0/24
# OpenAIRE
- 213.135.60.145
- 213.135.60.146
# DINSIC
- 37.187.137.47
swh_api_origin_search:
limiter_rate:
default: 10/m
swh_api_origin_visit_latest:
# This endpoint gets called a lot (by default, up to 70 times
# per origin search), so it deserves a much higher rate-limit
# than the rest of the API.
limiter_rate:
default: 700/m
swh_vault_cooking:
limiter_rate:
default: 120/h
GET: 60/m
exempted_networks:
- 127.0.0.0/8
- 192.168.100.0/23
- 128.93.193.29
- 131.107.174.0/24
# OpenAIRE
- 213.135.60.145
- 213.135.60.146
swh_save_origin:
limiter_rate:
default: 120/h
POST: 10/h
exempted_networks:
- 127.0.0.0/8
- 192.168.100.0/23
- 128.93.193.29
- 131.107.174.0/24
# OpenAIRE
- 213.135.60.145
- 213.135.60.146
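  # Reading of the rates above (assuming a "<count>/<period>" rate syntax):
  # default: 120/h allows 120 requests per hour per client; method keys such
  # as POST: 10/h override the default for that HTTP method only; requests
  # from exempted_networks are not throttled. E.g. swh_save_origin lets a
  # non-exempted client issue at most 10 save requests per hour.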
# in private data:
# deposit_basic_auth_swhworker_username
# deposit_basic_auth_swhworker_password
swh::deploy::webapp::config:
search: "%{alias('swh::remote_service::search::config')}"
storage: "%{alias('swh::remote_service::storage::config')}"
vault: "%{alias('swh::remote_service::vault::config::writable')}"
indexer_storage: "%{alias('swh::remote_service::indexer::config')}"
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
log_dir: "%{hiera('swh::deploy::webapp::conf::log_dir')}"
secret_key: "%{hiera('swh::deploy::webapp::conf::secret_key')}"
content_display_max_size: 1048576
throttling: "%{alias('swh::deploy::webapp::config::throttling')}"
production_db: "%{hiera('swh::deploy::webapp::production_db')}"
es_workers_index_url: "%{alias('swh::deploy::webapp::config::es_workers_index_url')}"
deposit:
private_api_url: "%{hiera('swh::deploy::webapp::deposit::private::url')}"
private_api_user: "%{hiera('deposit_basic_auth_swhworker_username')}"
private_api_password: "%{hiera('deposit_basic_auth_swhworker_password')}"
client_config:
sentry_dsn: "%{lookup('swh::deploy::webapp::sentry_dsn')}"
swh::deploy::webapp::locked_endpoints:
- /api/1/content/[^/]+/symbol/
- /api/1/entity/
- /api/1/provenance/
# local configuration for the scheduler
swh::deploy::scheduler::config::local: &swh_scheduler_local_config
scheduler:
cls: local
args:
db: "host=%{hiera('swh::deploy::scheduler::db::host')} port=%{hiera('swh::deploy::scheduler::db::port')} dbname=%{hiera('swh::deploy::scheduler::db::dbname')} user=%{hiera('swh::deploy::scheduler::db::user')} password=%{hiera('swh::deploy::scheduler::db::password')}"
swh::deploy::scheduler::sentry_swh_package: swh.scheduler
swh::deploy::scheduler::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::scheduler::conf_file: "%{hiera('swh::conf_directory')}/scheduler.yml"
swh::deploy::scheduler::user: swhscheduler
swh::deploy::scheduler::group: swhscheduler
swh::deploy::scheduler::db::host: db.internal.softwareheritage.org
swh::deploy::scheduler::db::port: "%{alias('swh::deploy::db::pgbouncer::port')}"
swh::deploy::scheduler::db::dbname: softwareheritage-scheduler
swh::deploy::scheduler::db::user: swhscheduler
# swh::deploy::scheduler::db::password in private data
# swh::deploy::scheduler::task_broker::password in private data
swh::deploy::scheduler::task_broker: "amqp://swhproducer:%{hiera('swh::deploy::scheduler::task_broker::password')}@rabbitmq:5672//"
swh::deploy::scheduler::listener::log_level: INFO
swh::deploy::scheduler::runner::log_level: INFO
swh::deploy::scheduler::config:
<<: *swh_scheduler_local_config
celery:
task_broker: "%{alias('swh::deploy::scheduler::task_broker')}"
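# Note: the "<<: *swh_scheduler_local_config" above is a standard YAML merge
# key: it splices the mapping saved under the &swh_scheduler_local_config
# anchor (the "scheduler:" block) into this one. The effective result is
# equivalent to writing, by hand:
#   swh::deploy::scheduler::config:
#     scheduler:
#       cls: local
#       args:
#         db: "host=... port=... dbname=... user=... password=..."
#     celery:
#       task_broker: "%{alias('swh::deploy::scheduler::task_broker')}"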
swh::deploy::scheduler::remote::sentry_swh_package: swh.scheduler
swh::deploy::scheduler::remote::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::scheduler::remote::conf_dir: "%{hiera('swh::conf_directory')}/backend"
swh::deploy::scheduler::remote::conf_file: "%{hiera('swh::deploy::scheduler::remote::conf_dir')}/scheduler.yml"
swh::deploy::scheduler::remote::user: swhscheduler
swh::deploy::scheduler::remote::group: swhscheduler
swh::deploy::scheduler::remote::backend::listen::host: 127.0.0.1
swh::deploy::scheduler::remote::backend::listen::port: "%{alias('swh::remote_service::scheduler::port')}"
swh::deploy::scheduler::remote::backend::workers: 16
swh::deploy::scheduler::remote::backend::reload_mercy: 3600
swh::deploy::scheduler::remote::backend::http_keepalive: 5
swh::deploy::scheduler::remote::backend::http_timeout: 3600
swh::deploy::scheduler::remote::backend::max_requests: 10000
swh::deploy::scheduler::remote::backend::max_requests_jitter: 1000
swh::deploy::scheduler::remote::backend::server_names:
- "%{::swh_hostname.internal_fqdn}"
- "%{::hostname}"
- 127.0.0.1
- localhost
- "::1"
swh::deploy::scheduler::remote::config: "%{alias('swh::deploy::scheduler::config::local')}"
swh::elasticsearch::nodes:
- host: esnode2.internal.softwareheritage.org
port: 9200
- host: esnode3.internal.softwareheritage.org
port: 9200
- host: esnode1.internal.softwareheritage.org
port: 9200
swh::deploy::scheduler::archive::conf_dir: "%{hiera('swh::conf_directory')}/backend"
swh::deploy::scheduler::archive::conf_file: "%{hiera('swh::deploy::scheduler::archive::conf_dir')}/elastic.yml"
swh::deploy::scheduler::archive::user: "%{hiera('swh::deploy::scheduler::user')}"
swh::deploy::scheduler::archive::config:
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
elasticsearch:
cls: local
args:
index_name_prefix: swh-tasks
      storage_nodes: "%{alias('swh::elasticsearch::nodes')}"
client_options:
sniff_on_start: false
sniff_on_connection_fail: true
http_compress: false
sniffer_timeout: 60
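      # These client_options appear to map to the elasticsearch Python
      # client's transport options: no sniffing at startup, re-sniff when a
      # connection fails, no HTTP compression, 60s sniffer timeout.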
# Main lister configuration
swh::deploy::worker::lister::db::user: swh-lister
swh::deploy::worker::lister::db::name: swh-lister
swh::deploy::worker::lister::db::host: db.internal.softwareheritage.org
swh::deploy::worker::lister::db::port: "%{alias('swh::deploy::db::pgbouncer::port')}"
# swh::deploy::lister::db::password in private data
# swh::deploy::worker::task_broker::password in private data
swh::deploy::worker::task_broker: "amqp://swhconsumer:%{hiera('swh::deploy::worker::task_broker::password')}@rabbitmq:5672//"
swh::deploy::worker::instances:
- loader_debian
- loader_git
- lister
swh::deploy::loader_git::sentry_swh_package: swh.loader.git
swh::deploy::loader_git::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::loader_git::config_file: "%{hiera('swh::conf_directory')}/loader_git.yml"
swh::deploy::worker::loader_git::concurrency: 4
swh::deploy::worker::loader_git::loglevel: info
swh::deploy::worker::loader_git::save_data_path: /srv/storage/space/data/sharded_packfiles
swh::deploy::worker::loader_git::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
save_data: false
save_data_path: "%{alias('swh::deploy::worker::loader_git::save_data_path')}"
directory_packet_size: 100
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.git.tasks.UpdateGitRepository
# loader-git-disk
- swh.loader.git.tasks.LoadDiskGitRepository
- swh.loader.git.tasks.UncompressAndLoadDiskGitRepository
# for all loader packages
swh::deploy::loader_core::sentry_swh_package: swh.loader.core
swh::deploy::loader_core::sentry_environment: "%{hiera('swh::deploy::environment')}"
swh::deploy::worker::loader_debian::config_file: "%{hiera('swh::conf_directory')}/loader_debian.yml"
swh::deploy::worker::loader_debian::private_tmp: true
swh::deploy::worker::loader_debian::concurrency: 1
swh::deploy::worker::loader_debian::loglevel: info
swh::deploy::worker::loader_debian::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.package.debian.tasks.LoadDebian
swh::deploy::worker::loader_archive::config_file: "%{hiera('swh::conf_directory')}/loader_archive.yml"
swh::deploy::worker::loader_archive::private_tmp: true
swh::deploy::worker::loader_archive::concurrency: 1
swh::deploy::worker::loader_archive::loglevel: info
swh::deploy::worker::loader_archive::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.package.archive.tasks.LoadArchive
swh::deploy::worker::loader_cran::config_file: "%{hiera('swh::conf_directory')}/loader_cran.yml"
swh::deploy::worker::loader_cran::private_tmp: true
swh::deploy::worker::loader_cran::concurrency: 1
swh::deploy::worker::loader_cran::loglevel: info
swh::deploy::worker::loader_cran::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.package.cran.tasks.LoadCRAN
swh::deploy::lister::db::local:
cls: local
args:
db: "postgresql://%{hiera('swh::deploy::worker::lister::db::user')}:%{hiera('swh::deploy::lister::db::password')}@%{hiera('swh::deploy::worker::lister::db::host')}:%{hiera('swh::deploy::worker::lister::db::port')}/%{hiera('swh::deploy::worker::lister::db::name')}"
swh::deploy::lister::sentry_swh_package: swh.lister
swh::deploy::lister::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::lister::config_file: "%{hiera('swh::conf_directory')}/lister.yml"
swh::deploy::worker::lister::concurrency: 5
swh::deploy::worker::lister::loglevel: warning
swh::deploy::worker::lister::config:
storage: "%{alias('swh::remote_service::storage::config::writable')}"
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
lister: "%{alias('swh::deploy::lister::db::local')}"
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.lister.bitbucket.tasks.IncrementalBitBucketLister
- swh.lister.bitbucket.tasks.FullBitBucketRelister
- swh.lister.cgit.tasks.CGitListerTask
- swh.lister.cran.tasks.CRANListerTask
- swh.lister.debian.tasks.DebianListerTask
- swh.lister.gitlab.tasks.IncrementalGitLabLister
- swh.lister.gitlab.tasks.RangeGitLabLister
- swh.lister.gitlab.tasks.FullGitLabRelister
- swh.lister.gnu.tasks.GNUListerTask
- swh.lister.npm.tasks.NpmListerTask
- swh.lister.phabricator.tasks.FullPhabricatorLister
- swh.lister.pypi.tasks.PyPIListerTask
credentials: "%{alias('swh::deploy::worker::lister::config::credentials')}"
swh::deploy::loader_mercurial::sentry_swh_package: swh.loader.mercurial
swh::deploy::loader_mercurial::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::loader_mercurial::config_file: "%{hiera('swh::conf_directory')}/loader_mercurial.yml"
swh::deploy::worker::loader_mercurial::concurrency: 1
swh::deploy::worker::loader_mercurial::private_tmp: true
swh::deploy::worker::loader_mercurial::loglevel: info
swh::deploy::worker::loader_mercurial::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
  reduce_effort: false
clone_timeout_seconds: 7200
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.mercurial.tasks.LoadMercurial
- swh.loader.mercurial.tasks.LoadArchiveMercurial
swh::deploy::worker::loader_pypi::config_file: "%{hiera('swh::conf_directory')}/loader_pypi.yml"
swh::deploy::worker::loader_pypi::concurrency: 1
swh::deploy::worker::loader_pypi::private_tmp: true
swh::deploy::worker::loader_pypi::loglevel: info
swh::deploy::worker::loader_pypi::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.package.pypi.tasks.LoadPyPI
swh::deploy::worker::loader_npm::config_file: "%{hiera('swh::conf_directory')}/loader_npm.yml"
swh::deploy::worker::loader_npm::concurrency: 1
swh::deploy::worker::loader_npm::private_tmp: true
swh::deploy::worker::loader_npm::loglevel: info
swh::deploy::worker::loader_npm::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.package.npm.tasks.LoadNpm
swh::deploy::loader_svn::sentry_swh_package: swh.loader.svn
swh::deploy::loader_svn::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::loader_svn::config_file: "%{hiera('swh::conf_directory')}/loader_svn.yml"
swh::deploy::worker::loader_svn::concurrency: 1
swh::deploy::worker::loader_svn::private_tmp: true
swh::deploy::worker::loader_svn::limit_no_file: 8192
swh::deploy::worker::loader_svn::loglevel: info
# Contains a password: in private data
swh::deploy::worker::loader_svn::config:
storage: "%{alias('swh::deploy::worker::storage::pipeline')}"
max_content_size: 104857600
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_queues:
- swh.loader.svn.tasks.LoadSvnRepository
- swh.loader.svn.tasks.MountAndLoadSvnRepository
- swh.loader.svn.tasks.DumpMountAndLoadSvnRepository
swh::deploy::base_indexer::config_directory: "%{hiera('swh::conf_directory')}/indexer"
swh::deploy::indexer_journal_client::config_file: "journal_client.yml"
swh::deploy::indexer_journal_client::user: swhstorage
swh::deploy::indexer_journal_client::group: swhstorage
swh::deploy::indexer_journal_client::config:
journal:
brokers: "%{alias('swh::deploy::journal::brokers')}"
group_id: swh.indexer.journal_client
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
# for all indexers
swh::deploy::indexer::sentry_swh_package: swh.indexer
swh::deploy::indexer::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::indexer_content_mimetype::config_file: "%{hiera('swh::conf_directory')}/indexer_content_mimetype.yml"
swh::deploy::worker::indexer_content_mimetype::concurrency: 1
swh::deploy::worker::indexer_content_mimetype::loglevel: info
# Contains a password: in private data
swh::deploy::worker::indexer_content_mimetype::config:
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
indexer_storage: "%{alias('swh::remote_service::indexer::config::writable')}"
objstorage: "%{alias('swh::remote_service::objstorage::config')}"
storage: "%{alias('swh::remote_service::storage::config')}"
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_modules:
- swh.indexer.tasks
task_queues:
- swh.indexer.tasks.ContentMimetype
- swh.indexer.tasks.ContentRangeMimetype
tools:
name: file
version: 1:5.30-1+deb9u1
configuration:
type: library
debian-package: python3-magic
write_batch_size: 1000
swh::deploy::worker::indexer_origin_intrinsic_metadata::config_file: "%{hiera('swh::conf_directory')}/indexer_origin_intrinsic_metadata.yml"
swh::deploy::worker::indexer_origin_intrinsic_metadata::concurrency: 1
swh::deploy::worker::indexer_origin_intrinsic_metadata::loglevel: info
# Contains a password: in private data
swh::deploy::worker::indexer_origin_intrinsic_metadata::config:
scheduler: "%{alias('swh::remote_service::scheduler::config::writable')}"
indexer_storage: "%{alias('swh::remote_service::indexer::config::writable')}"
objstorage: "%{alias('swh::remote_service::objstorage::config')}"
storage: "%{alias('swh::remote_service::storage::config')}"
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_modules:
- swh.indexer.tasks
task_queues:
- swh.indexer.tasks.OriginMetadata
tools:
name: swh-metadata-detector
version: 0.0.2
configuration: {}
swh::deploy::worker::indexer_rehash::config_file: "rehash.yml"
swh::deploy::worker::indexer_rehash::concurrency: 5
swh::deploy::worker::indexer_rehash::loglevel: info
# Contains a password: in private data
swh::deploy::worker::indexer_rehash::config:
storage: "%{alias('swh::remote_service::storage::config::writable')}"
objstorage: "%{alias('swh::remote_service::objstorage::config')}"
compute_checksums:
- blake2s256
batch_size_retrieve_content: 10000
batch_size_update: 5000
swh::deploy::worker::indexer_fossology_license::config_file: "%{hiera('swh::conf_directory')}/indexer_fossology_license.yml"
swh::deploy::worker::indexer_fossology_license::concurrency: 1
swh::deploy::worker::indexer_fossology_license::loglevel: info
# Contains a password: in private data
swh::deploy::worker::indexer_fossology_license::config:
indexer_storage: "%{alias('swh::remote_service::indexer::config::writable')}"
objstorage: "%{alias('swh::remote_service::objstorage::config')}"
storage: "%{alias('swh::remote_service::storage::config')}"
workdir: /tmp/swh/indexer.fossology.license/
tools:
name: 'nomos'
version: '3.1.0rc2-31-ga2cbb8c'
configuration:
command_line: "nomossa <filepath>"
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_modules:
- swh.indexer.tasks
task_queues:
- swh.indexer.tasks.ContentFossologyLicense
- swh.indexer.tasks.ContentRangeFossologyLicense
write_batch_size: 1000
swh::deploy::worker::indexer_content_ctags::config_file: "%{hiera('swh::conf_directory')}/indexer_content_ctags.yml"
swh::deploy::worker::indexer_content_ctags::concurrency: 2
swh::deploy::worker::indexer_content_ctags::loglevel: info
# Contains a password: in private data
# objstorage configuration from swh::azure_objstorage::config is merged in the manifest
swh::deploy::worker::indexer_content_ctags::config:
indexer_storage: "%{alias('swh::remote_service::indexer::config::writable')}"
objstorage: "%{alias('swh::remote_service::objstorage::config')}"
workdir: /tmp/swh/indexer.ctags/
tools:
name: 'universal-ctags'
version: '0+git20181215-2'
configuration:
command_line: "ctags --fields=+lnz --sort=no --links=no --output-format=json <filepath>"
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_modules:
- swh.indexer.tasks
task_queues:
- swh.indexer.tasks.Ctags
languages:
abap: ''
abnf: ''
actionscript: ''
actionscript-3: ''
ada: Ada
adl: ''
agda: ''
alloy: ''
ambienttalk: ''
antlr: ''
antlr-with-actionscript-target: ''
antlr-with-c#-target: ''
antlr-with-cpp-target: ''
antlr-with-java-target: ''
antlr-with-objectivec-target: ''
antlr-with-perl-target: ''
antlr-with-python-target: ''
antlr-with-ruby-target: ''
apacheconf: ''
apl: ''
applescript: ''
arduino: ''
aspectj: ''
aspx-cs: ''
aspx-vb: ''
asymptote: ''
autohotkey: ''
autoit: ''
awk: Awk
base-makefile: Make
bash: Sh
bash-session: Sh
batchfile: DosBatch
bbcode: ''
bc: ''
befunge: ''
blitzbasic: Basic
blitzmax: ''
bnf: ''
boo: ''
boogie: ''
brainfuck: ''
bro: ''
bugs: ''
c: C
c#: C#
c++: C++
    c-objdump: Asm
    ca65-assembler: Asm
cadl: ''
camkes: ''
cbm-basic-v2: ''
ceylon: Java
cfengine3: ''
cfstatement: ''
chaiscript: ''
chapel: ''
cheetah: ''
cirru: ''
clay: ''
clojure: Clojure
clojurescript: Clojure
cmake: Make
cobol: Cobol
cobolfree: Cobol
coffeescript: CoffeeScript
coldfusion-cfc: HTML
coldfusion-html: HTML
common-lisp: Lisp
component-pascal: Pascal
coq: ''
cpp-objdump: Asm
cpsa: ''
crmsh: Sh
croc: ''
cryptol: ''
csound-document: ''
csound-orchestra: ''
csound-score: ''
css: CSS
css+django/jinja: CSS
css+genshi-text: CSS
css+lasso: CSS
css+mako: CSS
css+mozpreproc: CSS
css+myghty: CSS
css+php: CSS
css+ruby: CSS
css+smarty: CSS
cuda: ''
cypher: ''
cython: Python
d: D
d-objdump: Asm
darcs-patch: Diff
dart: ''
debian-control-file: ''
debian-sourcelist: ''
delphi: ''
dg: ''
diff: Diff
django/jinja: Python
docker: Iniconf
dtd: ''
duel: ''
dylan: ''
dylan-session: ''
dylanlid: ''
earl-grey: ''
easytrieve: ''
ebnf: ''
ec: ''
ecl: ''
eiffel: Eiffel
elixir: ''
elixir-iex-session: ''
elm: ''
emacslisp: Lisp
embedded-ragel: ''
erb: Ruby
erlang: Erlang
erlang-erl-session: Erlang
evoque: ''
ezhil: ''
factor: ''
fancy: ''
fantom: ''
felix: ''
fish: ''
fortran: Fortran
fortranfixed: Fortran
    foxpro: ''
fsharp: Ocaml
gap: ''
gas: ''
genshi: ''
genshi-text: ''
gettext-catalog: ''
gherkin: ''
glsl: ''
gnuplot: ''
go: Go
golo: ''
gooddata-cl: ''
gosu: ''
gosu-template: ''
groff: ''
groovy: ''
haml: ''
handlebars: ''
haskell: ''
haxe: ''
hexdump: ''
html: HTML
html+cheetah: HTML
html+django/jinja: HTML
html+evoque: HTML
html+genshi: HTML
html+handlebars: HTML
html+lasso: HTML
html+mako: HTML
html+myghty: HTML
html+php: HTML
html+smarty: HTML
html+twig: HTML
html+velocity: HTML
http: ''
hxml: ''
hy: Lisp
hybris: ''
idl: ''
idris: ''
igor: ''
inform-6: ''
inform-6-template: ''
inform-7: ''
ini: Iniconf
io: ''
ioke: ''
irc-logs: ''
isabelle: ''
j: ''
jade: ''
jags: ''
jasmin: ''
java: Java
java-server-page: Java
javascript: JavaScript
javascript+cheetah: JavaScript
javascript+django/jinja: JavaScript
javascript+genshi-text: JavaScript
javascript+lasso: JavaScript
javascript+mako: JavaScript
javascript+mozpreproc: JavaScript
javascript+myghty: JavaScript
javascript+php: JavaScript
javascript+ruby: JavaScript
javascript+smarty: JavaScript
jcl: ''
json: JSON
json-ld: JSON
julia: ''
julia-console: ''
kal: ''
kconfig: ''
koka: ''
kotlin: ''
lasso: ''
lean: ''
lesscss: CSS
lighttpd-configuration-file: Iniconf
limbo: ''
liquid: ''
literate-agda: ''
literate-cryptol: ''
literate-haskell: ''
literate-idris: ''
livescript: ''
llvm: ''
logos: ''
logtalk: ''
lsl: ''
lua: Lua
makefile: Make
mako: ''
maql: ''
mask: ''
mason: ''
mathematica: MatLab
matlab: MatLab
matlab-session: MatLab
minid: ''
modelica: ''
modula-2: ''
moinmoin/trac-wiki-markup: ''
monkey: ''
moocode: ''
moonscript: ''
mozhashpreproc: ''
mozpercentpreproc: ''
mql: ''
mscgen: ''
msdos-session: ''
mupad: ''
mxml: ''
myghty: ''
mysql: SQL
nasm: Asm
nemerle: ''
nesc: ''
newlisp: Lisp
newspeak: ''
nginx-configuration-file: ''
nimrod: ''
nit: ''
nix: ''
nsis: ''
numpy: ''
objdump: Asm
objdump-nasm: Asm
objective-c: ObjectiveC
    objective-c++: ObjectiveC
objective-j: ''
ocaml: Ocaml
octave: ''
odin: ''
ooc: ''
opa: ''
openedge-abl: ''
pacmanconf: ''
pan: ''
parasail: ''
pawn: ''
perl: Perl
perl6: Perl6
php: PHP
pig: ''
pike: ''
pkgconfig: ''
pl/pgsql: SQL
postgresql-console-(psql): ''
postgresql-sql-dialect: SQL
postscript: ''
povray: ''
powershell: ''
powershell-session: ''
praat: ''
prolog: ''
properties: Iniconf
protocol-buffer: Protobuf
puppet: ''
pypy-log: ''
python: Python
python-3: Python
python-3.0-traceback: Python
python-console-session: Python
python-traceback: Python
qbasic: ''
qml: ''
qvto: ''
    racket: Lisp
ragel: ''
ragel-in-c-host: ''
ragel-in-cpp-host: ''
ragel-in-d-host: ''
ragel-in-java-host: ''
ragel-in-objective-c-host: ''
ragel-in-ruby-host: ''
raw-token-data: ''
rconsole: ''
rd: ''
rebol: ''
red: ''
redcode: ''
reg: ''
resourcebundle: ''
restructuredtext: reStructuredText
rexx: REXX
rhtml: ''
roboconf-graph: ''
roboconf-instances: ''
robotframework: ''
rpmspec: RpmSpec
rql: ''
rsl: ''
ruby: Ruby
ruby-irb-session: Sh
rust: Rust
s: ''
sass: ''
scala: Java
scalate-server-page: ''
scaml: SML
scheme: Lisp
scilab: ''
scss: ''
shen: ''
slim: ''
smali: ''
smalltalk: ''
smarty: ''
snobol: ''
sourcepawn: ''
sparql: ''
sql: SQL
sqlite3con: SQL
squidconf: ''
stan: ''
standard-ml: SML
supercollider: ''
swift: ''
swig: ''
systemverilog: SystemVerilog
tads-3: ''
tap: ''
tcl: ''
tcsh: Sh
tcsh-session: Sh
tea: ''
termcap: ''
terminfo: ''
terraform: ''
tex: Tex
text-only: ''
thrift: ''
todotxt: ''
trafficscript: ''
treetop: ''
turtle: ''
twig: ''
typescript: ''
urbiscript: ''
vala: ''
vb.net: Basic
vctreestatus: ''
velocity: ''
verilog: Verilog
vgl: ''
vhdl: VHDL
viml: Vim
x10: ''
xml: ''
xml+cheetah: ''
xml+django/jinja: ''
xml+evoque: ''
xml+lasso: ''
xml+mako: ''
xml+myghty: ''
xml+php: ''
xml+ruby: ''
xml+smarty: ''
xml+velocity: ''
xquery: ''
xslt: XSLT
xtend: ''
xul+mozpreproc: ''
yaml: ''
yaml+jinja: ''
zephir: Zephir
unknown: ''
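    # The right-hand side is presumably a universal-ctags language name (as
    # listed by `ctags --list-languages`); an empty string marks languages
    # with no ctags counterpart, which the indexer can then skip.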
swh::deploy::vault_cooker::sentry_swh_package: swh.vault
swh::deploy::vault_cooker::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::worker::vault_cooker::config_file: "%{hiera('swh::conf_directory')}/vault_cooker.yml"
swh::deploy::worker::vault_cooker::concurrency: 20
swh::deploy::worker::vault_cooker::loglevel: info
swh::deploy::worker::vault_cooker::conf_file: "%{hiera('swh::conf_directory')}/vault/cooker.yml"
swh::deploy::worker::vault_cooker::config:
storage: "%{alias('swh::remote_service::storage::config')}"
vault: "%{alias('swh::remote_service::vault::config::writable')}"
celery:
task_broker: "%{alias('swh::deploy::worker::task_broker')}"
task_modules:
- swh.vault.cooking_tasks
task_queues:
- swh.vault.cooking_tasks.SWHCookingTask
- swh.vault.cooking_tasks.SWHBatchCookingTask
max_bundle_size: 1073741824 # 1GiB
desktop::printers:
MFP_C:
uri: lpd://print.paris.inria.fr/MFP_C-pro
description: Impression couleur
location: Partout
ppd: "%{hiera('desktop::printers::ppd_dir')}/MFP_Paris.ppd"
ppd_options:
ColorType: Color
MFP:
uri: lpd://print.paris.inria.fr/MFP-pro
description: Impression Noir et Blanc
location: Partout
ppd: "%{hiera('desktop::printers::ppd_dir')}/MFP_Paris.ppd"
ppd_options:
ColorType: Mono
desktop::printers::default: MFP
desktop::printers::ppd_dir: /usr/share/ppd/softwareheritage
desktop::printers::cups_usernames:
ardumont: andumont
morane: mgruenpe
olasd: ndandrim
seirl: apietri
zack: zacchiro
zookeeper::clusters:
rocquencourt:
'11': esnode1.internal.softwareheritage.org
'12': esnode2.internal.softwareheritage.org
'13': esnode3.internal.softwareheritage.org
azure:
'1': kafka01.euwest.azure.internal.softwareheritage.org
'2': kafka02.euwest.azure.internal.softwareheritage.org
'3': kafka03.euwest.azure.internal.softwareheritage.org
'4': kafka04.euwest.azure.internal.softwareheritage.org
'5': kafka05.euwest.azure.internal.softwareheritage.org
'6': kafka06.euwest.azure.internal.softwareheritage.org
zookeeper::datastore: /var/lib/zookeeper
zookeeper::client_port: 2181
zookeeper::election_port: 2888
zookeeper::leader_port: 3888
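# These are the stock ZooKeeper ports (2181 client, 2888 follower/quorum,
# 3888 leader election); the numeric keys in zookeeper::clusters above are
# presumably each server's myid.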
kafka::version: '2.2.0'
kafka::scala_version: '2.12'
kafka::mirror_url: https://mirrors.ircam.fr/pub/apache/
kafka::logdirs:
- /srv/kafka/logdir
kafka::broker_config:
log.dirs: "%{alias('kafka::logdirs')}"
num.recovery.threads.per.data.dir: 10
kafka::clusters:
rocquencourt:
zookeeper::chroot: '/kafka/softwareheritage'
zookeeper::servers:
- esnode1.internal.softwareheritage.org
- esnode2.internal.softwareheritage.org
- esnode3.internal.softwareheritage.org
brokers:
esnode1.internal.softwareheritage.org:
id: 11
esnode2.internal.softwareheritage.org:
id: 12
esnode3.internal.softwareheritage.org:
id: 13
broker::heap_opts: "-Xmx4G -Xms4G"
    tls: false
azure:
zookeeper::chroot: '/kafka/softwareheritage'
zookeeper::servers:
- kafka01.euwest.azure.internal.softwareheritage.org
- kafka02.euwest.azure.internal.softwareheritage.org
- kafka03.euwest.azure.internal.softwareheritage.org
- kafka04.euwest.azure.internal.softwareheritage.org
- kafka05.euwest.azure.internal.softwareheritage.org
- kafka06.euwest.azure.internal.softwareheritage.org
brokers:
kafka01.euwest.azure.internal.softwareheritage.org:
id: 1
kafka02.euwest.azure.internal.softwareheritage.org:
id: 2
kafka03.euwest.azure.internal.softwareheritage.org:
id: 3
kafka04.euwest.azure.internal.softwareheritage.org:
id: 4
kafka05.euwest.azure.internal.softwareheritage.org:
id: 5
kafka06.euwest.azure.internal.softwareheritage.org:
id: 6
broker::heap_opts: "-Xmx1G -Xms1G"
    tls: true
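  # Note: broker ids must be unique within a cluster; on rocquencourt they
  # match the zookeeper ids above. The per-cluster tls flag (false for
  # rocquencourt, true for azure) is the setting added by this change,
  # presumably toggling TLS on the brokers' listeners.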
# Real exported files from munin
stats_export::export_path: "/var/www/stats.export.softwareheritage.org"
stats_export::export_file: "%{hiera('stats_export::export_path')}/history_counters.json"
# Exposed through the following host's apache vhost
stats_export::vhost::name: stats.export.softwareheritage.org
stats_export::vhost::docroot: "/var/www/%{hiera('stats_export::vhost::name')}"
stats_export::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
stats_export::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
stats_export::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
stats_export::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
icinga2::role: agent
icinga2::master::zonename: master
icinga2::master::db::username: icinga2
# icinga2::master::db::password in private data
icinga2::master::db::database: icinga2
icinga2::icingaweb2::db::username: icingaweb2
# icinga2::icingaweb2::db::password in private data
icinga2::icingaweb2::db::database: icingaweb2
icinga2::icingaweb2::protected_customvars:
- "*pw*"
- "*pass*"
- community
- http_auth_pair
icinga2::icingaweb2::vhost::name: icinga.softwareheritage.org
icinga2::icingaweb2::vhost::aliases:
- icinga.internal.softwareheritage.org
icinga2::icingaweb2::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
icinga2::icingaweb2::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
icinga2::icingaweb2::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
icinga2::icingaweb2::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
icinga2::parent_zone: master
icinga2::parent_endpoints:
pergamon.softwareheritage.org:
host: 192.168.100.29
icinga2::network: "%{lookup('internal_network')}"
icinga2::features:
- checker
- mainlog
icinga2::service_configuration:
load:
default:
load_wload1: 30
load_wload5: 28
load_wload15: 26
load_cload1: 50
load_cload5: 45
load_cload15: 40
high:
load_wload1: 140
load_wload5: 120
load_wload15: 100
load_cload1: 240
load_cload5: 220
load_cload15: 200
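  # Interpretation of the threshold names (an assumption based on the naming):
  # load_wloadN / load_cloadN are warning / critical thresholds on the 1-, 5-
  # and 15-minute load averages; the "high" profile relaxes them for hosts
  # expected to run hot.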
icinga2::host::vars:
os: Linux
cores: "%{::processorcount}"
virtual_machine: "%{::is_virtual}"
distro: "%{::operatingsystem}"
disks:
'disk /':
disk_partitions: '/'
icinga2::disk::excludes:
- ^/srv/containers/
- ^/var/lib/docker/overlay2/
- ^/run/schroot/
icinga2::apiusers:
root:
# password in private data
permissions:
- '*'
icinga2::exported_checks::filename: "/etc/icinga2/zones.d/%{hiera('icinga2::parent_zone')}/exported-checks.conf"
systemd_journal::logstash_hosts:
- 'logstash.internal.softwareheritage.org:5044'
memcached::server::bind: 127.0.0.1
memcached::server::port: 11211
memcached::server::max_memory: '5%'
mountpoints:
/srv/storage/space:
device: uffizi:/srv/storage/space
fstype: nfs
options:
- rw
- soft
- intr
- rsize=8192
- wsize=8192
- noauto
- x-systemd.automount
- x-systemd.device-timeout=10
/srv/softwareheritage/objects:
device: uffizi:/srv/softwareheritage/objects
fstype: nfs
options:
- rw
- soft
- intr
- rsize=8192
- wsize=8192
- noauto
- x-systemd.automount
- x-systemd.device-timeout=10
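  # For illustration, the first mountpoint above is roughly equivalent to this
  # /etc/fstab line:
  #   uffizi:/srv/storage/space /srv/storage/space nfs rw,soft,intr,rsize=8192,wsize=8192,noauto,x-systemd.automount,x-systemd.device-timeout=10 0 0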
ceph::release: luminous
ceph::fsid: b3e34018-388e-499b-9579-d1c0d57e8c09
# needs to match the values of $::hostname on the ceph monitors
ceph::mon_initial_members:
- ceph-mon1
ceph::mon_host:
- 192.168.100.170
ceph::keys:
admin:
secret: "%{hiera('ceph::secrets::admin')}"
cap_mds: allow
cap_mgr: allow *
cap_mon: allow *
cap_osd: allow *
bootstrap-osd:
secret: "%{hiera('ceph::secrets::bootstrap_osd')}"
cap_mon: allow profile bootstrap-osd
proxmox-rbd:
secret: "%{hiera('ceph::secrets::proxmox_rbd')}"
cap_mon: profile rbd
cap_osd: profile rbd pool=rbd
swh-contents:
secret: "%{hiera('ceph::secrets::swh_contents')}"
cap_mon: allow r
cap_osd: allow r pool=swh_contents
swh-contents-rw:
secret: "%{hiera('ceph::secrets::swh_contents_rw')}"
cap_mon: allow r
cap_osd: allow rw pool=swh_contents
swh-contents-test:
secret: "%{hiera('ceph::secrets::swh_contents_test')}"
cap_mon: allow r
cap_osd: allow r pool=swh_contents_test
swh-contents-test-rw:
secret: "%{hiera('ceph::secrets::swh_contents_test_rw')}"
cap_mon: allow r
cap_osd: allow rw pool=swh_contents_test
ceph::default_client_keyring: /etc/softwareheritage/ceph-keyring
ceph::client_keyrings:
'/etc/softwareheritage/ceph-keyring':
owner: root
group: swhdev
mode: '0644'
keys:
- swh-contents
- swh-contents-test
swh::deploy::objstorage::ceph::keyring: "%{alias('ceph::default_client_keyring')}"
swh::deploy::objstorage::ceph::pool_name: swh_contents
swh::deploy::objstorage::ceph::rados_id: swh-contents
swh::deploy::objstorage::ceph::config:
cls: rados
args:
pool_name: "%{alias('swh::deploy::objstorage::ceph::pool_name')}"
rados_id: "%{alias('swh::deploy::objstorage::ceph::rados_id')}"
ceph_config:
keyring: "%{alias('swh::deploy::objstorage::ceph::keyring')}"
nginx::package_name: nginx-light
nginx::accept_mutex: 'off'
nginx::names_hash_bucket_size: 128
nginx::names_hash_max_size: 1024
nginx::worker_processes: "%{::processorcount}"
prometheus::server::defaults_config:
web:
enable_admin_api: true
storage:
tsdb:
retention: '1y'
prometheus::server::config::global:
scrape_interval: 1m
scrape_timeout: 45s
prometheus::server::listen_network: "%{lookup('internal_network')}"
prometheus::server::listen_port: 9090
prometheus::server::certname: pergamon.softwareheritage.org
swh::deploy::environment: production
prometheus::static_labels:
instance: "%{::swh_hostname.internal_fqdn}"
environment: "%{lookup('swh::deploy::environment')}"
prometheus::node::listen_network: "%{lookup('internal_network')}"
prometheus::node::listen_port: 9100
prometheus::node::textfile_directory: /var/lib/prometheus/node-exporter
prometheus::node::defaults_config:
collector:
diskstats:
ignored_devices: "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"
filesystem:
ignored_mount_points: "^/(sys|proc|dev|run|srv/softwareheritage/objects/[0-9a-f][0-9a-f])($|/)"
systemd: true
logind: true
loadavg: true
ntp: true
netstat: true
netdev:
ignored_devices: "^lo$"
textfile:
directory: "%{lookup('prometheus::node::textfile_directory')}"
prometheus::node::scripts::directory: /var/lib/prometheus/node-exporter-scripts
prometheus::node::scripts:
puppet-classes:
mode: cron
cron:
user: root
specification:
minute: fqdn_rand
apt:
mode: cron
cron:
user: root
specification:
minute: fqdn_rand
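  # minute: fqdn_rand presumably uses Puppet's fqdn_rand(60), which picks a
  # deterministic per-host minute, so the exporter scripts don't all fire at
  # the same time across the fleet.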
prometheus::statsd::listen_network: "%{lookup('internal_network')}"
prometheus::statsd::listen_port: 9102
prometheus::statsd::defaults_config: {}
prometheus::statsd::statsd_listen_tcp: 127.0.0.1:8125
prometheus::statsd::statsd_listen_udp: 127.0.0.1:8125
prometheus::statsd::mapping:
defaults:
timer_type: histogram
buckets:
- .005
- .01
- .025
- .05
- .1
- .25
- .5
- .75
- 1
- 2
- 5
- 10
- 15
- 30
- 45
- 60
- 120
- 300
- 600
- 900
- 1800
- 2700
- 3600
- 7200
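  # The buckets above are histogram upper bounds in seconds (5 ms up to 2
  # hours) for statsd timers; assuming standard statsd_exporter mapping
  # semantics, each timer becomes a Prometheus histogram with these
  # boundaries.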
prometheus::sql::listen_network: "%{lookup('internal_network')}"
prometheus::sql::listen_port: 9237
prometheus::sql::config_snippets:
- activity
- queries
- replication
- wal
prometheus::jmx::version: 0.11.0
prometheus::kafka::listen_network: "%{lookup('internal_network')}"
prometheus::kafka::listen_port: 7071
prometheus::kafka_consumer_group::listen_network: "%{lookup('internal_network')}"
prometheus::kafka_consumer_group::base_port: 9208
prometheus::rabbitmq::listen_network: "%{lookup('internal_network')}"
prometheus::rabbitmq::listen_port: 9419
# Include first, then skip
prometheus::rabbitmq::include_vhost: .*
prometheus::rabbitmq::skip_vhost: ^$
prometheus::rabbitmq::include_queues: .*
prometheus::rabbitmq::skip_queues: ^(.*\.pidbox|amq\.gen.*|.*\.tasks\.ping)$
prometheus::rabbitmq::rabbit_capabilities:
- bert
- no_sort
prometheus::rabbitmq::rabbit_exporters:
- exchange
- node
- queue
prometheus::rabbitmq::rabbit_timeout: 30
prometheus::rabbitmq::exclude_metrics: []
grafana::db::database: grafana
grafana::db::username: grafana
# grafana::db::password in private-data
grafana::backend::port: 3000
grafana::vhost::name: grafana.softwareheritage.org
grafana::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
grafana::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
grafana::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
grafana::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
grafana::config:
app_mode: production
server:
root_url: "https://%{lookup('grafana::vhost::name')}/"
http_port: "%{alias('grafana::backend::port')}"
users:
allow_sign_up: false
auth.anonymous:
enabled: true
org_name: Software Heritage
org_role: Viewer
smtp:
enabled: true
skip_verify: true
from_address: grafana@softwareheritage.org
grafana::objects::organizations:
- name: Software Heritage
id: 1
grafana::objects::users: []
grafana::objects::datasources:
- name: Prometheus (Pergamon)
url: "http://pergamon.internal.softwareheritage.org:%{hiera('prometheus::server::listen_port')}"
type: prometheus
organization: 1
access_mode: proxy
is_default: true
java::distribution: jre
jenkins::backend::url: http://thyssen.internal.softwareheritage.org:8080/
jenkins::vhost::name: jenkins.softwareheritage.org
jenkins::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
jenkins::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
jenkins::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
jenkins::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
jenkins::agent::jar_url: "https://%{hiera('jenkins::vhost::name')}/jnlpJars/agent.jar"
jenkins::agent::jnlp::url: "%{hiera('jenkins::backend::url')}computer/%{::swh_hostname.internal_fqdn}/slave-agent.jnlp"
# jenkins::agent::jnlp::token in private_data
weekly_report_bot::user: nobody
weekly_report_bot::cron:
minute: 0
hour: 12
weekday: fri
swh::postgres::service::users:
- root
- zack
- ardumont
swh::postgres::service::dbs:
- alias: swh
name: "%{hiera('swh::deploy::storage::db::dbname')}"
host: "%{hiera('swh::deploy::storage::db::host')}"
user: "%{hiera('swh::deploy::storage::db::user')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::storage::db::password')}"
- alias: swh-deposit
name: "%{hiera('swh::deploy::deposit::db::dbname')}"
host: "%{hiera('swh::deploy::deposit::db::host')}"
user: "%{hiera('swh::deploy::deposit::db::dbuser')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::deposit::db::password')}"
- alias: swh-scheduler
name: "%{hiera('swh::deploy::scheduler::db::dbname')}"
host: "%{hiera('swh::deploy::scheduler::db::host')}"
user: "%{hiera('swh::deploy::scheduler::db::user')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::scheduler::db::password')}"
- alias: swh-vault
name: "%{hiera('swh::deploy::vault::db::dbname')}"
host: "%{hiera('swh::deploy::vault::db::host')}"
user: "%{hiera('swh::deploy::vault::db::user')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::vault::db::password')}"
- alias: swh-lister
name: "%{hiera('swh::deploy::worker::lister::db::name')}"
host: "%{hiera('swh::deploy::worker::lister::db::host')}"
user: "%{hiera('swh::deploy::worker::lister::db::name')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::lister::db::password')}"
- alias: swh-replica
name: "%{hiera('swh::deploy::storage::db::dbname')}"
host: somerset.internal.softwareheritage.org
user: "%{hiera('swh::deploy::db::pgbouncer::user::login')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::storage::db::password')}"
- alias: swh-indexer
name: "%{hiera('swh::deploy::indexer::storage::db::dbname')}"
host: "%{hiera('swh::deploy::indexer::storage::db::host')}"
user: "%{hiera('swh::deploy::indexer::storage::db::user')}"
port: "%{hiera('swh::deploy::db::pgbouncer::port')}"
passwd: "%{hiera('swh::deploy::indexer::storage::db::password')}"
elastic::elk_version: '7.5.2'
kibana::listen_network: "%{lookup('internal_network')}"
kibana::server_name: "%{::swh_hostname.internal_fqdn}"
kibana::config:
server.name: "%{alias('kibana::server_name')}"
elasticsearch.hosts:
- http://esnode1.internal.softwareheritage.org:9200
- http://esnode2.internal.softwareheritage.org:9200
- http://esnode3.internal.softwareheritage.org:9200
kibana.index: .kibana-6
# sentry::secret_key in private-data
sentry::postgres::host: db.internal.softwareheritage.org
sentry::postgres::port: 5432
sentry::postgres::dbname: sentry
sentry::postgres::user: sentry
# sentry::postgres::password in private-data
sentry::kafka_cluster: rocquencourt
sentry::admin_email: sysop+sentry@softwareheritage.org
sentry::mail::host: "%{lookup('smtp::relay_hostname')}"
sentry::mail::from: sentry@softwareheritage.org
sentry::backend::url: http://riverside.internal.softwareheritage.org:9000/
sentry::vhost::name: sentry.softwareheritage.org
sentry::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
sentry::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
sentry::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
sentry::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
keycloak::vhost::name: idp.softwareheritage.org
keycloak::vhost::ssl_protocol: "%{hiera('apache::ssl_protocol')}"
keycloak::vhost::ssl_honorcipherorder: "%{hiera('apache::ssl_honorcipherorder')}"
keycloak::vhost::ssl_cipher: "%{hiera('apache::ssl_cipher')}"
keycloak::vhost::hsts_header: "%{hiera('apache::hsts_header')}"
keycloak::backend::port: 8080
keycloak::backend::url: "http://kelvingrove.internal.softwareheritage.org:%{lookup('keycloak::backend::port')}/"
keycloak::admin::user: keycloak-admin
# keycloak::admin::password in private-data
keycloak::postgres::host: db.internal.softwareheritage.org
keycloak::postgres::port: 5432
keycloak::postgres::dbname: keycloak
keycloak::postgres::user: keycloak
# keycloak::postgres::password in private-data
cassandra::release: 311x
cassandra::cluster: azure
cassandra::listen_network: "%{lookup('internal_network')}"
cassandra::baseline_settings:
# NOTE:
# See http://wiki.apache.org/cassandra/StorageConfiguration for
# full explanations of configuration directives
# /NOTE
# This defines the number of tokens randomly assigned to this node on the ring
# The more tokens, relative to other nodes, the larger the proportion of data
# that this node will store. You probably want all nodes to have the same number
# of tokens assuming they have equal hardware capability.
#
# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
# and will use the initial_token as described below.
#
  # Specifying initial_token will override this setting on the node's initial
  # start; on subsequent starts, this setting will apply even if initial_token
  # is set.
#
# If you already have a cluster with 1 token per node, and wish to migrate to
# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
num_tokens: 256
# Triggers automatic allocation of num_tokens tokens for this node. The allocation
# algorithm attempts to choose tokens in a way that optimizes replicated load over
# the nodes in the datacenter for the replication strategy used by the specified
# keyspace.
#
# The load assigned to each node will be close to proportional to its number of
# vnodes.
#
# Only supported with the Murmur3Partitioner.
# allocate_tokens_for_keyspace: KEYSPACE
# initial_token allows you to specify tokens manually. While you can use it with
# vnodes (num_tokens > 1, above) -- in which case you should provide a
# comma-separated list -- it's primarily used when adding nodes to legacy clusters
# that do not have vnodes enabled.
# initial_token:
# See http://wiki.apache.org/cassandra/HintedHandoff
# May either be "true" or "false" to enable globally
hinted_handoff_enabled: true
# When hinted_handoff_enabled is true, a black list of data centers that will not
# perform hinted handoff
# hinted_handoff_disabled_datacenters:
# - DC1
# - DC2
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, new hints for it will not be
# created until it has been seen alive and gone down again.
max_hint_window_in_ms: 10800000 # 3 hours
# Maximum throttle in KBs per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# since we expect two nodes to be delivering hints simultaneously.)
hinted_handoff_throttle_in_kb: 1024
# Number of threads with which to deliver hints;
# Consider increasing this number when you have multi-dc deployments, since
# cross-dc handoff tends to be slower
max_hints_delivery_threads: 2
# How often hints should be flushed from the internal buffers to disk.
# Will *not* trigger fsync.
hints_flush_period_in_ms: 10000
# Maximum size for a single hints file, in megabytes.
max_hints_file_size_in_mb: 128
# Compression to apply to the hint files. If omitted, hints files
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
#hints_compression:
# - class_name: LZ4Compressor
# parameters:
# -
# Maximum throttle in KBs per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
batchlog_replay_throttle_in_kb: 1024
# Authentication backend, implementing IAuthenticator; used to identify users
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
# PasswordAuthenticator}.
#
# - AllowAllAuthenticator performs no checks - set it to disable authentication.
# - PasswordAuthenticator relies on username/password pairs to authenticate
# users. It keeps usernames and hashed passwords in system_auth.roles table.
# Please increase system_auth keyspace replication factor if you use this authenticator.
# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
authenticator: AllowAllAuthenticator
# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
# CassandraAuthorizer}.
#
# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
# - CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please
# increase system_auth keyspace replication factor if you use this authorizer.
authorizer: AllowAllAuthorizer
# Part of the Authentication & Authorization backend, implementing IRoleManager; used
# to maintain grants and memberships between roles.
# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
# which stores role information in the system_auth keyspace. Most functions of the
# IRoleManager require an authenticated login, so unless the configured IAuthenticator
# actually implements authentication, most of this functionality will be unavailable.
#
# - CassandraRoleManager stores role data in the system_auth keyspace. Please
# increase system_auth keyspace replication factor if you use this role manager.
role_manager: CassandraRoleManager
# Validity period for roles cache (fetching granted roles can be an expensive
# operation depending on the role manager, CassandraRoleManager is one example)
# Granted roles are cached for authenticated sessions in AuthenticatedUser and
# after the period specified here, become eligible for (async) reload.
# Defaults to 2000, set to 0 to disable caching entirely.
# Will be disabled automatically for AllowAllAuthenticator.
roles_validity_in_ms: 2000
# Refresh interval for roles cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If roles_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as roles_validity_in_ms.
# roles_update_interval_in_ms: 2000
# Validity period for permissions cache (fetching permissions can be an
# expensive operation depending on the authorizer, CassandraAuthorizer is
# one example). Defaults to 2000, set to 0 to disable.
# Will be disabled automatically for AllowAllAuthorizer.
permissions_validity_in_ms: 2000
# Refresh interval for permissions cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If permissions_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as permissions_validity_in_ms.
# permissions_update_interval_in_ms: 2000
# Validity period for credentials cache. This cache is tightly coupled to
# the provided PasswordAuthenticator implementation of IAuthenticator. If
# another IAuthenticator implementation is configured, this cache will not
# be automatically used and so the following settings will have no effect.
# Please note, credentials are cached in their encrypted form, so while
# activating this cache may reduce the number of queries made to the
# underlying table, it may not bring a significant reduction in the
# latency of individual authentication attempts.
# Defaults to 2000, set to 0 to disable credentials caching.
credentials_validity_in_ms: 2000
# Refresh interval for credentials cache (if enabled).
# After this interval, cache entries become eligible for refresh. Upon next
# access, an async reload is scheduled and the old value returned until it
# completes. If credentials_validity_in_ms is non-zero, then this must be
# also.
# Defaults to the same value as credentials_validity_in_ms.
# credentials_update_interval_in_ms: 2000
# The partitioner is responsible for distributing groups of rows (by
# partition key) across nodes in the cluster. You should leave this
# alone for new clusters. The partitioner can NOT be changed without
# reloading all data, so when upgrading you should set this to the
# same partitioner you were already using.
#
# Besides Murmur3Partitioner, partitioners included for backwards
# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
# OrderPreservingPartitioner.
#
partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
# for write path allocation rejection (standard: never reject. cdc: reject Mutation
# containing a CDC-enabled table if at space limit in cdc_raw_directory).
cdc_enabled: false
# Policy for data disk failures:
#
# die
# shut down gossip and client transports and kill the JVM for any fs errors or
# single-sstable errors, so the node can be replaced.
#
# stop_paranoid
# shut down gossip and client transports even for single-sstable errors,
# kill the JVM for errors during startup.
#
# stop
# shut down gossip and client transports, leaving the node effectively dead, but
# can still be inspected via JMX, kill the JVM for errors during startup.
#
# best_effort
# stop using the failed disk and respond to requests based on
# remaining available sstables. This means you WILL see obsolete
# data at CL.ONE!
#
# ignore
# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
disk_failure_policy: stop
# Policy for commit disk failures:
#
# die
# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
#
# stop
# shut down gossip and Thrift, leaving the node effectively dead, but
# can still be inspected via JMX.
#
# stop_commit
# shutdown the commit log, letting writes collect but
# continuing to service reads, as in pre-2.0.5 Cassandra
#
# ignore
# ignore fatal errors and let the batches fail
commit_failure_policy: stop
# Maximum size of the native protocol prepared statement cache
#
  # Valid values are either "auto" (omitting the value) or a value greater than 0.
#
  # Note that specifying too large a value will result in long running GCs and possibly
# out-of-memory errors. Keep the value at a small fraction of the heap.
#
# If you constantly see "prepared statements discarded in the last minute because
# cache limit reached" messages, the first step is to investigate the root cause
# of these messages and check whether prepared statements are used correctly -
# i.e. use bind markers for variable parts.
#
  # Only change the default value if you really have more prepared statements than
  # fit in the cache. In most cases it is not necessary to change this value.
# Constantly re-preparing statements is a performance penalty.
#
# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
#prepared_statements_cache_size_mb:
# Maximum size of the Thrift prepared statement cache
#
# If you do not use Thrift at all, it is safe to leave this value at "auto".
#
# See description of 'prepared_statements_cache_size_mb' above for more information.
#
# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
#thrift_prepared_statements_cache_size_mb:
# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
# minimum, sometimes more. The key cache is fairly tiny for the amount of
# time it saves, so it's worthwhile to use it at large numbers.
# The row cache saves even more time, but must contain the entire row,
# so it is extremely space-intensive. It's best to only use the
# row cache if you have hot rows or static rows.
#
  # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
#key_cache_size_in_mb:
# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
  # Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 14400 or 4 hours.
key_cache_save_period: 14400
# Number of keys from the key cache to save
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100
# Row cache implementation class name. Available implementations:
#
# org.apache.cassandra.cache.OHCProvider
# Fully off-heap row cache implementation (default).
#
# org.apache.cassandra.cache.SerializingCacheProvider
  # This is the row cache implementation available
# in previous releases of Cassandra.
# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
# Maximum size of the row cache in memory.
# Please note that OHC cache implementation requires some additional off-heap memory to manage
# the map structures and some in-flight memory during operations before/after cache entries can be
# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
  # Do not specify more memory than the system can afford in the worst usual
  # situation, and leave some headroom for the OS block level cache. Never
  # allow your system to swap.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 0
# Duration in seconds after which Cassandra should save the row cache.
# Caches are saved to saved_caches_directory as specified in this configuration file.
#
  # Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0
# Number of keys from the row cache to save.
# Specify 0 (which is the default), meaning all keys are going to be saved
# row_cache_keys_to_save: 100
# Maximum size of the counter cache in memory.
#
# Counter cache helps to reduce counter locks' contention for hot counter cells.
# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
# of the lock hold, helping with hot counter cell updates, but will not allow skipping
# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
# in memory, not the whole counter, so it's relatively cheap.
#
  # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
#counter_cache_size_in_mb:
# Duration in seconds after which Cassandra should
# save the counter cache (keys only). Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Default is 7200 or 2 hours.
counter_cache_save_period: 7200
# Number of keys from the counter cache to save
# Disabled by default, meaning all keys are going to be saved
# counter_cache_keys_to_save: 100
# commitlog_sync may be either "periodic" or "batch."
#
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait
# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
# This window should be kept short because the writer threads will
# be unable to do extra work while waiting. (You may need to increase
# concurrent_writes for the same reason.)
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 2
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
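# Note: with the periodic settings above, writes can be acknowledged up to
# commitlog_sync_period_in_ms (here 10 seconds) before the commit log is
# fsynced, so a sudden whole-node failure can lose up to that window of
# acknowledged writes on this replica; "batch" trades throughput for a
# smaller durability window.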
# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32 MB, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
# Max mutation size is also configurable via max_mutation_size_in_kb setting in
# cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
# This should be positive and less than 2048.
#
# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
# be set to at least twice the size of max_mutation_size_in_kb / 1024
#
commitlog_segment_size_in_mb: 32
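# With the 32 MB segments configured above, the implied default
# max_mutation_size_in_kb is 32 * 1024 / 2 = 16384 KB (i.e. 16 MB mutations).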
# Compression to apply to the commit log. If omitted, the commit log
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
# commitlog_compression:
# - class_name: LZ4Compressor
# parameters:
# -
# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them. Same applies to
# "concurrent_counter_writes", since counter writes read the current
# values before incrementing and writing them back.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
concurrent_reads: 64
concurrent_writes: 96
concurrent_counter_writes: 64
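# For illustration (inferred sizing, adjust to your hardware): the values
# above match the rules of thumb for roughly 4 data drives
# (16 * 4 = 64 concurrent reads and counter writes) and 12 cores
# (8 * 12 = 96 concurrent writes).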
# For materialized view writes, as there is a read involved, this should
# be limited by the lesser of concurrent reads or concurrent writes.
concurrent_materialized_view_writes: 32
# Maximum memory to use for sstable chunk cache and buffer pooling.
# 32MB of this is reserved for pooling buffers, the rest is used as a
# cache that holds uncompressed sstable chunks.
# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
# so is in addition to the memory allocated for heap. The cache also has on-heap
# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
# if the default 64k chunk size is used).
# Memory is only allocated when needed.
# file_cache_size_in_mb: 512
# Flag indicating whether to allocate on or off heap when the sstable buffer
# pool is exhausted, that is when it has exceeded the maximum memory
# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
# buffer_pool_use_heap_if_exhausted: true
# The strategy for optimizing disk read
# Possible values are:
# ssd (for solid state disks, the default)
# spinning (for spinning disks)
# disk_optimization_strategy: ssd
# Total permitted memory to use for memtables. Cassandra will stop
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold
# If omitted, Cassandra will set both to 1/4 the size of the heap.
# memtable_heap_space_in_mb: 2048
# memtable_offheap_space_in_mb: 2048
# memtable_cleanup_threshold is deprecated. The default calculation
# is the only reasonable choice. See the comments on memtable_flush_writers
# for more information.
#
# Ratio of occupied non-flushing memtable size to total permitted size
# that will trigger a flush of the largest memtable. A larger threshold will
# mean larger flushes and hence less compaction, but also less concurrent
# flush activity which can make it difficult to keep your disks fed
# under heavy write load.
#
# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
# memtable_cleanup_threshold: 0.11
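# For example, with the default of 2 memtable_flush_writers this works out
# to 1 / (2 + 1) = 0.33; the commented value of 0.11 would correspond to
# 8 flush writers (1 / (8 + 1) = 0.11).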
# Specify the way Cassandra allocates and manages memtable memory.
# Options are:
#
# heap_buffers
# on heap nio buffers
#
# offheap_buffers
# off heap (direct) nio buffers
#
# offheap_objects
# off heap objects
memtable_allocation_type: heap_buffers
# Total space to use for commit logs on disk.
#
# If space gets above this value, Cassandra will flush every dirty CF
# in the oldest segment and remove it. So a small total commitlog space
# will tend to cause more flush activity on less-active columnfamilies.
#
# The default value is the smaller of 8192, and 1/4 of the total space
# of the commitlog volume.
#
# commitlog_total_space_in_mb: 8192
# This sets the number of memtable flush writer threads per disk
# as well as the total number of memtables that can be flushed concurrently.
# These are generally a combination of compute and IO bound.
#
# Memtable flushing is more CPU efficient than memtable ingest and a single thread
# can keep up with the ingest rate of a whole server on a single fast disk
# until it temporarily becomes IO bound under contention typically with compaction.
# At that point you need multiple flush threads. At some point in the future
# it may become CPU bound all the time.
#
# You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation
# metric which should be 0, but will be non-zero if threads are blocked waiting on flushing
# to free memory.
#
# memtable_flush_writers defaults to two for a single data directory.
# This means that two memtables can be flushed concurrently to the single data directory.
# If you have multiple data directories the default is one memtable flushing at a time
# but the flush will use a thread per data directory so you will get two or more writers.
#
# Two is generally enough to flush on a fast disk [array] mounted as a single data directory.
# Adding more flush writers will result in smaller more frequent flushes that introduce more
# compaction overhead.
#
# There is a direct tradeoff between number of memtables that can be flushed concurrently
# and flush size and frequency. More is not better; you just need enough flush writers
# to never stall waiting for flushing to free memory.
#
#memtable_flush_writers: 2
# Total space to use for change-data-capture logs on disk.
#
# If space gets above this value, Cassandra will throw WriteTimeoutException
# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
# for parsing the raw CDC logs and deleting them when parsing is completed.
#
# The default value is the min of 4096 MB and 1/8th of the total space
# of the drive where cdc_raw_directory resides.
# cdc_total_space_in_mb: 4096
# When we hit our cdc_raw limit and the CDCCompactor is either running behind
# or experiencing backpressure, we check at the following interval to see if any
# new space for cdc-tracked tables has been made available. Defaults to 250ms
# cdc_free_space_check_interval_ms: 250
# A fixed memory pool size in MB for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
# all index summaries exceeds this limit, SSTables with low read rates will
# shrink their index summaries in order to meet this limit. However, this
# is a best-effort process. In extreme conditions Cassandra may need to use
# more than this amount of memory.
#index_summary_capacity_in_mb:
# How frequently index summaries should be resampled. This is done
# periodically to redistribute memory from the fixed-size pool to sstables
# proportional to their recent read rates. Setting to -1 will disable this
# process, leaving existing index summaries at their current sampling level.
index_summary_resize_interval_in_minutes: 60
# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
# impacting read latencies. Almost always a good idea on SSDs; not
# necessarily on platters.
trickle_fsync: true
trickle_fsync_interval_in_kb: 10240
# TCP port, for commands and data
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
storage_port: 7000
# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
ssl_storage_port: 7001
# Set listen_address OR listen_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
# listen_interface: eth0
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
# listen_interface_prefer_ipv6: false
# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4
# When using multiple physical network interfaces, set this
# to true to listen on broadcast_address in addition to
# the listen_address, allowing nodes to communicate in both
# interfaces.
# Ignore this property if the network configuration automatically
# routes between the public and private networks such as EC2.
# listen_on_broadcast_address: false
# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
# Whether to start the native transport server.
# Please note that the address on which the native transport is bound is the
# same as the rpc_address. The port however is different and specified below.
start_native_transport: true
# port for the CQL native transport to listen for clients on
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
native_transport_port: 9042
# Enabling native transport encryption in client_encryption_options allows you to either use
# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
# standard native_transport_port.
# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
# for native_transport_port. Setting native_transport_port_ssl to a different value
# from native_transport_port will use encryption for native_transport_port_ssl while
# keeping native_transport_port unencrypted.
# native_transport_port_ssl: 9142
# The maximum threads for handling requests when the native transport is used.
# This is similar to rpc_max_threads though the default differs slightly (and
# there is no native_transport_min_threads, idle threads will always be stopped
# after 30 seconds).
# native_transport_max_threads: 128
#
# The maximum allowed frame size. Frames (requests) larger than this will
# be rejected as invalid. The default is 256MB. If you're changing this parameter,
# you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.
# native_transport_max_frame_size_in_mb: 256
# The maximum number of concurrent client connections.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections: -1
# The maximum number of concurrent client connections per source ip.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections_per_ip: -1
# Whether to start the thrift rpc server.
start_rpc: false
# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
# rpc_interface: eth1
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
# rpc_interface_prefer_ipv6: false
# port for Thrift to listen for clients on
rpc_port: 9160
# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
# be set to 0.0.0.0. If left blank, this will be set to the value of
# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
# be set.
# broadcast_rpc_address: 1.2.3.4
# enable or disable keepalive on rpc/native connections
rpc_keepalive: true
# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync
# One thread per thrift connection. For a very large number of clients, memory
# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
# per thread, and that will correspond to your use of virtual memory (but physical memory
# may be limited depending on use of stack space).
#
# hsha
# Stands for "half synchronous, half asynchronous." All thrift clients are handled
# asynchronously using a small number of threads that does not vary with the number
# of thrift clients (and thus scales well to many clients). The rpc requests are still
# synchronous (one thread per active request). If hsha is selected then it is essential
# that rpc_max_threads is changed from the default value of unlimited.
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync
# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the number of maximum requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:
# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it, it is defined by net.ipv4.tcp_wmem
# See also:
# /proc/sys/net/core/wmem_max
# /proc/sys/net/core/rmem_max
# /proc/sys/net/ipv4/tcp_wmem
# /proc/sys/net/ipv4/tcp_rmem
# and 'man tcp'
# internode_send_buff_size_in_bytes:
# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it, it is defined by net.ipv4.tcp_wmem
# internode_recv_buff_size_in_bytes:
# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15
# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: false
# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
# snapshots for you. Mostly useful if you're paranoid when there
# is a data format change.
snapshot_before_compaction: false
# Whether or not a snapshot is taken of the data before keyspace truncation
# or dropping of column families. The STRONGLY advised default of true
# should be used to provide data safety. If you set this flag to false, you will
# lose data on truncation or drop.
auto_snapshot: true
# Granularity of the collation index of rows within a partition.
# Increase if your rows are large, or if you have a very large
# number of rows per partition. The competing goals are these:
#
# - a smaller granularity means more index entries are generated
# and looking up rows within the partition by collation column
# is faster
# - but, Cassandra will keep the collation index in memory for hot
# rows (as part of the key cache), so a larger granularity means
# you can cache more hot rows
column_index_size_in_kb: 64
# Per sstable indexed key cache entries (the collation index in memory
# mentioned above) exceeding this size will not be held on heap.
# This means that only partition information is held on heap and the
# index entries are read from disk.
#
# Note that this size refers to the size of the
# serialized index information and not the size of the partition.
column_index_cache_size_in_kb: 2
# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
# workload, by mitigating the tendency of small sstables to accumulate
# during a single long running compaction. The default is usually
# fine and if you experience problems with compaction running too
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
# concurrent_compactors defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
#concurrent_compactors: 1
# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
# Setting this to 0 disables throttling. Note that this accounts for all types
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16
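# As a worked example (hypothetical ingest rate): at a sustained insert rate
# of ~1 MB/s, the guideline above suggests 16 to 32 MB/s of compaction
# throughput, so the value of 16 set here is sized for roughly that load.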
# When compacting, the replacement sstable(s) can be opened before they
# are completely written, and used in place of the prior sstables for
# any range that has been written. This helps to smoothly transfer reads
# between the sstables, reducing page cache churn and keeping hot rows hot
sstable_preemptive_open_interval_in_mb: 50
# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200
# Throttles all streaming file transfer between the datacenters,
# this setting allows users to throttle inter dc stream throughput in addition
# to throttling all network stream traffic as configured with
# stream_throughput_outbound_megabits_per_sec
# When unset, the default is 200 Mbps or 25 MB/s
# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
# How long the coordinator should wait for seq or index scans to complete
range_request_timeout_in_ms: 10000
# How long the coordinator should wait for writes to complete
write_request_timeout_in_ms: 2000
# How long the coordinator should wait for counter writes to complete
counter_write_request_timeout_in_ms: 5000
# How long a coordinator should continue to retry a CAS operation
# that contends with other proposals for the same row
cas_contention_timeout_in_ms: 1000
# How long the coordinator should wait for truncates to complete
# (This can be much longer, because unless auto_snapshot is disabled
# we need to flush first so we can snapshot before removing the data.)
truncate_request_timeout_in_ms: 60000
# The default timeout for other, miscellaneous operations
request_timeout_in_ms: 10000
# How long before a node logs slow queries. Select queries that take longer than
# this timeout to execute will generate an aggregated log message, so that slow queries
# can be identified. Set this value to zero to disable slow query logging.
slow_query_log_timeout_in_ms: 500
# Enable operation timeout information exchange between nodes to accurately
# measure request timeouts. If disabled, replicas will assume that requests
# were forwarded to them instantly by the coordinator, which means that
# under overload conditions we will waste that much extra time processing
# already-timed-out requests.
#
# Warning: before enabling this property make sure ntp is installed
# and the times are synchronized between the nodes.
cross_node_timeout: false
# Set keep-alive period for streaming
# This node will send a keep-alive message periodically with this period.
# If the node does not receive a keep-alive message from the peer for
# 2 keep-alive cycles the stream session times out and fails.
# Default value is 300s (5 minutes), which means a stalled stream
# times out in 10 minutes by default
# streaming_keep_alive_period_in_secs: 300
# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8
# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
#
# - it teaches Cassandra enough about your network topology to route
# requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
# correlated failures. It does this by grouping machines into
# "datacenters" and "racks." Cassandra will do its best not to have
# more than one replica on the same "rack" (which may not actually
# be a physical location)
#
# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
# This means that if you start with the default SimpleSnitch, which
# locates every node on "rack1" in "datacenter1", your only options
# if you need to add another datacenter are GossipingPropertyFileSnitch
# (and the older PFS). From there, if you want to migrate to an
# incompatible snitch like Ec2Snitch you can do it by adding new nodes
# under Ec2Snitch (which will locate them in a new "datacenter") and
# decommissioning the old ones.
#
# Out of the box, Cassandra provides:
#
# SimpleSnitch:
# Treats Strategy order as proximity. This can improve cache
# locality when disabling read repair. Only appropriate for
# single-datacenter deployments.
#
# GossipingPropertyFileSnitch
# This should be your go-to snitch for production use. The rack
# and datacenter for the local node are defined in
# cassandra-rackdc.properties and propagated to other nodes via
# gossip. If cassandra-topology.properties exists, it is used as a
# fallback, allowing migration from the PropertyFileSnitch.
#
# PropertyFileSnitch:
# Proximity is determined by rack and data center, which are
# explicitly configured in cassandra-topology.properties.
#
# Ec2Snitch:
# Appropriate for EC2 deployments in a single Region. Loads Region
# and Availability Zone information from the EC2 API. The Region is
# treated as the datacenter, and the Availability Zone as the rack.
# Only private IPs are used, so this will not work across multiple
# Regions.
#
# Ec2MultiRegionSnitch:
# Uses public IPs as broadcast_address to allow cross-region
# connectivity. (Thus, you should set seed addresses to the public
# IP as well.) You will need to open the storage_port or
# ssl_storage_port on the public IP firewall. (For intra-Region
# traffic, Cassandra will switch to the private IP after
# establishing a connection.)
#
# RackInferringSnitch:
# Proximity is determined by rack and data center, which are
# assumed to correspond to the 3rd and 2nd octet of each node's IP
# address, respectively. Unless this happens to match your
# deployment conventions, this is best used as an example of
# writing a custom Snitch class and is provided in that spirit.
#
# You can use a custom Snitch by setting this to the full class name
# of the snitch, which will be assumed to be on your classpath.
endpoint_snitch: SimpleSnitch
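# As a sketch, if this cluster later moved to GossipingPropertyFileSnitch,
# each node would carry a cassandra-rackdc.properties along these lines
# (dc/rack names are hypothetical):
#   dc=datacenter1
#   rack=rack1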
# controls how often to perform the more expensive part of host score
# calculation
dynamic_snitch_update_interval_in_ms: 100
# controls how often to reset all host scores, allowing a bad host to
# possibly recover
dynamic_snitch_reset_interval_in_ms: 600000
# if set greater than zero and read_repair_chance is < 1.0, this will allow
# 'pinning' of replicas to hosts in order to increase cache capacity.
# The badness threshold will control how much worse the pinned host has to be
# before the dynamic snitch will prefer other replicas over it. This is
# expressed as a double which represents a percentage. Thus, a value of
# 0.2 means Cassandra would continue to prefer the static snitch values
# until the pinned host was 20% worse than the fastest.
dynamic_snitch_badness_threshold: 0.1
# request_scheduler -- Set this to a class that implements
# RequestScheduler, which will schedule incoming client requests
# according to the specific policy. This is useful for multi-tenancy
# with a single Cassandra cluster.
# NOTE: This is specifically for requests from the client and does
# not affect inter node communication.
# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
# client requests to a node with a separate queue for each
# request_scheduler_id. The scheduler is further customized by
# request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.NoScheduler
# Scheduler Options vary based on the type of scheduler
#
# NoScheduler
# Has no options
#
# RoundRobin
# throttle_limit
# The throttle_limit is the number of in-flight
# requests per client. Requests beyond
# that limit are queued up until
# running requests can complete.
# The value of 80 here is twice the number of
# concurrent_reads + concurrent_writes.
# default_weight
# default_weight is optional and allows for
# overriding the default which is 1.
# weights
# Weights are optional and will default to 1 or the
# overridden default_weight. The weight translates into how
# many requests are handled during each turn of the
# RoundRobin, based on the scheduler id.
#
# request_scheduler_options:
# throttle_limit: 80
# default_weight: 5
# weights:
# Keyspace1: 1
# Keyspace2: 5
# request_scheduler_id -- An identifier based on which to perform
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace
# Enable or disable inter-node encryption
# JVM defaults for supported SSL socket protocols and cipher suites can
# be replaced using custom encryption options. This is not recommended
# unless you have policies in place that dictate certain settings, or
# need to disable vulnerable ciphers or protocols in case the JVM cannot
# be updated.
# FIPS compliant settings can be configured at JVM level and should not
# involve changing encryption settings here:
# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
# *NOTE* No custom encryption options are enabled at the moment
# The available internode options are: all, none, dc, rack
#
# If set to dc, Cassandra will encrypt the traffic between the DCs
# If set to rack, Cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
server_encryption_options:
internode_encryption: none
keystore: conf/.keystore
keystore_password: cassandra
truststore: conf/.truststore
truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
# require_client_auth: false
# require_endpoint_verification: false
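# As a sketch (alias, CN and passwords are illustrative), the keystore and
# truststore referenced above can be generated with the JDK keytool:
#   keytool -genkeypair -keyalg RSA -alias node1 -keystore conf/.keystore \
#     -storepass cassandra -dname "CN=node1.internal.softwareheritage.org"
#   keytool -exportcert -alias node1 -keystore conf/.keystore \
#     -storepass cassandra -file node1.cer
#   keytool -importcert -alias node1 -file node1.cer \
#     -keystore conf/.truststore -storepass cassandra -noprompt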
# enable or disable client/server encryption.
client_encryption_options:
enabled: false
# If enabled and optional is set to true encrypted and unencrypted connections are handled.
optional: false
keystore: conf/.keystore
keystore_password: cassandra
# require_client_auth: false
# Set truststore and truststore_password if require_client_auth is true
# truststore: conf/.truststore
# truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
# internode_compression controls whether traffic between nodes is
# compressed.
# Can be:
#
# all
# all traffic is compressed
#
# dc
# traffic between different datacenters is compressed
#
# none
# nothing is compressed.
internode_compression: dc
# Enable or disable tcp_nodelay for inter-dc communication.
# Disabling it will result in larger (but fewer) network packets being sent,
# reducing overhead from the TCP protocol itself, at the cost of increasing
# latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false
# TTL for different trace types used during logging of the repair process.
tracetype_query_ttl: 86400
tracetype_repair_ttl: 604800
# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
# This threshold can be adjusted to minimize logging if necessary
# gc_log_threshold_in_ms: 200
# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
# INFO level
# UDFs (user defined functions) are disabled by default.
# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
enable_user_defined_functions: true
# Enables scripted UDFs (JavaScript UDFs).
# Java UDFs are always enabled, if enable_user_defined_functions is true.
# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
# This option has no effect, if enable_user_defined_functions is false.
enable_scripted_user_defined_functions: false
# Enables materialized view creation on this node.
# Materialized views are considered experimental and are not recommended for production use.
enable_materialized_views: true
# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
# Lowering this value on Windows can provide much tighter latency and better throughput, however
# some virtualized environments may see a negative performance impact from changing this setting
# below their system default. The sysinternals 'clockres' tool can confirm your system's default
# setting.
windows_timer_interval: 1
# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
# can still (and should!) be in the keystore and will be used on decrypt operations
# (to handle the case of key rotation).
#
# It is strongly recommended to download and install Java Cryptography Extension (JCE)
# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
#
# Currently, only the following file types are supported for transparent data encryption, although
# more are coming in future cassandra releases: commitlog, hints
transparent_data_encryption_options:
enabled: false
chunk_length_kb: 64
cipher: AES/CBC/PKCS5Padding
key_alias: testing:1
# CBC IV length for AES needs to be 16 bytes (which is also the default size)
# iv_length: 16
key_provider:
- class_name: org.apache.cassandra.security.JKSKeyProvider
parameters:
- keystore: conf/.keystore
keystore_password: cassandra
store_type: JCEKS
key_password: cassandra
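# A hypothetical sketch for creating the JCEKS secret key referenced above
# with the JDK keytool (passwords match the testing values in this block):
#   keytool -genseckey -keyalg AES -keysize 128 -alias testing:1 \
#     -keystore conf/.keystore -storetype JCEKS \
#     -storepass cassandra -keypass cassandra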
#####################
# SAFETY THRESHOLDS #
#####################
# When executing a scan, within or across a partition, we need to keep the
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
# using the StorageService mbean.
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000
# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
batch_size_warn_threshold_in_kb: 5
# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
batch_size_fail_threshold_in_kb: 50
# Log WARN on any batches not of type LOGGED that span more partitions than this limit
unlogged_batch_across_partitions_warn_threshold: 10
# Log a warning when compacting partitions larger than this value
compaction_large_partition_warning_threshold_mb: 100
# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
# Adjust the threshold based on your application throughput requirement
# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
gc_warn_threshold_in_ms: 1000
# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
# early. Any value size larger than this threshold will result in marking an SSTable
# as corrupted. This should be positive and less than 2048.
# max_value_size_in_mb: 256
# Back-pressure settings #
# If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation
# sent to replicas, with the aim of reducing pressure on overloaded replicas.
back_pressure_enabled: false
# The back-pressure strategy applied.
# The default implementation, RateBasedBackPressure, takes three arguments:
# high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests.
# If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor;
# if above high ratio, the rate limiting is increased by the given factor;
# the factor is usually best configured between 1 and 10; use larger values for a faster recovery
# at the expense of potentially more dropped mutations.
# The rate limiting is applied according to the flow type: if FAST, it is applied at the speed of the fastest replica;
# if SLOW, at the speed of the slowest one.
# New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
# provide a public constructor accepting a Map<String, Object>.
back_pressure_strategy:
- class_name: org.apache.cassandra.net.RateBasedBackPressure
parameters:
- high_ratio: 0.90
factor: 5
flow: FAST
# Coalescing Strategies #
# Coalescing multiple messages turns out to significantly boost message processing throughput (think doubling or more).
# On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in
# virtualized environments, the point at which an application can be bound by network packet processing can be
# surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal
# doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process
# is sufficient for many applications such that no load starvation is experienced even without coalescing.
# There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages
# per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one
# trip to read from a socket, and all the task submission work can be done at the same time reducing context switching
# and increasing cache friendliness of network message processing.
# See CASSANDRA-8692 for details.
# Strategy to use for coalescing messages in OutboundTcpConnection.
# Can be fixed, movingaverage, timehorizon, disabled (default).
# You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.
# otc_coalescing_strategy: DISABLED
# How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first
# message is received before it will be sent with any accompanying messages. For moving average this is the
# maximum amount of time that will be waited as well as the interval at which messages must arrive on average
# for coalescing to be enabled.
# otc_coalescing_window_us: 200
# Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.
# otc_coalescing_enough_coalesced_messages: 8
# How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection.
# Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory
# taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value
# will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU
# time and queue contention while iterating the backlog of messages.
# An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.
#
# otc_backlog_expiration_interval_ms: 200
cassandra::clusters:
azure:
cluster_name: SWH on Azure
seed_provider:
- class_name: org.apache.cassandra.locator.SimpleSeedProvider
parameters:
- seeds: 192.168.200.27 # cassandra01.euwest.azure.internal.softwareheritage.org
borg::repository_user: borg
borg::repository_group: borg
borg::base_path: /srv/borg
borg::repository_path: "%{lookup('borg::base_path')}/repositories"
borg::repository_server: banco.internal.softwareheritage.org
borg::encryption: repokey-blake2
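# For illustration (client path is hypothetical), a repository under this
# layout would be initialised with:
#   borg init --encryption=repokey-blake2 \
#     borg@banco.internal.softwareheritage.org:/srv/borg/repositories/<client>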
swh::deploy::search::sentry_swh_package: swh.search
swh::deploy::search::sentry_environment: "%{alias('swh::deploy::environment')}"
swh::deploy::search::conf_directory: "%{hiera('swh::conf_directory')}"
swh::deploy::search::conf_file: "%{hiera('swh::deploy::search::conf_directory')}/search.yml"
swh::deploy::search::user: swhstorage
swh::deploy::search::group: swhstorage
swh::deploy::search::config:
search:
cls: elasticsearch
args:
hosts: "%{alias('swh::elasticsearch::nodes')}"
swh::deploy::search::backend::listen::host: 127.0.0.1
swh::deploy::search::backend::listen::port: "%{alias('swh::remote_service::search::port')}"
swh::deploy::search::backend::workers: 4
swh::deploy::search::backend::reload_mercy: 3600
swh::deploy::search::backend::http_keepalive: 5
swh::deploy::search::backend::http_timeout: 3600
swh::deploy::search::backend::max_requests: 10000
swh::deploy::search::backend::max_requests_jitter: 1000
swh::deploy::search::backend::server_names:
- "%{::swh_hostname.internal_fqdn}"
- "%{::hostname}"
- 127.0.0.1
- localhost
- "::1"
diff --git a/site-modules/profile/manifests/kafka/broker.pp b/site-modules/profile/manifests/kafka/broker.pp
index cbfe5878..d380cd87 100644
--- a/site-modules/profile/manifests/kafka/broker.pp
+++ b/site-modules/profile/manifests/kafka/broker.pp
@@ -1,117 +1,139 @@
# Kafka broker profile
class profile::kafka::broker {
include ::profile::zookeeper
include ::profile::kafka
$base_kafka_config = lookup('kafka::broker_config', Hash)
$kafka_clusters = lookup('kafka::clusters', Hash)
$kafka_cluster = $kafka_clusters.filter |$cluster, $data| {
member($data['brokers'].keys(), $::swh_hostname['internal_fqdn'])
}.keys()[0]
$kafka_cluster_config = $kafka_clusters[$kafka_cluster]
$zookeeper_chroot = $kafka_cluster_config['zookeeper::chroot']
$zookeeper_servers = $kafka_cluster_config['zookeeper::servers']
$zookeeper_port = lookup('zookeeper::client_port', Integer)
$zookeeper_server_string = join(
$zookeeper_servers.map |$server| {"${server}:${zookeeper_port}"},
','
)
$zookeeper_connect_string = "${zookeeper_server_string}${zookeeper_chroot}"
$broker_id = $kafka_cluster_config['brokers'][$::swh_hostname['internal_fqdn']]['id']
$kafka_config = $base_kafka_config + {
'zookeeper.connect' => $zookeeper_connect_string,
'broker.id' => $broker_id
}
$heap_opts = $kafka_cluster_config['broker::heap_opts']
$kafka_logdirs = lookup('kafka::logdirs', Array)
$kafka_logdirs.each |$logdir| {
file {$logdir:
ensure => directory,
owner => 'kafka',
group => 'kafka',
mode => '0750',
} -> Service['kafka']
}
+ $do_tls = $kafka_cluster_config['tls']
+
+ if $do_tls {
+ include ::profile::letsencrypt::host_cert
+ $cert_paths = ::profile::letsencrypt::certificate_paths($trusted['certname'])
+ # $cert_paths['cert'], $cert_paths['chain'], $cert_paths['privkey']
+
+ $ks_password = fqdn_rand_string(16, '', lookup('kafka::broker::truststore_seed'))
+
+ $ks_location = '/etc/kafka/broker.ts'
+
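+ # Build a Java keystore for the broker from the host's Let's Encrypt
+ # certificate: java_ks (from the puppetlabs/java_ks module) imports the
+ # chain and private key into the file at $ks_location.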
+ java_ks {'kafka:broker':
+ ensure => latest,
+ certificate => $cert_paths['chain'],
+ private_key => $cert_paths['privkey'],
+ target => $ks_location,
+ password => $ks_password,
+ trustcacerts => true,
+ }
+ }
+
+
include ::profile::prometheus::jmx
$exporter = $::profile::prometheus::jmx::jar_path
$exporter_network = lookup('prometheus::kafka::listen_network', Optional[String], 'first', undef)
$exporter_address = lookup('prometheus::kafka::listen_address', Optional[String], 'first', undef)
$actual_exporter_address = pick($exporter_address, ip_for_network($exporter_network))
$exporter_port = lookup('prometheus::kafka::listen_port')
$target = "${actual_exporter_address}:${exporter_port}"
$exporter_config = "${::profile::prometheus::jmx::base_directory}/kafka.yml"
file {$exporter_config:
owner => 'root',
group => 'root',
mode => '0644',
source => 'puppet:///modules/profile/kafka/jmx_exporter.yml',
}
class {'::kafka::broker':
config => $kafka_config,
opts => "-javaagent:${exporter}=${exporter_port}:${exporter_config}",
limit_nofile => '65536',
heap_opts => $heap_opts,
require => [
File[$exporter],
File[$exporter_config],
],
}
::systemd::dropin_file {"kafka/restart.conf":
ensure => present,
unit => "kafka.service",
filename => 'restart.conf',
content => "[Service]\nRestart=on-failure\nRestartSec=5\n",
}
::systemd::dropin_file {"kafka/stop-timeout.conf":
ensure => present,
unit => "kafka.service",
filename => 'stop-timeout.conf',
content => "[Service]\nTimeoutStopSec=infinity\n",
}
::systemd::dropin_file {"kafka/exitcode.conf":
ensure => present,
unit => "kafka.service",
filename => 'exitcode.conf',
content => "[Service]\nSuccessExitStatus=143\n",
}
::profile::prometheus::export_scrape_config {'kafka':
target => $target,
labels => {
cluster => $kafka_cluster,
}
}
::profile::cron::d {'kafka-purge-logs':
command => 'find /var/log/kafka -type f -ctime +60 -exec rm {} \+',
target => 'kafka',
minute => 'fqdn_rand',
hour => 2,
}
::profile::cron::d {'kafka-zip-logs':
command => 'find /var/log/kafka -type f -not -name \'*.gz\' -a -ctime +1 -exec gzip {} \+',
target => 'kafka',
minute => 'fqdn_rand',
hour => 3,
}
}