diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 5c239db..2a33ea1 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,57 +1,60 @@
 name: Test
 on:
   # Run CI on all pushes to the master and release/** branches, and on all new
   # pull requests, and on all pushes to pull requests (even if a pull request
   # is not against master).
   push:
     branches:
       - "master"
       - "release/**"
   pull_request:
 env:
   DOCKER_COMPOSE_VERSION: 1.24.1
 defaults:
   run:
     shell: bash
 jobs:
   unit-test:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     name: "unit tests"
     steps:
       - name: Checkout
         uses: actions/checkout@v2

       - name: Unit Tests
         working-directory: install
         run: find ./ -type f -name "*-test.sh" -exec "./{}" \;

   integration-test:
-    runs-on: ubuntu-18.04
-    name: "test"
+    runs-on: ubuntu-20.04
+    name: "integration test"
    steps:
       - name: Pin docker-compose
         run: |
           sudo rm /usr/local/bin/docker-compose
           curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
           chmod +x docker-compose
           sudo mv docker-compose /usr/local/bin

       - name: Checkout
         uses: actions/checkout@v2

       - name: Integration Test
         run: |
           echo "Testing initial install"
+          # Create ./certificates here because install.sh will create it with root:root
+          # and then run.sh (-> setup.sh) won't be able to write to it.
+          mkdir certificates
           ./install.sh
-          ./test.sh
+          ./_integration-test/run.sh
           echo "Testing in-place upgrade"
           # Also test plugin installation here
           echo "sentry-auth-oidc" >> sentry/requirements.txt
           ./install.sh --minimize-downtime
-          ./test.sh
+          ./_integration-test/run.sh

       - name: Inspect failure
         if: failure()
         run: |
           docker-compose ps
           docker-compose logs
diff --git a/.gitignore b/.gitignore
index 8a16904..c8967cd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,89 +1,96 @@
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
 *$py.class

 # C extensions
 *.so

 # Distribution / packaging
 .Python
 env/
 build/
 develop-eggs/
 dist/
 downloads/
 eggs/
 .eggs/
 lib/
 lib64/
 parts/
 sdist/
 var/
 *.egg-info/
 .installed.cfg
 *.egg

 # PyInstaller
 # Usually these files are written by a python script from a template
 # before PyInstaller builds the exe, so as to inject date/other infos into it.
 *.manifest
 *.spec

 # Installer logs
 pip-log.txt
 pip-delete-this-directory.txt
 sentry_install_log*.txt

 # Unit test / coverage reports
 htmlcov/
 .tox/
 .coverage
 .coverage.*
 .cache
 nosetests.xml
 coverage.xml
 *.cover
 .hypothesis/

 # Translations
 *.mo
 *.pot

 # Django stuff:
 *.log
 local_settings.py

 # Sphinx documentation
 docs/_build/

 # PyBuilder
 target/

 # Ipython Notebook
 .ipynb_checkpoints

 # pyenv
 .python-version

 # https://docs.docker.com/compose/extends/
 docker-compose.override.yml

 *.tar
 data/
 .vscode/tags

 # custom Sentry config
 sentry/sentry.conf.py
 sentry/config.yml
 sentry/*.bak
 sentry/requirements.txt
 relay/credentials.json
 relay/config.yml
 symbolicator/config.yml
 geoip/GeoIP.conf
 geoip/*.mmdb
 geoip/.geoipupdate.lock

 # wal2json download
 postgres/wal2json
+
+# custom certificate authorities
+certificates
+
+# integration testing
+_integration-test/custom-ca-roots/nginx/*
+sentry/test-custom-ca-roots.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f3f25f3..611babd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,22 +1,26 @@
 # Changelog

+## Unreleased
+
+- feat: Support custom CA roots ([#27062](https://github.com/getsentry/sentry/pull/27062)), see the [docs](https://develop.sentry.dev/self-hosted/custom-ca-roots/) for more details.
+
 ## 21.7.0

 - No documented changes.
 ## 21.6.3

 - No documented changes.

 ## 21.6.2

 - BREAKING CHANGE: The frontend bundle will be loaded asynchronously (via [#25744](https://github.com/getsentry/sentry/pull/25744)). This is a breaking change that can affect custom plugins that access certain globals in the django template. Please see https://forum.sentry.io/t/breaking-frontend-changes-for-custom-plugins/14184 for more information.

 ## 21.6.1

 - No documented changes.

 ## 21.6.0

 - feat: Add healthchecks for redis, memcached and postgres (#975)
diff --git a/_integration-test/custom-ca-roots/docker-compose.test.yml b/_integration-test/custom-ca-roots/docker-compose.test.yml
new file mode 100644
index 0000000..2bc40ba
--- /dev/null
+++ b/_integration-test/custom-ca-roots/docker-compose.test.yml
@@ -0,0 +1,12 @@
+version: '3.4'
+services:
+  fixture-custom-ca-roots:
+    image: nginx:1.21.0-alpine
+    restart: unless-stopped
+    volumes:
+      - ./_integration-test/custom-ca-roots/nginx:/etc/nginx:ro
+    networks:
+      default:
+        aliases:
+          - self.test
+          - fail.test
diff --git a/_integration-test/custom-ca-roots/nginx/nginx.conf b/_integration-test/custom-ca-roots/nginx/nginx.conf
new file mode 100644
index 0000000..517aea4
--- /dev/null
+++ b/_integration-test/custom-ca-roots/nginx/nginx.conf
@@ -0,0 +1,32 @@
+user nginx;
+worker_processes 1;
+
+error_log /var/log/nginx/error.log warn;
+pid /var/run/nginx.pid;
+
+events {
+    worker_connections 1024;
+}
+
+http {
+    server {
+        listen 443 ssl;
+        server_name "self.test";
+        ssl_certificate "/etc/nginx/self.test.crt";
+        ssl_certificate_key "/etc/nginx/self.test.key";
+        location / {
+            add_header Content-Type text/plain;
+            return 200 'ok';
+        }
+    }
+    server {
+        listen 443 ssl;
+        server_name "fake.test";
+        ssl_certificate "/etc/nginx/fake.test.crt";
+        ssl_certificate_key "/etc/nginx/fake.test.key";
+        location / {
+            add_header Content-Type text/plain;
+            return 200 'bad';
+        }
+    }
+}
diff --git a/_integration-test/custom-ca-roots/setup.sh b/_integration-test/custom-ca-roots/setup.sh
new file mode 100755
index 0000000..a8cb2f1
--- /dev/null
+++ b/_integration-test/custom-ca-roots/setup.sh
@@ -0,0 +1,47 @@
+#! /usr/bin/env bash
+set -e
+
+export COMPOSE_FILE="../docker-compose.yml:./custom-ca-roots/docker-compose.test.yml"
+
+TEST_NGINX_CONF_PATH="./custom-ca-roots/nginx"
+CUSTOM_CERTS_PATH="../certificates"
+
+# generate tightly constrained CA
+# NB: `-addext` requires LibreSSL 3.1.0+, or OpenSSL (brew install openssl)
+openssl req -x509 -new -nodes -newkey rsa:2048 -keyout $TEST_NGINX_CONF_PATH/ca.key \
+-sha256 -days 1 -out $TEST_NGINX_CONF_PATH/ca.crt -batch \
+-subj "/CN=TEST CA *DO NOT TRUST*" \
+-addext "keyUsage = critical, keyCertSign, cRLSign" \
+-addext "nameConstraints = critical, permitted;DNS:self.test"
+
+## Lines like the following are debug helpers ...
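+# (A couple of extra inspection helpers in the same spirit -- illustrative only,
+# not part of the upstream change; `-ext` assumes OpenSSL 1.1.1+:)
+# openssl x509 -in nginx/ca.crt -noout -ext keyUsage,nameConstraints
+# openssl x509 -in nginx/self.test.crt -noout -ext subjectAltName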
+# openssl x509 -in nginx/ca.crt -text -noout
+
+mkdir -p $CUSTOM_CERTS_PATH
+cp $TEST_NGINX_CONF_PATH/ca.crt $CUSTOM_CERTS_PATH/test-custom-ca-roots.crt
+
+# generate server certificate
+openssl req -new -nodes -newkey rsa:2048 -keyout $TEST_NGINX_CONF_PATH/self.test.key \
+-addext "subjectAltName=DNS:self.test" \
+-out $TEST_NGINX_CONF_PATH/self.test.req -batch -subj "/CN=Self Signed with CA Test Server"
+
+# openssl req -in nginx/self.test.req -text -noout
+
+openssl x509 -req -in $TEST_NGINX_CONF_PATH/self.test.req -CA $TEST_NGINX_CONF_PATH/ca.crt -CAkey $TEST_NGINX_CONF_PATH/ca.key \
+-extfile <(printf "subjectAltName=DNS:self.test") \
+-CAcreateserial -out $TEST_NGINX_CONF_PATH/self.test.crt -days 1 -sha256
+
+# openssl x509 -in nginx/self.test.crt -text -noout
+
+# sanity check that signed certificate passes OpenSSL's validation
+openssl verify -CAfile $TEST_NGINX_CONF_PATH/ca.crt $TEST_NGINX_CONF_PATH/self.test.crt
+
+# self signed certificate, for sanity check of not just accepting all certs
+openssl req -x509 -newkey rsa:2048 -nodes -days 1 -keyout $TEST_NGINX_CONF_PATH/fake.test.key \
+-out $TEST_NGINX_CONF_PATH/fake.test.crt -addext "subjectAltName=DNS:fake.test" -subj "/CN=Self Signed Test Server"
+
+# openssl x509 -in nginx/fake.test.crt -text -noout
+
+cp ./custom-ca-roots/test.py ../sentry/test-custom-ca-roots.py
+
+$dc up -d fixture-custom-ca-roots
diff --git a/_integration-test/custom-ca-roots/teardown.sh b/_integration-test/custom-ca-roots/teardown.sh
new file mode 100755
index 0000000..059f69b
--- /dev/null
+++ b/_integration-test/custom-ca-roots/teardown.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+$dc rm -s -f -v fixture-custom-ca-roots
+rm -f ../certificates/test-custom-ca-roots.crt ../sentry/test-custom-ca-roots.py
+unset COMPOSE_FILE
diff --git a/_integration-test/custom-ca-roots/test.py b/_integration-test/custom-ca-roots/test.py
new file mode 100644
index 0000000..0f9b501
--- /dev/null
+++ b/_integration-test/custom-ca-roots/test.py
@@ -0,0 +1,15 @@
+import unittest
+import requests
+
+
+class CustomCATests(unittest.TestCase):
+    def test_valid_self_signed(self):
+        self.assertEqual(requests.get("https://self.test").text, 'ok')
+
+    def test_invalid_self_signed(self):
+        with self.assertRaises(requests.exceptions.SSLError):
+            requests.get("https://fail.test")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test.sh b/_integration-test/run.sh
similarity index 94%
rename from test.sh
rename to _integration-test/run.sh
index 37d7af5..f25a302 100755
--- a/test.sh
+++ b/_integration-test/run.sh
@@ -1,121 +1,128 @@
 #!/usr/bin/env bash
 set -e

-source "$(dirname $0)/install/_lib.sh"
+source "$(dirname $0)/../install/_lib.sh"

 echo "${_group}Setting up variables and helpers ..."
 export SENTRY_TEST_HOST="${SENTRY_TEST_HOST:-http://localhost:9000}"
 TEST_USER='test@example.com'
 TEST_PASS='test123TEST'
 COOKIE_FILE=$(mktemp)

 # Courtesy of https://stackoverflow.com/a/2183063/90297
 trap_with_arg() {
   func="$1" ; shift
   for sig ; do
     trap "$func $sig "'$LINENO' "$sig"
   done
 }

 DID_CLEAN_UP=0
 # the cleanup function will be the exit point
 cleanup () {
   if [ "$DID_CLEAN_UP" -eq 1 ]; then
     return 0;
   fi
   DID_CLEAN_UP=1

   if [ "$1" != "EXIT" ]; then echo "An error occurred, caught SIG$1 on line $2"; fi

   echo "Cleaning up..."
   rm $COOKIE_FILE
   echo "Done."
 }
 trap_with_arg cleanup ERR INT TERM EXIT
 echo "${_endgroup}"

 echo "${_group}Starting Sentry for tests ..."
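 # Summary of the block below (comment added editorially, not upstream text):
 # disable the telemetry beacon, create the test superuser, bring the full stack
 # up, then poll the web endpoint until it responds or the 60 second timeout hits.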
 # Disable beacon for e2e tests
 echo 'SENTRY_BEACON=False' >> $SENTRY_CONFIG_PY

 $dcr web createuser --superuser --email $TEST_USER --password $TEST_PASS || true
 $dc up -d
 printf "Waiting for Sentry to be up";
 timeout 60 bash -c 'until $(curl -Isf -o /dev/null $SENTRY_TEST_HOST); do printf '.'; sleep 0.5; done'
+echo ""
 echo "${_endgroup}"

 echo "${_group}Running tests ..."
 get_csrf_token () { awk '$6 == "sc" { print $7 }' $COOKIE_FILE; }
 sentry_api_request () { curl -s -H 'Accept: application/json; charset=utf-8' -H "Referer: $SENTRY_TEST_HOST" -H 'Content-Type: application/json' -H "X-CSRFToken: $(get_csrf_token)" -b "$COOKIE_FILE" -c "$COOKIE_FILE" "$SENTRY_TEST_HOST/api/0/$1" ${@:2}; }

 login () {
   INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null $SENTRY_TEST_HOST -w %{url_effective})
   if [ "$INITIAL_AUTH_REDIRECT" != "$SENTRY_TEST_HOST/auth/login/sentry/" ]; then
     echo "Initial /auth/login/ redirect failed, exiting..."
     echo "$INITIAL_AUTH_REDIRECT"
     exit -1
   fi

   CSRF_TOKEN_FOR_LOGIN=$(curl $SENTRY_TEST_HOST -sL -c "$COOKIE_FILE" | awk -F "'" '
     /csrfmiddlewaretoken/ {
       print $4 "=" $6;
       exit;
     }')

   curl -sL --data-urlencode 'op=login' --data-urlencode "username=$TEST_USER" --data-urlencode "password=$TEST_PASS" --data-urlencode "$CSRF_TOKEN_FOR_LOGIN" "$SENTRY_TEST_HOST/auth/login/sentry/" -H "Referer: $SENTRY_TEST_HOST/auth/login/sentry/" -b "$COOKIE_FILE" -c "$COOKIE_FILE";
 }

 LOGIN_RESPONSE=$(login);

 declare -a LOGIN_TEST_STRINGS=(
   '"isAuthenticated":true'
   '"username":"test@example.com"'
   '"isSuperuser":true'
 )

 for i in "${LOGIN_TEST_STRINGS[@]}"
 do
   echo "Testing '$i'..."
   echo "$LOGIN_RESPONSE" | grep "$i[,}]" >& /dev/null
   echo "Pass."
 done
 echo "${_endgroup}"

 echo "${_group}Running moar tests !!!"
 # Set up initial/required settings (InstallWizard request)
 sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","mail.from":"root@localhost","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null

 SENTRY_DSN=$(sentry_api_request "projects/sentry/internal/keys/" | awk 'BEGIN { RS=",|:{\n"; FS="\""; } $2 == "public" && $4 ~ "^http" { print $4; exit; }')
 # We ignore the protocol and the host as we already know those
 DSN_PIECES=(`echo $SENTRY_DSN | sed -ne 's|^https\{0,1\}://\([0-9a-z]\{1,\}\)@[^/]\{1,\}/\([0-9]\{1,\}\)$|\1 \2|p' | tr ' ' '\n'`)
 SENTRY_KEY=${DSN_PIECES[0]}
 PROJECT_ID=${DSN_PIECES[1]}

 TEST_EVENT_ID=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-f0-9" | head -c 32)
 # Thanks @untitaker - https://forum.sentry.io/t/how-can-i-post-with-curl-a-sentry-event-which-authentication-credentials/4759/2?u=byk
 echo "Creating test event..."
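 # The request below posts a minimal event straight to the store endpoint
 # (/api/<project_id>/store/), authenticating with the public key extracted from
 # the DSN above via the X-Sentry-Auth header. (Comment added editorially; not
 # part of the original script.)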
 curl -sf --data '{"event_id": "'"$TEST_EVENT_ID"'","level":"error","message":"a failure","extra":{"object":"42"}}' -H 'Content-Type: application/json' -H "X-Sentry-Auth: Sentry sentry_version=7, sentry_key=$SENTRY_KEY, sentry_client=test-bash/0.1" "$SENTRY_TEST_HOST/api/$PROJECT_ID/store/" -o /dev/null

 EVENT_PATH="projects/sentry/internal/events/$TEST_EVENT_ID/"
 export -f sentry_api_request get_csrf_token
 export SENTRY_TEST_HOST COOKIE_FILE EVENT_PATH
 printf "Getting the test event back"
 timeout 30 bash -c 'until $(sentry_api_request "$EVENT_PATH" -Isf -X GET -o /dev/null); do printf '.'; sleep 0.5; done'
-echo "";
+echo " got it!";

 EVENT_RESPONSE=$(sentry_api_request "$EVENT_PATH")
 declare -a EVENT_TEST_STRINGS=(
   '"eventID":"'"$TEST_EVENT_ID"'"'
   '"message":"a failure"'
   '"title":"a failure"'
   '"object":"42"'
 )
 for i in "${EVENT_TEST_STRINGS[@]}"
 do
   echo "Testing '$i'..."
   echo "$EVENT_RESPONSE" | grep "$i[,}]" >& /dev/null
   echo "Pass."
 done
 echo "${_endgroup}"

 echo "${_group}Ensure cleanup crons are working ..."
 $dc ps | grep -q -- "-cleanup_.\+[[:space:]]\+Up[[:space:]]\+"
 echo "${_endgroup}"
+
+echo "${_group}Test custom CAs work ..."
+source ./custom-ca-roots/setup.sh
+$dcr --no-deps web python3 /etc/sentry/test-custom-ca-roots.py
+source ./custom-ca-roots/teardown.sh
+echo "${_endgroup}"
diff --git a/cron/entrypoint.sh b/cron/entrypoint.sh
index baa833a..383c8b2 100755
--- a/cron/entrypoint.sh
+++ b/cron/entrypoint.sh
@@ -1,15 +1,19 @@
 #!/usr/bin/env bash
+if [ "$(ls -A /usr/local/share/ca-certificates/)" ]; then
+  update-ca-certificates
+fi
+
 # Prior art:
 # - https://git.io/fjNOg
 # - https://blog.knoldus.com/running-a-cron-job-in-docker-container/

 declare -p | grep -Ev 'BASHOPTS|BASH_VERSINFO|EUID|PPID|SHELLOPTS|UID' > /container.env

 { for cron_job in "$@"; do echo -e "SHELL=/bin/bash
 BASH_ENV=/container.env
 ${cron_job} > /proc/1/fd/1 2>/proc/1/fd/2"; done } \
   | sed --regexp-extended 's/\\(.)/\1/g' \
   | crontab -
 crontab -l

 exec cron -f -l -L 15
diff --git a/docker-compose.yml b/docker-compose.yml
index 0467027..86f3ed4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,327 +1,336 @@
 version: "3.4"
 x-restart-policy: &restart_policy
   restart: unless-stopped
 x-healthcheck-defaults: &healthcheck_defaults
   # Avoid setting the interval too small, as docker uses much more CPU than one would expect.
   # Related issues:
   # https://github.com/moby/moby/issues/39102
   # https://github.com/moby/moby/issues/39388
   # https://github.com/getsentry/onpremise/issues/1000
   interval: 30s
   timeout: 5s
   retries: 3
   start_period: 10s
 x-sentry-defaults: &sentry_defaults
   <<: *restart_policy
   image: "$SENTRY_IMAGE"
   depends_on:
     - redis
     - postgres
     - memcached
     - smtp
     - snuba-api
     - snuba-consumer
     - snuba-outcomes-consumer
     - snuba-sessions-consumer
     - snuba-transactions-consumer
     - snuba-subscription-consumer-events
     - snuba-subscription-consumer-transactions
     - snuba-replacer
     - symbolicator
     - kafka
   entrypoint: "/etc/sentry/entrypoint.sh"
   command: ["run", "web"]
   environment:
     PYTHONUSERBASE: "/data/custom-packages"
     SENTRY_CONF: "/etc/sentry"
     SNUBA: "http://snuba-api:1218"
+    # Force everything to use the system CA bundle
+    # This is mostly needed to support installing custom CA certs
+    # This one is used by botocore
+    DEFAULT_CA_BUNDLE: &ca_bundle "/etc/ssl/certs/ca-certificates.crt"
+    # This one is used by requests
+    REQUESTS_CA_BUNDLE: *ca_bundle
+    # This one is used by grpc/google modules
+    GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR: *ca_bundle
     # Leaving the value empty to just pass whatever is set
     # on the host system (or in the .env file)
     SENTRY_EVENT_RETENTION_DAYS:
   volumes:
     - "sentry-data:/data"
     - "./sentry:/etc/sentry"
     - "./geoip:/geoip:ro"
+    - "./certificates:/usr/local/share/ca-certificates:ro"
 x-snuba-defaults: &snuba_defaults
   <<: *restart_policy
   depends_on:
     - redis
     - clickhouse
     - kafka
   image: "$SNUBA_IMAGE"
   environment:
     SNUBA_SETTINGS: docker
     CLICKHOUSE_HOST: clickhouse
     DEFAULT_BROKERS: "kafka:9092"
     REDIS_HOST: redis
     UWSGI_MAX_REQUESTS: "10000"
     UWSGI_DISABLE_LOGGING: "true"
     # Leaving the value empty to just pass whatever is set
     # on the host system (or in the .env file)
     SENTRY_EVENT_RETENTION_DAYS:
 services:
   smtp:
     <<: *restart_policy
     image: tianon/exim4
     volumes:
       - "sentry-smtp:/var/spool/exim4"
       - "sentry-smtp-log:/var/log/exim4"
   memcached:
     <<: *restart_policy
     image: "memcached:1.6.9-alpine"
     healthcheck:
       <<: *healthcheck_defaults
       # From: https://stackoverflow.com/a/31877626/5155484
       test: echo stats | nc 127.0.0.1 11211
   redis:
     <<: *restart_policy
     image: "redis:6.2.4-alpine"
     healthcheck:
       <<: *healthcheck_defaults
       test: redis-cli ping
     volumes:
       - "sentry-redis:/data"
     ulimits:
       nofile:
         soft: 10032
         hard: 10032
   postgres:
     <<: *restart_policy
     image: "postgres:9.6"
     healthcheck:
       <<: *healthcheck_defaults
       # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided
       test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
     command:
       [
         "postgres",
         "-c",
         "wal_level=logical",
         "-c",
         "max_replication_slots=1",
         "-c",
         "max_wal_senders=1",
       ]
     environment:
       POSTGRES_HOST_AUTH_METHOD: "trust"
     entrypoint: /opt/sentry/postgres-entrypoint.sh
     volumes:
       - "sentry-postgres:/var/lib/postgresql/data"
       - type: bind
         read_only: true
         source: ./postgres/
         target: /opt/sentry/
   zookeeper:
     <<: *restart_policy
     image: "confluentinc/cp-zookeeper:5.5.0"
     environment:
       ZOOKEEPER_CLIENT_PORT: "2181"
       CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
       ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN"
       ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN"
       KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok"
     volumes:
       - "sentry-zookeeper:/var/lib/zookeeper/data"
       - "sentry-zookeeper-log:/var/lib/zookeeper/log"
       - "sentry-secrets:/etc/zookeeper/secrets"
     healthcheck:
       <<: *healthcheck_defaults
       test: ["CMD-SHELL", 'echo "ruok" | nc -w 2 -q 2 localhost 2181 | grep imok']
   kafka:
     <<: *restart_policy
     depends_on:
       - zookeeper
     image: "confluentinc/cp-kafka:5.5.0"
     environment:
       KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
       KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092"
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
       KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
       KAFKA_LOG_RETENTION_HOURS: "24"
       KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust
       KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too
       CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
       KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN"
       KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN"
       KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN"
     volumes:
       - "sentry-kafka:/var/lib/kafka/data"
       - "sentry-kafka-log:/var/lib/kafka/log"
       - "sentry-secrets:/etc/kafka/secrets"
     healthcheck:
       <<: *healthcheck_defaults
       test: ["CMD-SHELL", "nc -z localhost 9092"]
   clickhouse:
     <<: *restart_policy
     image: "yandex/clickhouse-server:20.3.9.70"
     ulimits:
       nofile:
         soft: 262144
         hard: 262144
     volumes:
       - "sentry-clickhouse:/var/lib/clickhouse"
       - "sentry-clickhouse-log:/var/log/clickhouse-server"
       - type: bind
         read_only: true
         source: ./clickhouse/config.xml
         target: /etc/clickhouse-server/config.d/sentry.xml
     environment:
       # This limits Clickhouse's memory to 30% of the host memory
       # If you have high volume and your search return incomplete results
       # You might want to change this to a higher value (and ensure your host has enough memory)
       MAX_MEMORY_USAGE_RATIO: 0.3
   geoipupdate:
     image: "maxmindinc/geoipupdate:v4.7.1"
     # Override the entrypoint in order to avoid using envvars for config.
     # Futz with settings so we can keep mmdb and conf in same dir on host
     # (image looks for them in separate dirs by default).
     entrypoint: ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"]
     volumes:
       - "./geoip:/sentry"
   snuba-api:
     <<: *snuba_defaults
   # Kafka consumer responsible for feeding events into Clickhouse
   snuba-consumer:
     <<: *snuba_defaults
     command: consumer --storage errors --auto-offset-reset=latest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding outcomes into Clickhouse
   # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
   # since we did not do a proper migration
   snuba-outcomes-consumer:
     <<: *snuba_defaults
     command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding session data into Clickhouse
   snuba-sessions-consumer:
     <<: *snuba_defaults
     command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding transactions data into Clickhouse
   snuba-transactions-consumer:
     <<: *snuba_defaults
     command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --commit-log-topic=snuba-commit-log
   snuba-replacer:
     <<: *snuba_defaults
     command: replacer --storage errors --auto-offset-reset=latest --max-batch-size 3
   snuba-subscription-consumer-events:
     <<: *snuba_defaults
     command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60
   snuba-subscription-consumer-transactions:
     <<: *snuba_defaults
     command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=transactions_group --delay-seconds=60 --schedule-ttl=60
   snuba-cleanup:
     <<: *snuba_defaults
     image: snuba-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SNUBA_IMAGE"
     command: '"*/5 * * * * gosu snuba snuba cleanup --storage errors --dry-run False"'
   snuba-transactions-cleanup:
     <<: *snuba_defaults
     image: snuba-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SNUBA_IMAGE"
     command: '"*/5 * * * * gosu snuba snuba cleanup --storage transactions --dry-run False"'
   symbolicator:
     <<: *restart_policy
     image: "$SYMBOLICATOR_IMAGE"
     volumes:
       - "sentry-symbolicator:/data"
       - type: bind
         read_only: true
         source: ./symbolicator
         target: /etc/symbolicator
     command: run -c /etc/symbolicator/config.yml
   symbolicator-cleanup:
     <<: *restart_policy
     image: symbolicator-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SYMBOLICATOR_IMAGE"
     command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
     volumes:
       - "sentry-symbolicator:/data"
   web:
     <<: *sentry_defaults
   cron:
     <<: *sentry_defaults
     command: run cron
   worker:
     <<: *sentry_defaults
     command: run worker
   ingest-consumer:
     <<: *sentry_defaults
     command: run ingest-consumer --all-consumer-types
   post-process-forwarder:
     <<: *sentry_defaults
     # Increase `--commit-batch-size 1` below to deal with high-load environments.
     command: run post-process-forwarder --commit-batch-size 1
   subscription-consumer-events:
     <<: *sentry_defaults
     command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results
   subscription-consumer-transactions:
     <<: *sentry_defaults
     command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results
   sentry-cleanup:
     <<: *sentry_defaults
     image: sentry-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SENTRY_IMAGE"
     entrypoint: "/entrypoint.sh"
     command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
   nginx:
     <<: *restart_policy
     ports:
       - "$SENTRY_BIND:80/tcp"
     image: "nginx:1.21.0-alpine"
     volumes:
       - type: bind
         read_only: true
         source: ./nginx
         target: /etc/nginx
     depends_on:
       - web
       - relay
   relay:
     <<: *restart_policy
     image: "$RELAY_IMAGE"
     volumes:
       - type: bind
         read_only: true
         source: ./relay
         target: /work/.relay
       - type: bind
         read_only: true
         source: ./geoip
         target: /geoip
     depends_on:
       - kafka
       - redis
       - web
 volumes:
   sentry-data:
     external: true
   sentry-postgres:
     external: true
   sentry-redis:
     external: true
   sentry-zookeeper:
     external: true
   sentry-kafka:
     external: true
   sentry-clickhouse:
     external: true
   sentry-symbolicator:
     external: true
   sentry-secrets:
   sentry-smtp:
   sentry-zookeeper-log:
   sentry-kafka-log:
   sentry-smtp-log:
   sentry-clickhouse-log:
diff --git a/install/_lib.sh b/install/_lib.sh
index 2d7517f..0b5417f 100644
--- a/install/_lib.sh
+++ b/install/_lib.sh
@@ -1,49 +1,49 @@
 set -euo pipefail
 test "${DEBUG:-}" && set -x

 # Thanks to https://unix.stackexchange.com/a/145654/108960
 log_file="sentry_install_log-`date +'%Y-%m-%d_%H-%M-%S'`.txt"
 exec &> >(tee -a "$log_file")

 # Work from /install/ for install.sh, project root otherwise
-if [[ "$(basename $0)" = "install.sh" || "$(basename $0)" = "test.sh" ]]; then
+if [[ "$(basename $0)" = "install.sh" ]]; then
   cd "$(dirname $0)/install/"
 else
-  cd "$(dirname $0)" # assume we're a *-test.sh script
+  cd "$(dirname $0)" # assume we're a test script or some such
 fi

 _ENV="$(realpath ../.env)"

 # Read .env for default values with a tip o' the hat to https://stackoverflow.com/a/59831605/90297
 t=$(mktemp) && export -p > "$t" && set -a && . $_ENV && set +a && . "$t" && rm "$t" && unset t

 if [ "${GITHUB_ACTIONS:-}" = "true" ]; then
   _group="::group::"
   _endgroup="::endgroup::"
 else
   _group="▶ "
   _endgroup=""
 fi

 dc="docker-compose --no-ansi"
 dcr="$dc run --rm"

 # A couple of the config files are referenced from other subscripts, so they
 # get vars, while multiple subscripts call ensure_file_from_example.
 function ensure_file_from_example {
   if [[ -f "$1" ]]; then
     echo "$1 already exists, skipped creation."
   else
     echo "Creating $1..."
     cp -n $(echo "$1" | sed 's/\.[^.]*$/.example&/') "$1" # sed from https://stackoverflow.com/a/25123013/90297
   fi
 }
 SENTRY_CONFIG_PY='../sentry/sentry.conf.py'
 SENTRY_CONFIG_YML='../sentry/config.yml'

 # Increase the default 10 second SIGTERM timeout
 # to ensure celery queues are properly drained
 # between upgrades as task signatures may change across
 # versions
 STOP_TIMEOUT=60 # seconds
diff --git a/sentry/entrypoint.sh b/sentry/entrypoint.sh
index 55c7e41..2f2614a 100755
--- a/sentry/entrypoint.sh
+++ b/sentry/entrypoint.sh
@@ -1,16 +1,20 @@
 #!/bin/bash
 set -e

+if [ "$(ls -A /usr/local/share/ca-certificates/)" ]; then
+  update-ca-certificates
+fi
+
 req_file="/etc/sentry/requirements.txt"
 plugins_dir="/data/custom-packages"
 checksum_file="$plugins_dir/.checksum"

 if [[ -s "$req_file" ]] && ! cat "$req_file" | grep '^[^#[:space:]]' | shasum -s -c "$checksum_file" 2>/dev/null; then
   echo "Installing additional dependencies..."
   mkdir -p "$plugins_dir"
   pip install --user -r "$req_file"
   cat "$req_file" | grep '^[^#[:space:]]' | shasum > "$checksum_file"
   echo ""
 fi

 source /docker-entrypoint.sh