diff --git a/.env b/.env
index d49dae9..a7830cf 100644
--- a/.env
+++ b/.env
@@ -1,10 +1,12 @@
 COMPOSE_PROJECT_NAME=sentry_onpremise
 SENTRY_EVENT_RETENTION_DAYS=90
 # You can either use a port number or an IP:PORT combo for SENTRY_BIND
 # See https://docs.docker.com/compose/compose-file/#ports for more
 SENTRY_BIND=9000
+# Set SENTRY_MAIL_HOST to a valid FQDN (host/domain name) to be able to send emails!
+# SENTRY_MAIL_HOST=example.com
 SENTRY_IMAGE=getsentry/sentry:nightly
 SNUBA_IMAGE=getsentry/snuba:nightly
 RELAY_IMAGE=getsentry/relay:nightly
 SYMBOLICATOR_IMAGE=getsentry/symbolicator:nightly
-WAL2JSON_VERSION=latest
+WAL2JSON_VERSION=latest
\ No newline at end of file
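Aside: a quick, hedged way to check that a value set in `.env` actually reaches the stack — `docker-compose config` renders the fully resolved compose file, so the substitution is visible without starting anything. The `example.com` value is an illustration only:

```bash
# Hypothetical value for illustration only -- use your real mail domain.
echo 'SENTRY_MAIL_HOST=example.com' >> .env
# Render the resolved compose file and look for the substituted hostname
# under the smtp service (added in docker-compose.yml below):
docker-compose config | grep 'hostname'
```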
diff --git a/_integration-test/run.sh b/_integration-test/run.sh
index dc13de4..6b66052 100755
--- a/_integration-test/run.sh
+++ b/_integration-test/run.sh
@@ -1,128 +1,128 @@
 #!/usr/bin/env bash
 set -e

 source "$(dirname $0)/../install/_lib.sh"

 echo "${_group}Setting up variables and helpers ..."
 export SENTRY_TEST_HOST="${SENTRY_TEST_HOST:-http://localhost:9000}"
 TEST_USER='test@example.com'
 TEST_PASS='test123TEST'
 COOKIE_FILE=$(mktemp)

 # Courtesy of https://stackoverflow.com/a/2183063/90297
 trap_with_arg() {
   func="$1" ; shift
   for sig ; do
     trap "$func $sig "'$LINENO' "$sig"
   done
 }

 DID_CLEAN_UP=0
 # the cleanup function will be the exit point
 cleanup () {
   if [ "$DID_CLEAN_UP" -eq 1 ]; then
     return 0;
   fi
   DID_CLEAN_UP=1

   if [ "$1" != "EXIT" ]; then
     echo "An error occurred, caught SIG$1 on line $2";
   fi

   echo "Cleaning up..."
   rm $COOKIE_FILE
   echo "Done."
 }
 trap_with_arg cleanup ERR INT TERM EXIT
 echo "${_endgroup}"

 echo "${_group}Starting Sentry for tests ..."
 # Disable beacon for e2e tests
 echo 'SENTRY_BEACON=False' >> $SENTRY_CONFIG_PY

 $dcr web createuser --superuser --email $TEST_USER --password $TEST_PASS || true
 $dc up -d
 printf "Waiting for Sentry to be up"; timeout 60 bash -c 'until $(curl -Isf -o /dev/null $SENTRY_TEST_HOST); do printf '.'; sleep 0.5; done'
 echo ""
 echo "${_endgroup}"

 echo "${_group}Running tests ..."
 get_csrf_token () { awk '$6 == "sc" { print $7 }' $COOKIE_FILE; }
 sentry_api_request () { curl -s -H 'Accept: application/json; charset=utf-8' -H "Referer: $SENTRY_TEST_HOST" -H 'Content-Type: application/json' -H "X-CSRFToken: $(get_csrf_token)" -b "$COOKIE_FILE" -c "$COOKIE_FILE" "$SENTRY_TEST_HOST/api/0/$1" ${@:2}; }

 login () {
   INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null $SENTRY_TEST_HOST -w %{url_effective})
   if [ "$INITIAL_AUTH_REDIRECT" != "$SENTRY_TEST_HOST/auth/login/sentry/" ]; then
     echo "Initial /auth/login/ redirect failed, exiting..."
     echo "$INITIAL_AUTH_REDIRECT"
     exit -1
   fi

   CSRF_TOKEN_FOR_LOGIN=$(curl $SENTRY_TEST_HOST -sL -c "$COOKIE_FILE" | awk -F "['\"]" '
     /csrfmiddlewaretoken/ {
       print $4 "=" $6;
       exit;
     }')

   curl -sL --data-urlencode 'op=login' --data-urlencode "username=$TEST_USER" --data-urlencode "password=$TEST_PASS" --data-urlencode "$CSRF_TOKEN_FOR_LOGIN" "$SENTRY_TEST_HOST/auth/login/sentry/" -H "Referer: $SENTRY_TEST_HOST/auth/login/sentry/" -b "$COOKIE_FILE" -c "$COOKIE_FILE";
 }

 LOGIN_RESPONSE=$(login);
 declare -a LOGIN_TEST_STRINGS=(
   '"isAuthenticated":true'
   '"username":"test@example.com"'
   '"isSuperuser":true'
 )
 for i in "${LOGIN_TEST_STRINGS[@]}"
 do
   echo "Testing '$i'..."
   echo "$LOGIN_RESPONSE" | grep "$i[,}]" >& /dev/null
   echo "Pass."
 done
 echo "${_endgroup}"

 echo "${_group}Running moar tests !!!"
 # Set up initial/required settings (InstallWizard request)
-sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","mail.from":"root@localhost","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null
+sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null

 SENTRY_DSN=$(sentry_api_request "projects/sentry/internal/keys/" | awk 'BEGIN { RS=",|:{\n"; FS="\""; } $2 == "public" && $4 ~ "^http" { print $4; exit; }')
 # We ignore the protocol and the host as we already know those
 DSN_PIECES=(`echo $SENTRY_DSN | sed -ne 's|^https\{0,1\}://\([0-9a-z]\{1,\}\)@[^/]\{1,\}/\([0-9]\{1,\}\)$|\1 \2|p' | tr ' ' '\n'`)
 SENTRY_KEY=${DSN_PIECES[0]}
 PROJECT_ID=${DSN_PIECES[1]}

 TEST_EVENT_ID=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-f0-9" | head -c 32)
 # Thanks @untitaker - https://forum.sentry.io/t/how-can-i-post-with-curl-a-sentry-event-which-authentication-credentials/4759/2?u=byk
 echo "Creating test event..."
 curl -sf --data '{"event_id": "'"$TEST_EVENT_ID"'","level":"error","message":"a failure","extra":{"object":"42"}}' -H 'Content-Type: application/json' -H "X-Sentry-Auth: Sentry sentry_version=7, sentry_key=$SENTRY_KEY, sentry_client=test-bash/0.1" "$SENTRY_TEST_HOST/api/$PROJECT_ID/store/" -o /dev/null

 EVENT_PATH="projects/sentry/internal/events/$TEST_EVENT_ID/"
 export -f sentry_api_request get_csrf_token
 export SENTRY_TEST_HOST COOKIE_FILE EVENT_PATH
 printf "Getting the test event back"
 timeout 30 bash -c 'until $(sentry_api_request "$EVENT_PATH" -Isf -X GET -o /dev/null); do printf '.'; sleep 0.5; done'
 echo " got it!";

 EVENT_RESPONSE=$(sentry_api_request "$EVENT_PATH")
 declare -a EVENT_TEST_STRINGS=(
   '"eventID":"'"$TEST_EVENT_ID"'"'
   '"message":"a failure"'
   '"title":"a failure"'
   '"object":"42"'
 )
 for i in "${EVENT_TEST_STRINGS[@]}"
 do
   echo "Testing '$i'..."
   echo "$EVENT_RESPONSE" | grep "$i[,}]" >& /dev/null
   echo "Pass."
 done
 echo "${_endgroup}"

 echo "${_group}Ensure cleanup crons are working ..."
 $dc ps | grep -q -- "-cleanup_.\+[[:space:]]\+Up[[:space:]]\+"
 echo "${_endgroup}"

 echo "${_group}Test custom CAs work ..."
 source ./custom-ca-roots/setup.sh
 $dcr --no-deps web python3 /etc/sentry/test-custom-ca-roots.py
 source ./custom-ca-roots/teardown.sh
 echo "${_endgroup}"
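The DSN parsing in the hunk above is dense, so here is the same `sed` expression as a standalone, runnable sketch against a made-up DSN of the shape Sentry hands back (`http://<key>@<host>/<project-id>`):

```bash
#!/usr/bin/env bash
# Hypothetical DSN for illustration; the key and project id are invented.
SENTRY_DSN="http://4bb1ae49e9a54f4b85c6936f7db8bdb8@localhost:9000/1"
# Same sed expression as run.sh: capture the key and the project id while
# ignoring the scheme and the host, then split the two captures into an array.
DSN_PIECES=($(echo "$SENTRY_DSN" | sed -ne 's|^https\{0,1\}://\([0-9a-z]\{1,\}\)@[^/]\{1,\}/\([0-9]\{1,\}\)$|\1 \2|p' | tr ' ' '\n'))
echo "SENTRY_KEY=${DSN_PIECES[0]}"   # -> 4bb1ae49e9a54f4b85c6936f7db8bdb8
echo "PROJECT_ID=${DSN_PIECES[1]}"   # -> 1
```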
diff --git a/docker-compose.yml b/docker-compose.yml
index c5d9ec7..6314f88 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,377 +1,379 @@
 x-restart-policy: &restart_policy
   restart: unless-stopped
 x-depends_on-healthy: &depends_on-healthy
   condition: service_healthy
 x-depends_on-default: &depends_on-default
   condition: service_started
 x-healthcheck-defaults: &healthcheck_defaults
   # Avoid setting the interval too small, as docker uses much more CPU than one would expect.
   # Related issues:
   # https://github.com/moby/moby/issues/39102
   # https://github.com/moby/moby/issues/39388
   # https://github.com/getsentry/onpremise/issues/1000
   interval: 30s
   timeout: 5s
   retries: 5
   start_period: 10s
 x-sentry-defaults: &sentry_defaults
   <<: *restart_policy
   image: "$SENTRY_IMAGE"
   depends_on:
     redis:
       <<: *depends_on-healthy
     kafka:
       <<: *depends_on-healthy
     postgres:
       <<: *depends_on-healthy
     memcached:
       <<: *depends_on-default
     smtp:
       <<: *depends_on-default
     snuba-api:
       <<: *depends_on-default
     snuba-consumer:
       <<: *depends_on-default
     snuba-outcomes-consumer:
       <<: *depends_on-default
     snuba-sessions-consumer:
       <<: *depends_on-default
     snuba-transactions-consumer:
       <<: *depends_on-default
     snuba-subscription-consumer-events:
       <<: *depends_on-default
     snuba-subscription-consumer-transactions:
       <<: *depends_on-default
     snuba-replacer:
       <<: *depends_on-default
     symbolicator:
       <<: *depends_on-default
   entrypoint: "/etc/sentry/entrypoint.sh"
   command: ["run", "web"]
   environment:
     PYTHONUSERBASE: "/data/custom-packages"
     SENTRY_CONF: "/etc/sentry"
     SNUBA: "http://snuba-api:1218"
     # Force everything to use the system CA bundle
     # This is mostly needed to support installing custom CA certs
     # This one is used by botocore
     DEFAULT_CA_BUNDLE: &ca_bundle "/etc/ssl/certs/ca-certificates.crt"
     # This one is used by requests
     REQUESTS_CA_BUNDLE: *ca_bundle
     # This one is used by grpc/google modules
     GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR: *ca_bundle
     # Leaving the value empty to just pass whatever is set
     # on the host system (or in the .env file)
     SENTRY_EVENT_RETENTION_DAYS:
+    SENTRY_MAIL_HOST:
   volumes:
     - "sentry-data:/data"
     - "./sentry:/etc/sentry"
     - "./geoip:/geoip:ro"
     - "./certificates:/usr/local/share/ca-certificates:ro"
 x-snuba-defaults: &snuba_defaults
   <<: *restart_policy
   depends_on:
     clickhouse:
       <<: *depends_on-healthy
     kafka:
       <<: *depends_on-healthy
     redis:
       <<: *depends_on-healthy
   image: "$SNUBA_IMAGE"
   environment:
     SNUBA_SETTINGS: docker
     CLICKHOUSE_HOST: clickhouse
     DEFAULT_BROKERS: "kafka:9092"
     REDIS_HOST: redis
     UWSGI_MAX_REQUESTS: "10000"
     UWSGI_DISABLE_LOGGING: "true"
     # Leaving the value empty to just pass whatever is set
     # on the host system (or in the .env file)
     SENTRY_EVENT_RETENTION_DAYS:
 services:
   smtp:
     <<: *restart_policy
     image: tianon/exim4
+    hostname: ${SENTRY_MAIL_HOST:-}
     volumes:
       - "sentry-smtp:/var/spool/exim4"
       - "sentry-smtp-log:/var/log/exim4"
   memcached:
     <<: *restart_policy
     image: "memcached:1.6.9-alpine"
     healthcheck:
       <<: *healthcheck_defaults
       # From: https://stackoverflow.com/a/31877626/5155484
       test: echo stats | nc 127.0.0.1 11211
   redis:
     <<: *restart_policy
     image: "redis:6.2.4-alpine"
     healthcheck:
       <<: *healthcheck_defaults
       test: redis-cli ping
     volumes:
       - "sentry-redis:/data"
     ulimits:
       nofile:
         soft: 10032
         hard: 10032
   postgres:
     <<: *restart_policy
     image: "postgres:9.6"
     healthcheck:
       <<: *healthcheck_defaults
       # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided
       test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
     command:
       [
         "postgres",
         "-c",
         "wal_level=logical",
         "-c",
         "max_replication_slots=1",
         "-c",
         "max_wal_senders=1",
       ]
     environment:
       POSTGRES_HOST_AUTH_METHOD: "trust"
     entrypoint: /opt/sentry/postgres-entrypoint.sh
     volumes:
       - "sentry-postgres:/var/lib/postgresql/data"
       - type: bind
         read_only: true
         source: ./postgres/
         target: /opt/sentry/
   zookeeper:
     <<: *restart_policy
     image: "confluentinc/cp-zookeeper:5.5.0"
     environment:
       ZOOKEEPER_CLIENT_PORT: "2181"
       CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
       ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN"
       ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN"
       KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok"
     volumes:
       - "sentry-zookeeper:/var/lib/zookeeper/data"
       - "sentry-zookeeper-log:/var/lib/zookeeper/log"
       - "sentry-secrets:/etc/zookeeper/secrets"
     healthcheck:
       <<: *healthcheck_defaults
       test: ["CMD-SHELL", 'echo "ruok" | nc -w 2 -q 2 localhost 2181 | grep imok']
   kafka:
     <<: *restart_policy
     depends_on:
       zookeeper:
         <<: *depends_on-healthy
     image: "confluentinc/cp-kafka:5.5.0"
     environment:
       KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
       KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092"
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
       KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
       KAFKA_LOG_RETENTION_HOURS: "24"
       KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust
       KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too
       CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
       KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN"
       KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN"
       KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN"
     volumes:
       - "sentry-kafka:/var/lib/kafka/data"
       - "sentry-kafka-log:/var/lib/kafka/log"
       - "sentry-secrets:/etc/kafka/secrets"
     healthcheck:
       <<: *healthcheck_defaults
       test: ["CMD-SHELL", "nc -z localhost 9092"]
   clickhouse:
     <<: *restart_policy
     image: "yandex/clickhouse-server:20.3.9.70"
     ulimits:
       nofile:
         soft: 262144
         hard: 262144
     volumes:
       - "sentry-clickhouse:/var/lib/clickhouse"
       - "sentry-clickhouse-log:/var/log/clickhouse-server"
       - type: bind
         read_only: true
         source: ./clickhouse/config.xml
         target: /etc/clickhouse-server/config.d/sentry.xml
     environment:
       # This limits Clickhouse's memory to 30% of the host memory
       # If you have high volume and your search return incomplete results
       # You might want to change this to a higher value (and ensure your host has enough memory)
       MAX_MEMORY_USAGE_RATIO: 0.3
     healthcheck:
       test: [
           "CMD-SHELL",
           "wget -nv -t1 --spider 'http://localhost:8123/' || exit 1",
         ]
       interval: 3s
       timeout: 600s
       retries: 200
entrypoint: ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"] volumes: - "./geoip:/sentry" snuba-api: <<: *snuba_defaults # Kafka consumer responsible for feeding events into Clickhouse snuba-consumer: <<: *snuba_defaults command: consumer --storage errors --auto-offset-reset=latest --max-batch-time-ms 750 # Kafka consumer responsible for feeding outcomes into Clickhouse # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data # since we did not do a proper migration snuba-outcomes-consumer: <<: *snuba_defaults command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750 # Kafka consumer responsible for feeding session data into Clickhouse snuba-sessions-consumer: <<: *snuba_defaults command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750 # Kafka consumer responsible for feeding transactions data into Clickhouse snuba-transactions-consumer: <<: *snuba_defaults command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --commit-log-topic=snuba-commit-log snuba-replacer: <<: *snuba_defaults command: replacer --storage errors --auto-offset-reset=latest --max-batch-size 3 snuba-subscription-consumer-events: <<: *snuba_defaults command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60 snuba-subscription-consumer-transactions: <<: *snuba_defaults command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=transactions_group --delay-seconds=60 --schedule-ttl=60 snuba-cleanup: <<: *snuba_defaults image: snuba-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SNUBA_IMAGE" command: '"*/5 * * * * gosu snuba snuba cleanup --storage errors --dry-run False"' snuba-transactions-cleanup: <<: *snuba_defaults image: snuba-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SNUBA_IMAGE" command: '"*/5 * * * * gosu snuba snuba cleanup --storage transactions --dry-run False"' symbolicator: <<: *restart_policy image: "$SYMBOLICATOR_IMAGE" volumes: - "sentry-symbolicator:/data" - type: bind read_only: true source: ./symbolicator target: /etc/symbolicator command: run -c /etc/symbolicator/config.yml symbolicator-cleanup: <<: *restart_policy image: symbolicator-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SYMBOLICATOR_IMAGE" command: '"55 23 * * * gosu symbolicator symbolicator cleanup"' volumes: - "sentry-symbolicator:/data" web: <<: *sentry_defaults healthcheck: <<: *healthcheck_defaults test: - "CMD" - "/bin/bash" - '-c' # Courtesy of https://unix.stackexchange.com/a/234089/108960 - 'exec 3<>/dev/tcp/127.0.0.1/9000 && echo -e "GET /_health/ HTTP/1.1\r\nhost: 127.0.0.1\r\n\r\n" >&3 && grep ok -s -m 1 <&3' cron: <<: *sentry_defaults command: run cron worker: <<: *sentry_defaults command: run worker ingest-consumer: <<: *sentry_defaults command: run ingest-consumer --all-consumer-types post-process-forwarder: <<: *sentry_defaults # Increase `--commit-batch-size 1` below to deal with high-load environments. 
   web:
     <<: *sentry_defaults
     healthcheck:
       <<: *healthcheck_defaults
       test:
         - "CMD"
         - "/bin/bash"
         - '-c'
         # Courtesy of https://unix.stackexchange.com/a/234089/108960
         - 'exec 3<>/dev/tcp/127.0.0.1/9000 && echo -e "GET /_health/ HTTP/1.1\r\nhost: 127.0.0.1\r\n\r\n" >&3 && grep ok -s -m 1 <&3'
   cron:
     <<: *sentry_defaults
     command: run cron
   worker:
     <<: *sentry_defaults
     command: run worker
   ingest-consumer:
     <<: *sentry_defaults
     command: run ingest-consumer --all-consumer-types
   post-process-forwarder:
     <<: *sentry_defaults
     # Increase `--commit-batch-size 1` below to deal with high-load environments.
     command: run post-process-forwarder --commit-batch-size 1
   subscription-consumer-events:
     <<: *sentry_defaults
     command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results
   subscription-consumer-transactions:
     <<: *sentry_defaults
     command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results
   sentry-cleanup:
     <<: *sentry_defaults
     image: sentry-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SENTRY_IMAGE"
     entrypoint: "/entrypoint.sh"
     command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
   nginx:
     <<: *restart_policy
     ports:
       - "$SENTRY_BIND:80/tcp"
     image: "nginx:1.21.0-alpine"
     volumes:
       - type: bind
         read_only: true
         source: ./nginx
         target: /etc/nginx
     depends_on:
       - web
       - relay
   relay:
     <<: *restart_policy
     image: "$RELAY_IMAGE"
     volumes:
       - type: bind
         read_only: true
         source: ./relay
         target: /work/.relay
       - type: bind
         read_only: true
         source: ./geoip
         target: /geoip
     depends_on:
       kafka:
         <<: *depends_on-healthy
       redis:
         <<: *depends_on-healthy
       web:
         <<: *depends_on-healthy
 volumes:
   sentry-data:
     external: true
   sentry-postgres:
     external: true
   sentry-redis:
     external: true
   sentry-zookeeper:
     external: true
   sentry-kafka:
     external: true
   sentry-clickhouse:
     external: true
   sentry-symbolicator:
     external: true
   sentry-secrets:
   sentry-smtp:
   sentry-zookeeper-log:
   sentry-kafka-log:
   sentry-smtp-log:
   sentry-clickhouse-log:
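The new `hostname:` entry on the `smtp` service is what makes outgoing mail deliverable in practice: exim identifies itself with the container's hostname, and many receiving servers reject a non-FQDN HELO. A hedged spot-check, assuming the stack is up and `SENTRY_MAIL_HOST=example.com` is set in `.env`:

```bash
# The smtp container hostname should now be the configured FQDN rather than
# a random container ID; reading /etc/hostname avoids depending on a
# hostname binary being present in the image.
docker-compose exec smtp cat /etc/hostname   # expected: example.com
```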
diff --git a/sentry/config.example.yml b/sentry/config.example.yml
index 951916c..22a236a 100644
--- a/sentry/config.example.yml
+++ b/sentry/config.example.yml
@@ -1,110 +1,116 @@
 # While a lot of configuration in Sentry can be changed via the UI, for all
 # new-style config (as of 8.0) you can also declare values here in this file
 # to enforce defaults or to ensure they cannot be changed via the UI. For more
 # information see the Sentry documentation.

 ###############
 # Mail Server #
 ###############

 # mail.backend: 'smtp'  # Use dummy if you want to disable email entirely
 mail.host: 'smtp'
 # mail.port: 25
 # mail.username: ''
 # mail.password: ''
 # mail.use-tls: false
 # mail.use-ssl: false
+
+# NOTE: The following 2 configs (mail.from and mail.list-namespace) are set
+# through SENTRY_MAIL_HOST in sentry.conf.py so remove those first if
+# you want your values in this file to be effective!
+
+
 # The email address to send on behalf of
 # mail.from: 'root@localhost'

 # The mailing list namespace for emails sent by this Sentry server.
 # This should be a domain you own (often the same domain as the domain
 # part of the `mail.from` configuration parameter value) or `localhost`.
 # mail.list-namespace: 'localhost'

 # If you'd like to configure email replies, enable this.
 # mail.enable-replies: true

 # When email-replies are enabled, this value is used in the Reply-To header
 # mail.reply-hostname: ''

 # If you're using mailgun for inbound mail, set your API key and configure a
 # route to forward to /api/hooks/mailgun/inbound/
 # Also don't forget to set `mail.enable-replies: true` above.
 # mail.mailgun-api-key: ''

 ###################
 # System Settings #
 ###################

 # If this file ever becomes compromised, it's important to generate a new key.
 # Changing this value will result in all current sessions being invalidated.
 # A new key can be generated with `$ sentry config generate-secret-key`
 system.secret-key: '!!changeme!!'

 # The ``redis.clusters`` setting is used, unsurprisingly, to configure Redis
 # clusters. These clusters can be then referred to by name when configuring
 # backends such as the cache, digests, or TSDB backend.
 # redis.clusters:
 #   default:
 #     hosts:
 #       0:
 #         host: 127.0.0.1
 #         port: 6379

 ################
 # File storage #
 ################

 # Uploaded media uses these `filestore` settings. The available
 # backends are either `filesystem` or `s3`.
 filestore.backend: 'filesystem'
 filestore.options:
   location: '/data/files'
 dsym.cache-path: '/data/dsym-cache'
 releasefile.cache-path: '/data/releasefile-cache'

 # filestore.backend: 's3'
 # filestore.options:
 #   access_key: 'AKIXXXXXX'
 #   secret_key: 'XXXXXXX'
 #   bucket_name: 's3-bucket-name'

 system.internal-url-prefix: 'http://web:9000'
 symbolicator.enabled: true
 symbolicator.options:
   url: "http://symbolicator:3021"

 transaction-events.force-disable-internal-project: true

 ######################
 # GitHub Integration #
 ######################

 # github-login.extended-permissions: ['repo']
 # github-app.id: GITHUB_APP_ID
 # github-app.name: 'GITHUB_APP_NAME'
 # github-app.webhook-secret: 'GITHUB_WEBHOOK_SECRET' # Use only if configured in GitHub
 # github-app.client-id: 'GITHUB_CLIENT_ID'
 # github-app.client-secret: 'GITHUB_CLIENT_SECRET'
 # github-app.private-key: |
 #   -----BEGIN RSA PRIVATE KEY-----
 #   privatekeyprivatekeyprivatekeyprivatekey
 #   privatekeyprivatekeyprivatekeyprivatekey
 #   privatekeyprivatekeyprivatekeyprivatekey
 #   privatekeyprivatekeyprivatekeyprivatekey
 #   privatekeyprivatekeyprivatekeyprivatekey
 #   -----END RSA PRIVATE KEY-----

 #####################
 # Slack Integration #
 #####################

 # Refer to https://develop.sentry.dev/integrations/slack/ for setup instructions.

 # slack.client-id: <'client id'>
 # slack.client-secret: <'client secret'>
 # slack.signing-secret: <'signing secret'>
 ## If legacy-app is True use verfication-token instead of signing-secret
 # slack.verification-token: <'verification token'>
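Since the two keys are now forced from `sentry.conf.py` (next hunk), values set in this file for `mail.from` / `mail.list-namespace` are silently ignored until the Python overrides are removed — that is what the added NOTE warns about. A hedged way to see which value actually wins, assuming the image's `sentry config get` subcommand behaves as it does upstream:

```bash
# Inspect the effective mail options inside the web container; mirrors the
# `$dcr web <sentry subcommand>` pattern used by the integration test above.
docker-compose run --rm web config get mail.from
docker-compose run --rm web config get mail.list-namespace
```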
diff --git a/sentry/sentry.conf.example.py b/sentry/sentry.conf.example.py
index 2d89b56..355ae63 100644
--- a/sentry/sentry.conf.example.py
+++ b/sentry/sentry.conf.example.py
@@ -1,274 +1,281 @@
 # This file is just Python, with a touch of Django which means
 # you can inherit and tweak settings to your hearts content.

 from sentry.conf.server import *  # NOQA


 # Generously adapted from pynetlinux: https://git.io/JJmga
 def get_internal_network():
     import ctypes
     import fcntl
     import math
     import socket
     import struct

     iface = b"eth0"
     sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     ifreq = struct.pack(b"16sH14s", iface, socket.AF_INET, b"\x00" * 14)

     try:
         ip = struct.unpack(
             b"!I", struct.unpack(b"16sH2x4s8x", fcntl.ioctl(sockfd, 0x8915, ifreq))[2]
         )[0]
         netmask = socket.ntohl(
             struct.unpack(b"16sH2xI8x", fcntl.ioctl(sockfd, 0x891B, ifreq))[2]
         )
     except IOError:
         return ()
     base = socket.inet_ntoa(struct.pack(b"!I", ip & netmask))
     netmask_bits = 32 - int(round(math.log(ctypes.c_uint32(~netmask).value + 1, 2), 1))
     return "{0:s}/{1:d}".format(base, netmask_bits)


 INTERNAL_SYSTEM_IPS = (get_internal_network(),)


 DATABASES = {
     "default": {
         "ENGINE": "sentry.db.postgres",
         "NAME": "postgres",
         "USER": "postgres",
         "PASSWORD": "",
         "HOST": "postgres",
         "PORT": "",
     }
 }

 # You should not change this setting after your database has been created
 # unless you have altered all schemas first
 SENTRY_USE_BIG_INTS = True

 # If you're expecting any kind of real traffic on Sentry, we highly recommend
 # configuring the CACHES and Redis settings

 ###########
 # General #
 ###########

 # Instruct Sentry that this install intends to be run by a single organization
 # and thus various UI optimizations should be enabled.
 SENTRY_SINGLE_ORGANIZATION = True

 SENTRY_OPTIONS["system.event-retention-days"] = int(
     env("SENTRY_EVENT_RETENTION_DAYS", "90")
 )

 #########
 # Redis #
 #########

 # Generic Redis configuration used as defaults for various things including:
 # Buffers, Quotas, TSDB

 SENTRY_OPTIONS["redis.clusters"] = {
     "default": {
         "hosts": {0: {"host": "redis", "password": "", "port": "6379", "db": "0"}}
     }
 }

 #########
 # Queue #
 #########

 # See https://develop.sentry.dev/services/queue/ for more
 # information on configuring your queue broker and workers. Sentry relies
 # on a Python framework called Celery to manage queues.

 rabbitmq_host = None
 if rabbitmq_host:
     BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format(
         username="guest", password="guest", host=rabbitmq_host, vhost="/"
     )
 else:
     BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format(
         **SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0]
     )

 #########
 # Cache #
 #########

 # Sentry currently utilizes two separate mechanisms. While CACHES is not a
 # requirement, it will optimize several high throughput patterns.

 CACHES = {
     "default": {
         "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
         "LOCATION": ["memcached:11211"],
         "TIMEOUT": 3600,
     }
 }

 # A primary cache is required for things such as processing events
 SENTRY_CACHE = "sentry.cache.redis.RedisCache"

 DEFAULT_KAFKA_OPTIONS = {
     "bootstrap.servers": "kafka:9092",
     "message.max.bytes": 50000000,
     "socket.timeout.ms": 1000,
 }

 SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
 SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS}

 KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS

 ###############
 # Rate Limits #
 ###############

 # Rate limits apply to notification handlers and are enforced per-project
 # automatically.

 SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"

 ##################
 # Update Buffers #
 ##################

 # Buffers (combined with queueing) act as an intermediate layer between the
 # database and the storage API. They will greatly improve efficiency on large
 # numbers of the same events being sent to the API in a short amount of time.
 # (read: if you send any kind of real data to Sentry, you should enable buffers)

 SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"

 ##########
 # Quotas #
 ##########

 # Quotas allow you to rate limit individual projects or the Sentry install as
 # a whole.

 SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"

 ########
 # TSDB #
 ########

 # The TSDB is used for building charts as well as making things like per-rate
 # alerts possible.

 SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"

 #########
 # SNUBA #
 #########

 SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
 SENTRY_SEARCH_OPTIONS = {}
 SENTRY_TAGSTORE_OPTIONS = {}

 ###########
 # Digests #
 ###########

 # The digest backend powers notification summaries.
SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend" ############## # Web Server # ############## SENTRY_WEB_HOST = "0.0.0.0" SENTRY_WEB_PORT = 9000 SENTRY_WEB_OPTIONS = { "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT), "protocol": "uwsgi", # This is needed in order to prevent https://git.io/fj7Lw "uwsgi-socket": None, "so-keepalive": True, # Keep this between 15s-75s as that's what Relay supports "http-keepalive": 15, "http-chunked-input": True, # the number of web workers "workers": 3, "threads": 4, "memory-report": False, # Some stuff so uwsgi will cycle workers sensibly "max-requests": 100000, "max-requests-delta": 500, "max-worker-lifetime": 86400, # Duplicate options from sentry default just so we don't get # bit by sentry changing a default value that we depend on. "thunder-lock": True, "log-x-forwarded-for": False, "buffer-size": 32768, "limit-post": 209715200, "disable-logging": True, "reload-on-rss": 600, "ignore-sigpipe": True, "ignore-write-errors": True, "disable-write-exception": True, } ########### # SSL/TLS # ########### # If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto # header and enable the settings below # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # SESSION_COOKIE_SECURE = True # CSRF_COOKIE_SECURE = True # SOCIAL_AUTH_REDIRECT_IS_HTTPS = True # End of SSL/TLS settings +######## +# Mail # +######## + +SENTRY_OPTIONS["mail.list-namespace"] = env('SENTRY_MAIL_HOST', 'localhost') +SENTRY_OPTIONS["mail.from"] = f"sentry@{SENTRY_OPTIONS['mail.list-namespace']}" + ############ # Features # ############ SENTRY_FEATURES["projects:sample-events"] = False SENTRY_FEATURES.update( { feature: True for feature in ( "organizations:discover", "organizations:events", "organizations:global-views", "organizations:incidents", "organizations:integrations-issue-basic", "organizations:integrations-issue-sync", "organizations:invite-members", "organizations:metric-alert-builder-aggregate", "organizations:sso-basic", "organizations:sso-rippling", "organizations:sso-saml2", "organizations:performance-view", "organizations:advanced-search", "projects:custom-inbound-filters", "projects:data-forwarding", "projects:discard-groups", "projects:plugins", "projects:rate-limits", "projects:servicehooks", ) } ) ####################### # MaxMind Integration # ####################### GEOIP_PATH_MMDB = '/geoip/GeoLite2-City.mmdb' ######################### # Bitbucket Integration # ######################### # BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY' # BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'