diff --git a/.env b/.env index f6fd533..aad7129 100644 --- a/.env +++ b/.env @@ -1,10 +1,12 @@ COMPOSE_PROJECT_NAME=sentry_onpremise SENTRY_EVENT_RETENTION_DAYS=90 # You can either use a port number or an IP:PORT combo for SENTRY_BIND # See https://docs.docker.com/compose/compose-file/#ports for more SENTRY_BIND=9000 -SENTRY_IMAGE=getsentry/sentry:21.8.0 -SNUBA_IMAGE=getsentry/snuba:21.8.0 -RELAY_IMAGE=getsentry/relay:21.8.0 +# Set SENTRY_MAIL_HOST to a valid FQDN (host/domain name) to be able to send emails! +# SENTRY_MAIL_HOST=example.com +SENTRY_IMAGE=getsentry/sentry:21.9.0 +SNUBA_IMAGE=getsentry/snuba:21.9.0 +RELAY_IMAGE=getsentry/relay:21.9.0 SYMBOLICATOR_IMAGE=getsentry/symbolicator:0.3.4 -WAL2JSON_VERSION=latest +WAL2JSON_VERSION=latest \ No newline at end of file diff --git a/.github/workflows/issue-routing-helper.yml b/.github/workflows/issue-routing-helper.yml index 22e381c..a361795 100644 --- a/.github/workflows/issue-routing-helper.yml +++ b/.github/workflows/issue-routing-helper.yml @@ -1,44 +1,46 @@ name: Issue Routing Helper on: issues: types: [labeled] env: # Use GH_RELEASE_PAT as github-actions bot is not allowed to ping teams GH_TOKEN: ${{ secrets.GH_RELEASE_PAT }} GH_REPO: ${{ github.repository }} jobs: route: runs-on: ubuntu-latest if: >- + github.event.issue.state == 'open' + && startsWith(github.event.label.name, 'Team: ') && !contains(github.event.issue.labels.*.name, 'Status: Backlog') && !contains(github.event.issue.labels.*.name, 'Status: In Progress') steps: - name: "Ensure a single 'Team: *' label with 'Status: Untriaged'" run: | labels_to_remove=$(gh api --paginate "/repos/$GH_REPO/labels" -q '[.[].name | select((startswith("Team: ") or startswith("Status: ")) and . != "${{ github.event.label.name }}" and . != "Status: Untriaged")] | join(",")') gh issue edit ${{ github.event.issue.number }} --remove-label "$labels_to_remove" --add-label '${{ github.event.label.name }},Status: Untriaged' - name: "Mention/ping assigned team for triage" run: | # Get team label mention name: team_label='${{ github.event.label.name }}' team_name="${team_label:6}" # Strip the first 6 chars, which is the 'Team: ' part team_slug="${team_name// /-}" # Replace spaces with hyphens for url/slug friendliness mention_slug=$(gh api "/orgs/getsentry/teams/$team_slug" -q .slug || true) if [[ -z "$mention_slug" ]]; then echo "Couldn't find team mention from slug, trying the label description" team_slug=$(gh api "/repos/$GH_REPO/labels/$team_label" -q '.description') mention_slug=$(gh api "/orgs/getsentry/teams/$team_slug" -q .slug || true) fi if [[ -n "$mention_slug" ]]; then echo "Routing to @getsentry/$mention_slug for [triage](https://develop.sentry.dev/processing-tickets/#3-triage). ⏲️" > comment_body else echo "[Failed]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID) to route to \`${{ github.event.label.name }}\`. 😕" > comment_body echo "" >> comment_body echo "Defaulting to @getsentry/open-source for [triage](https://develop.sentry.dev/processing-tickets/#3-triage). ⏲️" >> comment_body fi gh issue comment ${{ github.event.issue.number }} --body-file comment_body diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 282537a..1c5a938 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,60 +1,56 @@ name: Test on: # Run CI on all pushes to the master and release/** branches, and on all new # pull requests, and on all pushes to pull requests (even if a pull request # is not against master). 
push: branches: - "master" - "release/**" pull_request: -env: - DOCKER_COMPOSE_VERSION: 1.29.2 defaults: run: shell: bash jobs: unit-test: runs-on: ubuntu-20.04 name: "unit tests" steps: - name: Checkout uses: actions/checkout@v2 - name: Unit Tests working-directory: install run: find ./ -type f -name "*-test.sh" -exec "./{}" \; integration-test: runs-on: ubuntu-20.04 name: "integration test" steps: - - name: Pin docker-compose - run: | - sudo rm /usr/local/bin/docker-compose - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose - chmod +x docker-compose - sudo mv docker-compose /usr/local/bin - - name: Checkout uses: actions/checkout@v2 + + - name: Pin docker-compose + run: | + COMPOSE_PATH=/usr/local/bin/docker-compose + source ./install/_min-requirements.sh + sudo rm $COMPOSE_PATH + sudo curl -L https://github.com/docker/compose/releases/download/${MIN_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o $COMPOSE_PATH + sudo chmod +x $COMPOSE_PATH - name: Integration Test run: | echo "Testing initial install" - # Create ./certificates here because install.sh will create it with root:root - # and then run.sh (-> setup.sh) won't be able to write to it. - mkdir certificates ./install.sh ./_integration-test/run.sh echo "Testing in-place upgrade" # Also test plugin installation here echo "sentry-auth-oidc" >> sentry/requirements.txt ./install.sh --minimize-downtime ./_integration-test/run.sh - name: Inspect failure if: failure() run: | docker-compose ps docker-compose logs diff --git a/.gitignore b/.gitignore index c8967cd..4a86daa 100644 --- a/.gitignore +++ b/.gitignore @@ -1,96 +1,93 @@ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt sentry_install_log*.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Sphinx documentation docs/_build/ # PyBuilder target/ # Ipython Notebook .ipynb_checkpoints # pyenv .python-version # https://docs.docker.com/compose/extends/ docker-compose.override.yml *.tar data/ .vscode/tags # custom Sentry config sentry/sentry.conf.py sentry/config.yml sentry/*.bak sentry/requirements.txt relay/credentials.json relay/config.yml symbolicator/config.yml geoip/GeoIP.conf geoip/*.mmdb geoip/.geoipupdate.lock # wal2json download postgres/wal2json -# custom certificate authorities -certificates - # integration testing _integration-test/custom-ca-roots/nginx/* sentry/test-custom-ca-roots.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f388b87..57f37ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,28 +1,37 @@ # Changelog +## 21.9.0 + +- fix(healthcheck): Increase retries to 5 (#1072) +- fix(requirements): Make compose version check bw-compatible (#1068) +- ci: Test with the required minimum docker-compose (#1066) + Run tests using docker-compose `1.28.0` instead of latest +- fix(clickhouse): Use correct HTTP port for healthcheck (#1069) + Fixes the regular `Unexpected packet` errors in Clickhouse + ## 21.8.0 - feat: Support custom CA roots ([#27062](https://github.com/getsentry/sentry/pull/27062)), see the [docs](https://develop.sentry.dev/self-hosted/custom-ca-roots/) for more details. - fix: Fix `curl` image to version 7.77.0 - upgrade: docker-compose version to 1.29.2 - feat: Leverage health checks for depends_on ## 21.7.0 - No documented changes. ## 21.6.3 - No documented changes. ## 21.6.2 - BREAKING CHANGE: The frontend bundle will be loaded asynchronously (via [#25744](https://github.com/getsentry/sentry/pull/25744)). This is a breaking change that can affect custom plugins that access certain globals in the django template. Please see https://forum.sentry.io/t/breaking-frontend-changes-for-custom-plugins/14184 for more information. ## 21.6.1 - No documented changes. ## 21.6.0 - feat: Add healthchecks for redis, memcached and postgres (#975) diff --git a/LICENSE b/LICENSE index a55050d..cb64c4a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,104 +1,104 @@ Business Source License 1.1 Parameters Licensor: Functional Software, Inc. Licensed Work: Sentry The Licensed Work is (c) 2019 Functional Software, Inc. Additional Use Grant: You may make use of the Licensed Work, provided that you do not use the Licensed Work for an Application Monitoring Service. An "Application Monitoring Service" is a commercial offering that allows third parties (other than your employees and contractors) to access the functionality of the Licensed Work so that such third parties directly benefit from the error-reporting or application monitoring features of the Licensed Work. -Change Date: 2024-08-15 +Change Date: 2024-09-15 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, please visit: https://sentry.io/pricing/ Notice The Business Source License (this document, or the "License") is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. 
"Business Source License" is a trademark of MariaDB Corporation Ab. ----------------------------------------------------------------------------- Business Source License 1.1 Terms The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark "Business Source License", as long as you comply with the Covenants of Licensor below. Covenants of Licensor In consideration of the right to use this License’s text and the "Business Source License" name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where "compatible" means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text "None". 3. To specify a Change Date. 4. Not to modify this License in any other way. 
diff --git a/README.md b/README.md index 5e2c66f..1cc6b64 100644 --- a/README.md +++ b/README.md @@ -1,63 +1,63 @@ -# Self-Hosted Sentry 21.8.0 +# Self-Hosted Sentry 21.9.0 Official bootstrap for running your own [Sentry](https://sentry.io/) with [Docker](https://www.docker.com/). ## Requirements * Docker 19.03.6+ * Compose 1.28.0+ * 4 CPU Cores * 8 GB RAM * 20 GB Free Disk Space ## Setup To get started with all the defaults, simply clone the repo and run `./install.sh` in your local check-out. Sentry uses Python 3 by default since December 4th, 2020 and Sentry 21.1.0 is the last version to support Python 2. During the install, a prompt will ask if you want to create a user account. If you require that the install not be blocked by the prompt, run `./install.sh --no-user-prompt`. Please visit [our documentation](https://develop.sentry.dev/self-hosted/) for everything else. ## Tips & Tricks ### Event Retention Sentry comes with a cleanup cron job that prunes events older than `90 days` by default. If you want to change that, you can change the `SENTRY_EVENT_RETENTION_DAYS` environment variable in `.env` or simply override it in your environment. If you do not want the cleanup cron, you can remove the `sentry-cleanup` service from the `docker-compose.yml` file. ### Installing a specific SHA If you want to install a specific release of Sentry, use the tags/releases on this repo. We continuously push the Docker image for each commit made into [Sentry](https://github.com/getsentry/sentry), and other services such as [Snuba](https://github.com/getsentry/snuba) or [Symbolicator](https://github.com/getsentry/symbolicator) to [our Docker Hub](https://hub.docker.com/u/getsentry) and tag the latest version on master as `:nightly`. This is also usually what we have on sentry.io and what the install script uses. You can use a custom Sentry image, such as a modified version that you have built on your own, or simply a specific commit hash by setting the `SENTRY_IMAGE` environment variable to that image name before running `./install.sh`: ```shell SENTRY_IMAGE=getsentry/sentry:83b1380 ./install.sh ``` Note that this may not work for all commit SHAs as this repository evolves with Sentry and its satellite projects. It is highly recommended to check out a version of this repository that is close to the timestamp of the Sentry commit you are installing. ### Using Linux If you are using Linux and you need to use `sudo` when running `./install.sh`, modifying the version of Sentry is slightly different. First, run the following: ```shell sudo visudo ``` Then add the following line: ```shell Defaults env_keep += "SENTRY_IMAGE" ``` Save the file, then run the following in your terminal: ```shell export SENTRY_IMAGE=us.gcr.io/sentryio/sentry:83b1380 sudo ./install.sh ``` Replace `83b1380` with the SHA you want to use. [build-status-image]: https://github.com/getsentry/onpremise/workflows/test/badge.svg [build-status-url]: https://git.io/JUYkh diff --git a/_integration-test/run.sh b/_integration-test/run.sh index dc13de4..6b66052 100755 --- a/_integration-test/run.sh +++ b/_integration-test/run.sh @@ -1,128 +1,128 @@ #!/usr/bin/env bash set -e source "$(dirname $0)/../install/_lib.sh" echo "${_group}Setting up variables and helpers ..."
export SENTRY_TEST_HOST="${SENTRY_TEST_HOST:-http://localhost:9000}" TEST_USER='test@example.com' TEST_PASS='test123TEST' COOKIE_FILE=$(mktemp) # Courtesy of https://stackoverflow.com/a/2183063/90297 trap_with_arg() { func="$1" ; shift for sig ; do trap "$func $sig "'$LINENO' "$sig" done } DID_CLEAN_UP=0 # the cleanup function will be the exit point cleanup () { if [ "$DID_CLEAN_UP" -eq 1 ]; then return 0; fi DID_CLEAN_UP=1 if [ "$1" != "EXIT" ]; then echo "An error occurred, caught SIG$1 on line $2"; fi echo "Cleaning up..." rm $COOKIE_FILE echo "Done." } trap_with_arg cleanup ERR INT TERM EXIT echo "${_endgroup}" echo "${_group}Starting Sentry for tests ..." # Disable beacon for e2e tests echo 'SENTRY_BEACON=False' >> $SENTRY_CONFIG_PY $dcr web createuser --superuser --email $TEST_USER --password $TEST_PASS || true $dc up -d printf "Waiting for Sentry to be up"; timeout 60 bash -c 'until $(curl -Isf -o /dev/null $SENTRY_TEST_HOST); do printf '.'; sleep 0.5; done' echo "" echo "${_endgroup}" echo "${_group}Running tests ..." get_csrf_token () { awk '$6 == "sc" { print $7 }' $COOKIE_FILE; } sentry_api_request () { curl -s -H 'Accept: application/json; charset=utf-8' -H "Referer: $SENTRY_TEST_HOST" -H 'Content-Type: application/json' -H "X-CSRFToken: $(get_csrf_token)" -b "$COOKIE_FILE" -c "$COOKIE_FILE" "$SENTRY_TEST_HOST/api/0/$1" ${@:2}; } login () { INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null $SENTRY_TEST_HOST -w %{url_effective}) if [ "$INITIAL_AUTH_REDIRECT" != "$SENTRY_TEST_HOST/auth/login/sentry/" ]; then echo "Initial /auth/login/ redirect failed, exiting..." echo "$INITIAL_AUTH_REDIRECT" exit -1 fi CSRF_TOKEN_FOR_LOGIN=$(curl $SENTRY_TEST_HOST -sL -c "$COOKIE_FILE" | awk -F "['\"]" ' /csrfmiddlewaretoken/ { print $4 "=" $6; exit; }') curl -sL --data-urlencode 'op=login' --data-urlencode "username=$TEST_USER" --data-urlencode "password=$TEST_PASS" --data-urlencode "$CSRF_TOKEN_FOR_LOGIN" "$SENTRY_TEST_HOST/auth/login/sentry/" -H "Referer: $SENTRY_TEST_HOST/auth/login/sentry/" -b "$COOKIE_FILE" -c "$COOKIE_FILE"; } LOGIN_RESPONSE=$(login); declare -a LOGIN_TEST_STRINGS=( '"isAuthenticated":true' '"username":"test@example.com"' '"isSuperuser":true' ) for i in "${LOGIN_TEST_STRINGS[@]}" do echo "Testing '$i'..." echo "$LOGIN_RESPONSE" | grep "$i[,}]" >& /dev/null echo "Pass." done echo "${_endgroup}" echo "${_group}Running moar tests !!!" 
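# A usage sketch for the helpers defined above: sentry_api_request already
# carries the cookie jar plus the Referer/X-CSRFToken headers, so an
# authenticated API call is just the endpoint path (relative to /api/0/) and
# any extra curl flags, e.g. `sentry_api_request "projects/" -X GET`
# (illustrative endpoint); the real calls below follow this same pattern.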
# Set up initial/required settings (InstallWizard request) -sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","mail.from":"root@localhost","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null +sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null SENTRY_DSN=$(sentry_api_request "projects/sentry/internal/keys/" | awk 'BEGIN { RS=",|:{\n"; FS="\""; } $2 == "public" && $4 ~ "^http" { print $4; exit; }') # We ignore the protocol and the host as we already know those DSN_PIECES=(`echo $SENTRY_DSN | sed -ne 's|^https\{0,1\}://\([0-9a-z]\{1,\}\)@[^/]\{1,\}/\([0-9]\{1,\}\)$|\1 \2|p' | tr ' ' '\n'`) SENTRY_KEY=${DSN_PIECES[0]} PROJECT_ID=${DSN_PIECES[1]} TEST_EVENT_ID=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-f0-9" | head -c 32) # Thanks @untitaker - https://forum.sentry.io/t/how-can-i-post-with-curl-a-sentry-event-which-authentication-credentials/4759/2?u=byk echo "Creating test event..." curl -sf --data '{"event_id": "'"$TEST_EVENT_ID"'","level":"error","message":"a failure","extra":{"object":"42"}}' -H 'Content-Type: application/json' -H "X-Sentry-Auth: Sentry sentry_version=7, sentry_key=$SENTRY_KEY, sentry_client=test-bash/0.1" "$SENTRY_TEST_HOST/api/$PROJECT_ID/store/" -o /dev/null EVENT_PATH="projects/sentry/internal/events/$TEST_EVENT_ID/" export -f sentry_api_request get_csrf_token export SENTRY_TEST_HOST COOKIE_FILE EVENT_PATH printf "Getting the test event back" timeout 30 bash -c 'until $(sentry_api_request "$EVENT_PATH" -Isf -X GET -o /dev/null); do printf '.'; sleep 0.5; done' echo " got it!"; EVENT_RESPONSE=$(sentry_api_request "$EVENT_PATH") declare -a EVENT_TEST_STRINGS=( '"eventID":"'"$TEST_EVENT_ID"'"' '"message":"a failure"' '"title":"a failure"' '"object":"42"' ) for i in "${EVENT_TEST_STRINGS[@]}" do echo "Testing '$i'..." echo "$EVENT_RESPONSE" | grep "$i[,}]" >& /dev/null echo "Pass." done echo "${_endgroup}" echo "${_group}Ensure cleanup crons are working ..." $dc ps | grep -q -- "-cleanup_.\+[[:space:]]\+Up[[:space:]]\+" echo "${_endgroup}" echo "${_group}Test custom CAs work ..." source ./custom-ca-roots/setup.sh $dcr --no-deps web python3 /etc/sentry/test-custom-ca-roots.py source ./custom-ca-roots/teardown.sh echo "${_endgroup}" diff --git a/certificates/.gitignore b/certificates/.gitignore new file mode 100644 index 0000000..30d0607 --- /dev/null +++ b/certificates/.gitignore @@ -0,0 +1,3 @@ +# Add all custom CAs in this folder +* +!.gitignore diff --git a/docker-compose.yml b/docker-compose.yml index 5d26623..395a832 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,338 +1,338 @@ -version: "3.4" x-restart-policy: &restart_policy restart: unless-stopped x-depends_on-healthy: &depends_on-healthy condition: service_healthy x-depends_on-default: &depends_on-default condition: service_started x-healthcheck-defaults: &healthcheck_defaults # Avoid setting the interval too small, as docker uses much more CPU than one would expect. 
# Related issues: # https://github.com/moby/moby/issues/39102 # https://github.com/moby/moby/issues/39388 # https://github.com/getsentry/onpremise/issues/1000 interval: 30s timeout: 5s - retries: 3 + retries: 5 start_period: 10s x-sentry-defaults: &sentry_defaults <<: *restart_policy image: "$SENTRY_IMAGE" depends_on: redis: <<: *depends_on-healthy kafka: <<: *depends_on-healthy memcached: <<: *depends_on-default snuba-api: <<: *depends_on-default snuba-consumer: <<: *depends_on-default snuba-outcomes-consumer: <<: *depends_on-default snuba-sessions-consumer: <<: *depends_on-default snuba-transactions-consumer: <<: *depends_on-default snuba-subscription-consumer-events: <<: *depends_on-default snuba-subscription-consumer-transactions: <<: *depends_on-default snuba-replacer: <<: *depends_on-default symbolicator: <<: *depends_on-default entrypoint: "/etc/sentry/entrypoint.sh" command: ["run", "web"] environment: PYTHONUSERBASE: "/data/custom-packages" SENTRY_CONF: "/etc/sentry" SNUBA: "http://snuba-api:1218" # Force everything to use the system CA bundle # This is mostly needed to support installing custom CA certs # This one is used by botocore DEFAULT_CA_BUNDLE: &ca_bundle "/etc/ssl/certs/ca-certificates.crt" # This one is used by requests REQUESTS_CA_BUNDLE: *ca_bundle # This one is used by grpc/google modules GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR: *ca_bundle # Leaving the value empty to just pass whatever is set # on the host system (or in the .env file) SENTRY_EVENT_RETENTION_DAYS: + SENTRY_MAIL_HOST: volumes: - "sentry-data:/data" - "./sentry:/etc/sentry" - "./geoip:/geoip:ro" - "./certificates:/usr/local/share/ca-certificates:ro" x-snuba-defaults: &snuba_defaults <<: *restart_policy depends_on: clickhouse: <<: *depends_on-healthy kafka: <<: *depends_on-healthy redis: <<: *depends_on-healthy image: "$SNUBA_IMAGE" environment: SNUBA_SETTINGS: docker CLICKHOUSE_HOST: clickhouse DEFAULT_BROKERS: "kafka:9092" REDIS_HOST: redis UWSGI_MAX_REQUESTS: "10000" UWSGI_DISABLE_LOGGING: "true" # Leaving the value empty to just pass whatever is set # on the host system (or in the .env file) SENTRY_EVENT_RETENTION_DAYS: services: memcached: <<: *restart_policy image: "memcached:1.6.9-alpine" healthcheck: <<: *healthcheck_defaults # From: https://stackoverflow.com/a/31877626/5155484 test: echo stats | nc 127.0.0.1 11211 redis: <<: *restart_policy image: "redis:6.2.4-alpine" healthcheck: <<: *healthcheck_defaults test: redis-cli ping volumes: - "sentry-redis:/data" ulimits: nofile: soft: 10032 hard: 10032 zookeeper: <<: *restart_policy image: "confluentinc/cp-zookeeper:5.5.0" environment: ZOOKEEPER_CLIENT_PORT: "2181" CONFLUENT_SUPPORT_METRICS_ENABLE: "false" ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN" ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN" KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok" volumes: - "sentry-zookeeper:/var/lib/zookeeper/data" - "sentry-zookeeper-log:/var/lib/zookeeper/log" - "sentry-secrets:/etc/zookeeper/secrets" healthcheck: <<: *healthcheck_defaults test: ["CMD-SHELL", 'echo "ruok" | nc -w 2 -q 2 localhost 2181 | grep imok'] kafka: <<: *restart_policy depends_on: zookeeper: <<: *depends_on-healthy image: "confluentinc/cp-kafka:5.5.0" environment: KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181" KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092" KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1" KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1" KAFKA_LOG_RETENTION_HOURS: "24" KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too 
CONFLUENT_SUPPORT_METRICS_ENABLE: "false" KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN" KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN" KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN" volumes: - "sentry-kafka:/var/lib/kafka/data" - "sentry-kafka-log:/var/lib/kafka/log" - "sentry-secrets:/etc/kafka/secrets" healthcheck: <<: *healthcheck_defaults test: ["CMD-SHELL", "nc -z localhost 9092"] clickhouse: <<: *restart_policy image: "yandex/clickhouse-server:20.3.9.70" ulimits: nofile: soft: 262144 hard: 262144 volumes: - "sentry-clickhouse:/var/lib/clickhouse" - "sentry-clickhouse-log:/var/log/clickhouse-server" - type: bind read_only: true source: ./clickhouse/config.xml target: /etc/clickhouse-server/config.d/sentry.xml environment: # This limits Clickhouse's memory to 30% of the host memory # If you have high volume and your searches return incomplete results # You might want to change this to a higher value (and ensure your host has enough memory) MAX_MEMORY_USAGE_RATIO: 0.3 healthcheck: test: [ "CMD-SHELL", - "wget -nv -t1 --spider 'http://localhost:9000/' || exit 1", + "wget -nv -t1 --spider 'http://localhost:8123/' || exit 1", ] interval: 3s timeout: 600s retries: 200 geoipupdate: image: "maxmindinc/geoipupdate:v4.7.1" # Override the entrypoint in order to avoid using envvars for config. # Futz with settings so we can keep mmdb and conf in same dir on host # (image looks for them in separate dirs by default). entrypoint: ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"] volumes: - "./geoip:/sentry" snuba-api: <<: *snuba_defaults # Kafka consumer responsible for feeding events into Clickhouse snuba-consumer: <<: *snuba_defaults command: consumer --storage errors --auto-offset-reset=latest --max-batch-time-ms 750 # Kafka consumer responsible for feeding outcomes into Clickhouse # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data # since we did not do a proper migration snuba-outcomes-consumer: <<: *snuba_defaults command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750 # Kafka consumer responsible for feeding session data into Clickhouse snuba-sessions-consumer: <<: *snuba_defaults command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750 # Kafka consumer responsible for feeding transactions data into Clickhouse snuba-transactions-consumer: <<: *snuba_defaults command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --commit-log-topic=snuba-commit-log snuba-replacer: <<: *snuba_defaults command: replacer --storage errors --auto-offset-reset=latest --max-batch-size 3 snuba-subscription-consumer-events: <<: *snuba_defaults command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60 snuba-subscription-consumer-transactions: <<: *snuba_defaults command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=transactions_group --delay-seconds=60 --schedule-ttl=60 snuba-cleanup: <<: *snuba_defaults image:
snuba-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SNUBA_IMAGE" command: '"*/5 * * * * gosu snuba snuba cleanup --storage errors --dry-run False"' snuba-transactions-cleanup: <<: *snuba_defaults image: snuba-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SNUBA_IMAGE" command: '"*/5 * * * * gosu snuba snuba cleanup --storage transactions --dry-run False"' symbolicator: <<: *restart_policy image: "$SYMBOLICATOR_IMAGE" volumes: - "sentry-symbolicator:/data" - type: bind read_only: true source: ./symbolicator target: /etc/symbolicator command: run -c /etc/symbolicator/config.yml symbolicator-cleanup: <<: *restart_policy image: symbolicator-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SYMBOLICATOR_IMAGE" command: '"55 23 * * * gosu symbolicator symbolicator cleanup"' volumes: - "sentry-symbolicator:/data" web: <<: *sentry_defaults healthcheck: <<: *healthcheck_defaults test: - "CMD" - "/bin/bash" - '-c' # Courtesy of https://unix.stackexchange.com/a/234089/108960 - 'exec 3<>/dev/tcp/127.0.0.1/9000 && echo -e "GET /_health/ HTTP/1.1\r\nhost: 127.0.0.1\r\n\r\n" >&3 && grep ok -s -m 1 <&3' cron: <<: *sentry_defaults command: run cron worker: <<: *sentry_defaults command: run worker ingest-consumer: <<: *sentry_defaults command: run ingest-consumer --all-consumer-types post-process-forwarder: <<: *sentry_defaults # Increase `--commit-batch-size 1` below to deal with high-load environments. command: run post-process-forwarder --commit-batch-size 1 subscription-consumer-events: <<: *sentry_defaults command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results subscription-consumer-transactions: <<: *sentry_defaults command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results sentry-cleanup: <<: *sentry_defaults image: sentry-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SENTRY_IMAGE" entrypoint: "/entrypoint.sh" command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"' nginx: <<: *restart_policy ports: - "$SENTRY_BIND:80/tcp" image: "nginx:1.21.0-alpine" volumes: - type: bind read_only: true source: ./nginx target: /etc/nginx depends_on: - web - relay relay: <<: *restart_policy image: "$RELAY_IMAGE" volumes: - type: bind read_only: true source: ./relay target: /work/.relay - type: bind read_only: true source: ./geoip target: /geoip depends_on: kafka: <<: *depends_on-healthy redis: <<: *depends_on-healthy web: <<: *depends_on-healthy volumes: sentry-data: external: true sentry-redis: external: true sentry-zookeeper: external: true sentry-kafka: external: true sentry-clickhouse: external: true sentry-symbolicator: external: true sentry-secrets: sentry-zookeeper-log: sentry-kafka-log: sentry-clickhouse-log: diff --git a/install/_min-requirements.sh b/install/_min-requirements.sh new file mode 100644 index 0000000..fb1b8a1 --- /dev/null +++ b/install/_min-requirements.sh @@ -0,0 +1,7 @@ +# Don't forget to update the README and other docs when you change these! +MIN_DOCKER_VERSION='19.03.6' +MIN_COMPOSE_VERSION='1.28.0' +MIN_RAM_HARD=3800 # MB +MIN_RAM_SOFT=7800 # MB +MIN_CPU_HARD=2 +MIN_CPU_SOFT=4 diff --git a/install/check-minimum-requirements.sh b/install/check-minimum-requirements.sh index b3804df..149b7d9 100644 --- a/install/check-minimum-requirements.sh +++ b/install/check-minimum-requirements.sh @@ -1,53 +1,49 @@ echo "${_group}Checking minimum requirements ..."
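# The MIN_* thresholds are now sourced from install/_min-requirements.sh (new
# file above), so this script and CI's "Pin docker-compose" step in
# .github/workflows/test.yml read the same values and cannot drift apart.
# The ver() helper below packs a dotted version into a zero-padded integer so
# plain integer -lt comparison works, e.g. ver 1.28.0 -> 1028000 and
# ver 19.03.6 -> 19003006.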
-MIN_DOCKER_VERSION='19.03.6' -MIN_COMPOSE_VERSION='1.28.0' -MIN_RAM_HARD=3800 # MB -MIN_RAM_SOFT=7800 # MB -MIN_CPU_HARD=2 -MIN_CPU_SOFT=4 +source "$(dirname $0)/_min-requirements.sh" DOCKER_VERSION=$(docker version --format '{{.Server.Version}}') -COMPOSE_VERSION=$($dc --version | sed 's/docker-compose version \(.\{1,\}\),.*/\1/') +# Do NOT use $dc instead of `docker-compose` below as older versions don't support certain options and fail +COMPOSE_VERSION=$(docker-compose --version | sed 's/docker-compose version \(.\{1,\}\),.*/\1/') RAM_AVAILABLE_IN_DOCKER=$(docker run --rm busybox free -m 2>/dev/null | awk '/Mem/ {print $2}'); CPU_AVAILABLE_IN_DOCKER=$(docker run --rm busybox nproc --all); # Compare dot-separated strings - function below is inspired by https://stackoverflow.com/a/37939589/808368 function ver () { echo "$@" | awk -F. '{ printf("%d%03d%03d", $1,$2,$3); }'; } if [[ "$(ver $DOCKER_VERSION)" -lt "$(ver $MIN_DOCKER_VERSION)" ]]; then echo "FAIL: Expected minimum Docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION" exit 1 fi if [[ "$(ver $COMPOSE_VERSION)" -lt "$(ver $MIN_COMPOSE_VERSION)" ]]; then echo "FAIL: Expected minimum docker-compose version to be $MIN_COMPOSE_VERSION but found $COMPOSE_VERSION" exit 1 fi if [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_HARD" ]]; then echo "FAIL: Required minimum CPU cores available to Docker is $MIN_CPU_HARD, found $CPU_AVAILABLE_IN_DOCKER" exit 1 elif [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_SOFT" ]]; then echo "WARN: Recommended minimum CPU cores available to Docker is $MIN_CPU_SOFT, found $CPU_AVAILABLE_IN_DOCKER" fi if [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_HARD" ]]; then echo "FAIL: Required minimum RAM available to Docker is $MIN_RAM_HARD MB, found $RAM_AVAILABLE_IN_DOCKER MB" exit 1 elif [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_SOFT" ]]; then echo "WARN: Recommended minimum RAM available to Docker is $MIN_RAM_SOFT MB, found $RAM_AVAILABLE_IN_DOCKER MB" fi #SSE4.2 required by Clickhouse (https://clickhouse.yandex/docs/en/operations/requirements/) # On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check. https://github.com/ClickHouse/ClickHouse/issues/20#issuecomment-226849297 IS_KVM=$(docker run --rm busybox grep -c 'Common KVM processor' /proc/cpuinfo || :) if [[ "$IS_KVM" -eq 0 ]]; then SUPPORTS_SSE42=$(docker run --rm busybox grep -c sse4_2 /proc/cpuinfo || :) if [[ "$SUPPORTS_SSE42" -eq 0 ]]; then echo "FAIL: The CPU your machine is running on does not support the SSE 4.2 instruction set, which is required for one of the services Sentry uses (Clickhouse). See https://git.io/JvLDt for more info." exit 1 fi fi echo "${_endgroup}" diff --git a/sentry/config.example.yml b/sentry/config.example.yml index 951916c..22a236a 100644 --- a/sentry/config.example.yml +++ b/sentry/config.example.yml @@ -1,110 +1,116 @@ # While a lot of configuration in Sentry can be changed via the UI, for all # new-style config (as of 8.0) you can also declare values here in this file # to enforce defaults or to ensure they cannot be changed via the UI. For more # information see the Sentry documentation. 
############### # Mail Server # ############### # mail.backend: 'smtp' # Use dummy if you want to disable email entirely mail.host: 'smtp' # mail.port: 25 # mail.username: '' # mail.password: '' # mail.use-tls: false # mail.use-ssl: false + +# NOTE: The following 2 configs (mail.from and mail.list-namespace) are set +# through SENTRY_MAIL_HOST in sentry.conf.py so remove those first if +# you want your values in this file to be effective! + + # The email address to send on behalf of # mail.from: 'root@localhost' # The mailing list namespace for emails sent by this Sentry server. # This should be a domain you own (often the same domain as the domain # part of the `mail.from` configuration parameter value) or `localhost`. # mail.list-namespace: 'localhost' # If you'd like to configure email replies, enable this. # mail.enable-replies: true # When email-replies are enabled, this value is used in the Reply-To header # mail.reply-hostname: '' # If you're using mailgun for inbound mail, set your API key and configure a # route to forward to /api/hooks/mailgun/inbound/ # Also don't forget to set `mail.enable-replies: true` above. # mail.mailgun-api-key: '' ################### # System Settings # ################### # If this file ever becomes compromised, it's important to generate a new key. # Changing this value will result in all current sessions being invalidated. # A new key can be generated with `$ sentry config generate-secret-key` system.secret-key: '!!changeme!!' # The ``redis.clusters`` setting is used, unsurprisingly, to configure Redis # clusters. These clusters can be then referred to by name when configuring # backends such as the cache, digests, or TSDB backend. # redis.clusters: # default: # hosts: # 0: # host: 127.0.0.1 # port: 6379 ################ # File storage # ################ # Uploaded media uses these `filestore` settings. The available # backends are either `filesystem` or `s3`. filestore.backend: 'filesystem' filestore.options: location: '/data/files' dsym.cache-path: '/data/dsym-cache' releasefile.cache-path: '/data/releasefile-cache' # filestore.backend: 's3' # filestore.options: # access_key: 'AKIXXXXXX' # secret_key: 'XXXXXXX' # bucket_name: 's3-bucket-name' system.internal-url-prefix: 'http://web:9000' symbolicator.enabled: true symbolicator.options: url: "http://symbolicator:3021" transaction-events.force-disable-internal-project: true ###################### # GitHub Integration # ###################### # github-login.extended-permissions: ['repo'] # github-app.id: GITHUB_APP_ID # github-app.name: 'GITHUB_APP_NAME' # github-app.webhook-secret: 'GITHUB_WEBHOOK_SECRET' # Use only if configured in GitHub # github-app.client-id: 'GITHUB_CLIENT_ID' # github-app.client-secret: 'GITHUB_CLIENT_SECRET' # github-app.private-key: | # -----BEGIN RSA PRIVATE KEY----- # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # -----END RSA PRIVATE KEY----- ##################### # Slack Integration # ##################### # Refer to https://develop.sentry.dev/integrations/slack/ for setup instructions. 
# slack.client-id: <'client id'> # slack.client-secret: # slack.signing-secret: ## If legacy-app is True use verification-token instead of signing-secret # slack.verification-token: diff --git a/sentry/sentry.conf.example.py b/sentry/sentry.conf.example.py index 2d89b56..355ae63 100644 --- a/sentry/sentry.conf.example.py +++ b/sentry/sentry.conf.example.py @@ -1,274 +1,281 @@ # This file is just Python, with a touch of Django which means # you can inherit and tweak settings to your heart's content. from sentry.conf.server import * # NOQA # Generously adapted from pynetlinux: https://git.io/JJmga def get_internal_network(): import ctypes import fcntl import math import socket import struct iface = b"eth0" sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ifreq = struct.pack(b"16sH14s", iface, socket.AF_INET, b"\x00" * 14) try: ip = struct.unpack( b"!I", struct.unpack(b"16sH2x4s8x", fcntl.ioctl(sockfd, 0x8915, ifreq))[2] )[0] netmask = socket.ntohl( struct.unpack(b"16sH2xI8x", fcntl.ioctl(sockfd, 0x891B, ifreq))[2] ) except IOError: return () base = socket.inet_ntoa(struct.pack(b"!I", ip & netmask)) netmask_bits = 32 - int(round(math.log(ctypes.c_uint32(~netmask).value + 1, 2), 1)) return "{0:s}/{1:d}".format(base, netmask_bits) INTERNAL_SYSTEM_IPS = (get_internal_network(),) DATABASES = { "default": { "ENGINE": "sentry.db.postgres", "NAME": "postgres", "USER": "postgres", "PASSWORD": "", "HOST": "postgres", "PORT": "", } } # You should not change this setting after your database has been created # unless you have altered all schemas first SENTRY_USE_BIG_INTS = True # If you're expecting any kind of real traffic on Sentry, we highly recommend # configuring the CACHES and Redis settings ########### # General # ########### # Instruct Sentry that this install intends to be run by a single organization # and thus various UI optimizations should be enabled. SENTRY_SINGLE_ORGANIZATION = True SENTRY_OPTIONS["system.event-retention-days"] = int( env("SENTRY_EVENT_RETENTION_DAYS", "90") ) ######### # Redis # ######### # Generic Redis configuration used as defaults for various things including: # Buffers, Quotas, TSDB SENTRY_OPTIONS["redis.clusters"] = { "default": { "hosts": {0: {"host": "redis", "password": "", "port": "6379", "db": "0"}} } } ######### # Queue # ######### # See https://develop.sentry.dev/services/queue/ for more # information on configuring your queue broker and workers. Sentry relies # on a Python framework called Celery to manage queues. rabbitmq_host = None if rabbitmq_host: BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format( username="guest", password="guest", host=rabbitmq_host, vhost="/" ) else: BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format( **SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0] ) ######### # Cache # ######### # Sentry currently utilizes two separate mechanisms. While CACHES is not a # requirement, it will optimize several high throughput patterns.
CACHES = { "default": { "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", "LOCATION": ["memcached:11211"], "TIMEOUT": 3600, } } # A primary cache is required for things such as processing events SENTRY_CACHE = "sentry.cache.redis.RedisCache" DEFAULT_KAFKA_OPTIONS = { "bootstrap.servers": "kafka:9092", "message.max.bytes": 50000000, "socket.timeout.ms": 1000, } SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream" SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS} KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS ############### # Rate Limits # ############### # Rate limits apply to notification handlers and are enforced per-project # automatically. SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter" ################## # Update Buffers # ################## # Buffers (combined with queueing) act as an intermediate layer between the # database and the storage API. They will greatly improve efficiency on large # numbers of the same events being sent to the API in a short amount of time. # (read: if you send any kind of real data to Sentry, you should enable buffers) SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer" ########## # Quotas # ########## # Quotas allow you to rate limit individual projects or the Sentry install as # a whole. SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota" ######## # TSDB # ######## # The TSDB is used for building charts as well as making things like per-rate # alerts possible. SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB" ######### # SNUBA # ######### SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend" SENTRY_SEARCH_OPTIONS = {} SENTRY_TAGSTORE_OPTIONS = {} ########### # Digests # ########### # The digest backend powers notification summaries. SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend" ############## # Web Server # ############## SENTRY_WEB_HOST = "0.0.0.0" SENTRY_WEB_PORT = 9000 SENTRY_WEB_OPTIONS = { "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT), "protocol": "uwsgi", # This is needed in order to prevent https://git.io/fj7Lw "uwsgi-socket": None, "so-keepalive": True, # Keep this between 15s-75s as that's what Relay supports "http-keepalive": 15, "http-chunked-input": True, # the number of web workers "workers": 3, "threads": 4, "memory-report": False, # Some stuff so uwsgi will cycle workers sensibly "max-requests": 100000, "max-requests-delta": 500, "max-worker-lifetime": 86400, # Duplicate options from sentry default just so we don't get # bit by sentry changing a default value that we depend on. 
"thunder-lock": True, "log-x-forwarded-for": False, "buffer-size": 32768, "limit-post": 209715200, "disable-logging": True, "reload-on-rss": 600, "ignore-sigpipe": True, "ignore-write-errors": True, "disable-write-exception": True, } ########### # SSL/TLS # ########### # If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto # header and enable the settings below # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # SESSION_COOKIE_SECURE = True # CSRF_COOKIE_SECURE = True # SOCIAL_AUTH_REDIRECT_IS_HTTPS = True # End of SSL/TLS settings +######## +# Mail # +######## + +SENTRY_OPTIONS["mail.list-namespace"] = env('SENTRY_MAIL_HOST', 'localhost') +SENTRY_OPTIONS["mail.from"] = f"sentry@{SENTRY_OPTIONS['mail.list-namespace']}" + ############ # Features # ############ SENTRY_FEATURES["projects:sample-events"] = False SENTRY_FEATURES.update( { feature: True for feature in ( "organizations:discover", "organizations:events", "organizations:global-views", "organizations:incidents", "organizations:integrations-issue-basic", "organizations:integrations-issue-sync", "organizations:invite-members", "organizations:metric-alert-builder-aggregate", "organizations:sso-basic", "organizations:sso-rippling", "organizations:sso-saml2", "organizations:performance-view", "organizations:advanced-search", "projects:custom-inbound-filters", "projects:data-forwarding", "projects:discard-groups", "projects:plugins", "projects:rate-limits", "projects:servicehooks", ) } ) ####################### # MaxMind Integration # ####################### GEOIP_PATH_MMDB = '/geoip/GeoLite2-City.mmdb' ######################### # Bitbucket Integration # ######################### # BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY' # BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'