diff --git a/.env b/.env index 1bb63db..352d7c2 100644 --- a/.env +++ b/.env @@ -1,9 +1,9 @@ COMPOSE_PROJECT_NAME=sentry_onpremise SENTRY_EVENT_RETENTION_DAYS=90 # You can either use a port number or an IP:PORT combo for SENTRY_BIND # See https://docs.docker.com/compose/compose-file/#ports for more SENTRY_BIND=9000 -SENTRY_IMAGE=getsentry/sentry:21.2.0 -SNUBA_IMAGE=getsentry/snuba:21.2.0 -RELAY_IMAGE=getsentry/relay:21.2.0 +SENTRY_IMAGE=getsentry/sentry:21.3.1 +SNUBA_IMAGE=getsentry/snuba:21.3.1 +RELAY_IMAGE=getsentry/relay:21.3.1 SYMBOLICATOR_IMAGE=getsentry/symbolicator:0.3.3 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b2a339a..0a47156 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,47 +1,47 @@ name: 'Close stale issues/PRs' on: schedule: - cron: '* */6 * * *' workflow_dispatch: jobs: stale: runs-on: ubuntu-latest steps: - uses: actions/stale@87c2b794b9b47a9bec68ae03c01aeb572ffebdb1 with: repo-token: ${{ github.token }} days-before-stale: 21 days-before-close: 7 only-labels: "" operations-per-run: 100 remove-stale-when-updated: true debug-only: false ascending: false - exempt-issue-labels: "Status: Accepted,Status: On Hold" + exempt-issue-labels: "Status: Backlog,Status: In Progress" stale-issue-label: "Status: Stale" stale-issue-message: |- This issue has gone three weeks without activity. In another week, I will close it. - But! If you comment or otherwise update it, I will reset the clock, and if you label it `Status: Accepted`, I will leave it alone ... forever! + But! If you comment or otherwise update it, I will reset the clock, and if you label it `Status: Backlog` or `Status: In Progress`, I will leave it alone ... forever! ---- "A weed is but an unloved flower." 
― _Ella Wheeler Wilcox_ πŸ₯€ skip-stale-issue-message: false close-issue-label: "" close-issue-message: "" - exempt-pr-labels: "Status: Accepted,Status: On Hold" + exempt-pr-labels: "Status: Backlog,Status: In Progress" stale-pr-label: "Status: Stale" stale-pr-message: |- This pull request has gone three weeks without activity. In another week, I will close it. - But! If you comment or otherwise update it, I will reset the clock, and if you label it `Status: Accepted`, I will leave it alone ... forever! + But! If you comment or otherwise update it, I will reset the clock, and if you label it `Status: Backlog` or `Status: In Progress`, I will leave it alone ... forever! ---- "A weed is but an unloved flower." ― _Ella Wheeler Wilcox_ πŸ₯€ skip-stale-pr-message: false close-pr-label: close-pr-message: "" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fb891ae..b01e351 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,46 +1,57 @@ name: Test on: # Run CI on all pushes to the master and release/** branches, and on all new # pull requests, and on all pushes to pull requests (even if a pull request # is not against master). 
push: branches: - "master" - "releases/**" pull_request: env: DOCKER_COMPOSE_VERSION: 1.24.1 defaults: run: shell: bash jobs: - test: + unit-test: + runs-on: ubuntu-18.04 + name: "unit tests" + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Unit Tests + working-directory: install + run: find ./ -type f -name "*-test.sh" -exec "./{}" \; + + integration-test: runs-on: ubuntu-18.04 name: "test" steps: - name: Pin docker-compose run: | sudo rm /usr/local/bin/docker-compose curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose chmod +x docker-compose sudo mv docker-compose /usr/local/bin - name: Checkout uses: actions/checkout@v2 - - name: Install and test + - name: Integration Test run: | echo "Testing initial install" ./install.sh ./test.sh echo "Testing in-place upgrade" # Also test plugin installation here echo "sentry-auth-oidc" >> sentry/requirements.txt ./install.sh --minimize-downtime ./test.sh - name: Inspect failure if: failure() run: | docker-compose ps docker-compose logs diff --git a/.github/workflows/validate-new-issue.yml b/.github/workflows/validate-new-issue.yml index b62d636..328e2e2 100644 --- a/.github/workflows/validate-new-issue.yml +++ b/.github/workflows/validate-new-issue.yml @@ -1,80 +1,100 @@ name: Validate new issue on: issues: types: ["opened"] jobs: validate-new-issue: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: "Validate new issue" shell: bash env: GITHUB_TOKEN: ${{ github.token }} run: | - echo "Validating issue #${{ github.event.issue.number }}." + issue_number=${{ github.event.issue.number }} + echo "Validating issue #${issue_number}." # Trust users who belong to the getsentry org. if gh api "https://api.github.com/orgs/getsentry/members/${{ github.actor }}" >/dev/null 2>&1; then echo "Skipping validation, because ${{ github.actor }} is a member of the getsentry org." 
exit 0 else echo "${{ github.actor }} is not a member of the getsentry org. 🧐" fi + # Helper + function gh-issue-label() { + gh api "/repos/:owner/:repo/issues/${1}/labels" \ + -X POST \ + --input <(echo "{\"labels\":[\"$2\"]}") + } + # Prep reasons for error message comment. REASON="your issue does not properly use one of this repo's available issue templates" REASON_EXACT_MATCH="you created an issue from a template without filling in anything" REASON_EMPTY="you created an empty issue" + BASE_CASE_TITLE="validation bot is confused" + # Definition of valid: + # - is a report about buggy validation πŸ˜… or ... # - not empty (ignoring whitespace) # - matches a template - # - all the headings are also in this issue + # - at least one of the headings is also in this issue # - extra headings in the issue are fine # - order doesn't matter # - case-sensitive tho # - not an *exact* match for a template (ignoring whitespace) + + jq -r .issue.title "$GITHUB_EVENT_PATH" > issue-title + if diff issue-title <(echo "$BASE_CASE_TITLE") > /dev/null; then + echo "Infinite recursion avoided." + exit 0 + fi + function extract-headings { { sed 's/\r$//' "$1" | grep '^#' || echo -n ''; } | sort; } jq -r .issue.body "$GITHUB_EVENT_PATH" > issue if ! grep -q '[^[:space:]]' issue; then REASON="${REASON_EMPTY}" else extract-headings <(cat issue) > headings-in-issue for template in $(ls .github/ISSUE_TEMPLATE/*.md 2> /dev/null); do # Strip front matter. https://stackoverflow.com/a/29292490/14946704 sed -i'' '1{/^---$/!q;};1,/^---$/d' "$template" extract-headings "$template" > headings-in-template echo -n "$(basename $template)? " if [ ! -s headings-in-template ]; then echo "No headers in template. 🀷" - elif [ -z "$(comm -23 headings-in-template headings-in-issue)" ]; then + elif [ "$(comm -12 headings-in-template headings-in-issue)" ]; then echo "Match! πŸ‘ πŸ’ƒ" if diff -Bw "$template" issue > /dev/null; then echo "... like, an /exact/ match. 
πŸ˜–" REASON="${REASON_EXACT_MATCH}" break else + gh-issue-label "${issue_number}" "Status: Unrouted" exit 0 fi else echo "No match. πŸ‘Ž" fi done fi - # Failed validation! Close the issue with a comment. + # Failed validation! Close the issue with a comment and a label. cat << EOF > comment - Sorry, friend. As far as this ol' bot can tell, ${REASON}. Please [try again](https://github.com/${{ github.repository }}/issues/new/choose), if you like. (And if I'm confused, please [let us know](https://github.com/getsentry/.github/issues/new?title=template+enforcer+is+confused&body=${{ github.event.issue.html_url }}). 😬) + Sorry, friend. As far as this ol' bot can tell, ${REASON}. Please [try again](https://github.com/${{ github.repository }}/issues/new/choose), if you like. (And if I'm confused, please [let us know](https://github.com/getsentry/.github/issues/new?title=$(echo "$BASE_CASE_TITLE" | tr ' ' '+')&body=${{ github.event.issue.html_url }}). 😬) ---- [![Did you see the memo about this?](https://user-images.githubusercontent.com/134455/104515469-e04a9c80-55c0-11eb-8e15-ffe9c0b8dd7f.gif)](https://www.youtube.com/watch?v=Fy3rjQGc6lA) ([log](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})) EOF echo -n "Commented: " - gh issue comment ${{ github.event.issue.number }} --body "$(cat comment)" - gh issue close ${{ github.event.issue.number }} + gh issue comment "${issue_number}" --body "$(cat comment)" + gh-issue-label "${issue_number}" "Status: Invalid" + gh issue close "${issue_number}" echo "Closed with: \"${REASON}.\"" diff --git a/LICENSE b/LICENSE index e311179..f18f128 100644 --- a/LICENSE +++ b/LICENSE @@ -1,104 +1,104 @@ Business Source License 1.1 Parameters Licensor: Functional Software, Inc. Licensed Work: Sentry The Licensed Work is (c) 2019 Functional Software, Inc. Additional Use Grant: You may make use of the Licensed Work, provided that you do not use the Licensed Work for an Application Monitoring Service. 
An "Application Monitoring Service" is a commercial offering that allows third parties (other than your employees and contractors) to access the functionality of the Licensed Work so that such third parties directly benefit from the error-reporting or application monitoring features of the Licensed Work. -Change Date: 2024-02-15 +Change Date: 2024-04-08 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, please visit: https://sentry.io/pricing/ Notice The Business Source License (this document, or the "License") is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. "Business Source License" is a trademark of MariaDB Corporation Ab. ----------------------------------------------------------------------------- Business Source License 1.1 Terms The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. 
This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark "Business Source License", as long as you comply with the Covenants of Licensor below. Covenants of Licensor In consideration of the right to use this License’s text and the "Business Source License" name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where "compatible" means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 2. 
To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text "None". 3. To specify a Change Date. 4. Not to modify this License in any other way. diff --git a/README.md b/README.md index d4906ad..e235744 100644 --- a/README.md +++ b/README.md @@ -1,73 +1,73 @@ -# Self-Hosted Sentry 21.2.0 +# Self-Hosted Sentry 21.3.1 Official bootstrap for running your own [Sentry](https://sentry.io/) with [Docker](https://www.docker.com/). ## Requirements * Docker 19.03.6+ * Compose 1.24.1+ * 4 CPU Cores * 8 GB RAM * 20 GB Free Disk Space ## Setup To get started with all the defaults, simply clone the repo and run `./install.sh` in your local check-out. Sentry uses Python 3 by default since December 4th, 2020 and Sentry 21.1.0 is the last version to support Python 2. During the install, a prompt will ask if you want to create a user account. If you require that the install not be blocked by the prompt, run `./install.sh --no-user-prompt`. There may need to be modifications to the included example config files (`sentry/config.example.yml` and `sentry/sentry.conf.example.py`) to accommodate your needs or your environment (such as adding GitHub credentials). If you want to perform these, do them before you run the install script and copy them without the `.example` extensions in the name (such as `sentry/sentry.conf.py`) before running the `install.sh` script. The recommended way to customize your configuration is using the files below, in that order: * `config.yml` * `sentry.conf.py` * `.env` w/ environment variables We currently support a very minimal set of environment variables to promote other means of configuration. If you have any issues or questions, our [Community Forum](https://forum.sentry.io/c/on-premise) is at your service! Everytime you run the install script, it will generate a log file, `sentry_install_log-.txt` with the output. 
Sharing these logs would help people diagnose any issues you might be having. ## Versioning If you want to install a specific release of Sentry, use the tags/releases on this repo. We continously push the Docker image for each commit made into [Sentry](https://github.com/getsentry/sentry), and other services such as [Snuba](https://github.com/getsentry/snuba) or [Symbolicator](https://github.com/getsentry/symbolicator) to [our Docker Hub](https://hub.docker.com/u/getsentry) and tag the latest version on master as `:nightly`. This is also usually what we have on sentry.io and what the install script uses. You can use a custom Sentry image, such as a modified version that you have built on your own, or simply a specific commit hash by setting the `SENTRY_IMAGE` environment variable to that image name before running `./install.sh`: ```shell SENTRY_IMAGE=getsentry/sentry:83b1380 ./install.sh ``` Note that this may not work for all commit SHAs as this repository evolves with Sentry and its satellite projects. It is highly recommended to check out a version of this repository that is close to the timestamp of the Sentry commit you are installing. ## Event Retention Sentry comes with a cleanup cron job that prunes events older than `90 days` by default. If you want to change that, you can change the `SENTRY_EVENT_RETENTION_DAYS` environment variable in `.env` or simply override it in your environment. If you do not want the cleanup cron, you can remove the `sentry-cleanup` service from the `docker-compose.yml`file. ## Securing Sentry with SSL/TLS If you'd like to protect your Sentry install with SSL/TLS, there are fantastic SSL/TLS proxies like [HAProxy](http://www.haproxy.org/) -and [Nginx](http://nginx.org/). Our recommendation is running and external Nginx instance or your choice of load balancer that does the TLS termination and more. Read more over at our [productionalizing self-hosted docs](https://develop.sentry.dev/self-hosted/#productionalizing). 
+and [Nginx](http://nginx.org/). Our recommendation is running an external Nginx instance or your choice of load balancer that does the TLS termination and more. Read more over at our [productionalizing self-hosted docs](https://develop.sentry.dev/self-hosted/#productionalizing). ## Updating Sentry _You need to be on at least Sentry 9.1.2 to be able to upgrade automatically to the latest version. If you are not, upgrade to 9.1.2 first by checking out the [9.1.2 tag](https://github.com/getsentry/onpremise/tree/9.1.2) on this repo._ We recommend (and sometimes require) you to upgrade Sentry one version at a time. That means if you are running 20.6.0, instead of going directly to 20.8.0, first go through 20.7.0. Skipping versions would work most of the time, but there will be times that we require you to stop at specific versions to ensure essential data migrations along the way. Pull the version of the repository that you wish to upgrade to by checking out the tagged release of this repo. Make sure to check for any difference between the example config files and your current config files in use. There might be new configuration that has to be added to your adjusted files such as feature flags or server configuration. The included `install.sh` script is meant to be idempotent and to bring you to the latest version. What this means is you can and should run `install.sh` to upgrade to the latest version available. Remember that the output of the script will be stored in a log file, `sentry_install_log-.txt`, which you may share for diagnosis if anything goes wrong. For more information regarding updating your Sentry installation, please visit [our documentation](https://develop.sentry.dev/self-hosted/#upgrading). 
## Resources * [Documentation](https://develop.sentry.dev/self-hosted/) * [Bug Tracker](https://github.com/getsentry/onpremise/issues) * [Community Forums](https://forum.sentry.io/c/on-premise) [build-status-image]: https://github.com/getsentry/onpremise/workflows/test/badge.svg [build-status-url]: https://git.io/JUYkh diff --git a/docker-compose.yml b/docker-compose.yml index d25c738..9f85d8c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,253 +1,255 @@ version: "3.4" x-restart-policy: &restart_policy restart: unless-stopped x-sentry-defaults: &sentry_defaults <<: *restart_policy image: "$SENTRY_IMAGE" depends_on: - redis - memcached - snuba-api - snuba-consumer - snuba-outcomes-consumer - snuba-sessions-consumer - snuba-transactions-consumer + - snuba-subscription-consumer-events + - snuba-subscription-consumer-transactions - snuba-replacer - symbolicator - kafka entrypoint: "/etc/sentry/entrypoint.sh" command: ["run", "web"] environment: PYTHONUSERBASE: "/data/custom-packages" SENTRY_CONF: "/etc/sentry" SNUBA: "http://snuba-api:1218" # Leaving the value empty to just pass whatever is set # on the host system (or in the .env file) SENTRY_EVENT_RETENTION_DAYS: volumes: - "sentry-data:/data" - "./sentry:/etc/sentry" - "./geoip:/geoip:ro" x-snuba-defaults: &snuba_defaults <<: *restart_policy depends_on: - redis - clickhouse - kafka image: "$SNUBA_IMAGE" environment: SNUBA_SETTINGS: docker CLICKHOUSE_HOST: clickhouse DEFAULT_BROKERS: "kafka:9092" REDIS_HOST: redis UWSGI_MAX_REQUESTS: "10000" UWSGI_DISABLE_LOGGING: "true" # Leaving the value empty to just pass whatever is set # on the host system (or in the .env file) SENTRY_EVENT_RETENTION_DAYS: services: memcached: <<: *restart_policy image: "memcached:1.5-alpine" redis: <<: *restart_policy image: "redis:5.0-alpine" volumes: - "sentry-redis:/data" ulimits: nofile: soft: 10032 hard: 10032 zookeeper: <<: *restart_policy image: "confluentinc/cp-zookeeper:5.5.0" environment: ZOOKEEPER_CLIENT_PORT: "2181" 
CONFLUENT_SUPPORT_METRICS_ENABLE: "false" ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN" ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN" volumes: - "sentry-zookeeper:/var/lib/zookeeper/data" - "sentry-zookeeper-log:/var/lib/zookeeper/log" - "sentry-secrets:/etc/zookeeper/secrets" kafka: <<: *restart_policy depends_on: - zookeeper image: "confluentinc/cp-kafka:5.5.0" environment: KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181" KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092" KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1" KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1" KAFKA_LOG_RETENTION_HOURS: "24" KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too CONFLUENT_SUPPORT_METRICS_ENABLE: "false" KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN" KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN" KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN" volumes: - "sentry-kafka:/var/lib/kafka/data" - "sentry-kafka-log:/var/lib/kafka/log" - "sentry-secrets:/etc/kafka/secrets" clickhouse: <<: *restart_policy image: "yandex/clickhouse-server:20.3.9.70" ulimits: nofile: soft: 262144 hard: 262144 volumes: - "sentry-clickhouse:/var/lib/clickhouse" - "sentry-clickhouse-log:/var/log/clickhouse-server" - type: bind read_only: true source: ./clickhouse/config.xml target: /etc/clickhouse-server/config.d/sentry.xml environment: # This limits Clickhouse's memory to 30% of the host memory # If you have high volume and your search return incomplete results # You might want to change this to a higher value (and ensure your host has enough memory) MAX_MEMORY_USAGE_RATIO: 0.3 geoipupdate: image: "maxmindinc/geoipupdate:latest" # Override the entrypoint in order to avoid using envvars for config. # Futz with settings so we can keep mmdb and conf in same dir on host # (image looks for them in separate dirs by default). 
entrypoint: ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"] volumes: - "./geoip:/sentry" snuba-api: <<: *snuba_defaults # Kafka consumer responsible for feeding events into Clickhouse snuba-consumer: <<: *snuba_defaults command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750 # Kafka consumer responsible for feeding outcomes into Clickhouse # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data # since we did not do a proper migration snuba-outcomes-consumer: <<: *snuba_defaults command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750 # Kafka consumer responsible for feeding session data into Clickhouse snuba-sessions-consumer: <<: *snuba_defaults command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750 # Kafka consumer responsible for feeding transactions data into Clickhouse snuba-transactions-consumer: <<: *snuba_defaults command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --commit-log-topic=snuba-commit-log snuba-replacer: <<: *snuba_defaults command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3 snuba-subscription-consumer-events: <<: *snuba_defaults command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60 snuba-subscription-consumer-transactions: <<: *snuba_defaults command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=transactions_group --delay-seconds=60 --schedule-ttl=60 snuba-cleanup: <<: 
*snuba_defaults image: snuba-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SNUBA_IMAGE" command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"' symbolicator: <<: *restart_policy image: "$SYMBOLICATOR_IMAGE" volumes: - "sentry-symbolicator:/data" - type: bind read_only: true source: ./symbolicator target: /etc/symbolicator command: run -c /etc/symbolicator/config.yml symbolicator-cleanup: <<: *restart_policy image: symbolicator-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SYMBOLICATOR_IMAGE" command: '"55 23 * * * gosu symbolicator symbolicator cleanup"' volumes: - "sentry-symbolicator:/data" web: <<: *sentry_defaults cron: <<: *sentry_defaults command: run cron worker: <<: *sentry_defaults command: run worker ingest-consumer: <<: *sentry_defaults command: run ingest-consumer --all-consumer-types post-process-forwarder: <<: *sentry_defaults # Increase `--commit-batch-size 1` below to deal with high-load environments. command: run post-process-forwarder --commit-batch-size 1 subscription-consumer-events: <<: *sentry_defaults command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results subscription-consumer-transactions: <<: *sentry_defaults command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results sentry-cleanup: <<: *sentry_defaults image: sentry-cleanup-onpremise-local build: context: ./cron args: BASE_IMAGE: "$SENTRY_IMAGE" entrypoint: "/entrypoint.sh" command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"' nginx: <<: *restart_policy ports: - "$SENTRY_BIND:80/tcp" image: "nginx:1.16" volumes: - type: bind read_only: true source: ./nginx target: /etc/nginx depends_on: - web - relay relay: <<: *restart_policy image: "$RELAY_IMAGE" volumes: - type: bind read_only: true source: ./relay target: /work/.relay - type: bind read_only: true source: ./geoip target: /geoip depends_on: - kafka - redis volumes: 
sentry-data: external: true sentry-redis: external: true sentry-zookeeper: external: true sentry-kafka: external: true sentry-clickhouse: external: true sentry-symbolicator: external: true sentry-secrets: sentry-zookeeper-log: sentry-kafka-log: sentry-clickhouse-log: diff --git a/install.sh b/install.sh index 4468677..171851b 100755 --- a/install.sh +++ b/install.sh @@ -1,383 +1,28 @@ #!/usr/bin/env bash set -e - if [[ -n "$MSYSTEM" ]]; then echo "Seems like you are using an MSYS2-based system (such as Git Bash) which is not supported. Please use WSL instead."; exit 1 fi -# Thanks to https://unix.stackexchange.com/a/145654/108960 -log_file="sentry_install_log-`date +'%Y-%m-%d_%H-%M-%S'`.txt" -exec &> >(tee -a "$log_file") -if [ "$GITHUB_ACTIONS" = "true" ]; then - _group="::group::" - _endgroup="::endgroup::" -else - _group="β–Ά " - _endgroup="" -fi - -echo "${_group}Defining variables and helpers ..." -# Read .env for default values with a tip o' the hat to https://stackoverflow.com/a/59831605/90297 -t=$(mktemp) && export -p > "$t" && set -a && . ./.env && set +a && . "$t" && rm "$t" && unset t - -source ./install/docker-aliases.sh - -MIN_DOCKER_VERSION='19.03.6' -MIN_COMPOSE_VERSION='1.24.1' -MIN_RAM_HARD=3800 # MB -MIN_RAM_SOFT=7800 # MB -MIN_CPU_HARD=2 -MIN_CPU_SOFT=4 - -# Increase the default 10 second SIGTERM timeout -# to ensure celery queues are properly drained -# between upgrades as task signatures may change across -# versions -STOP_TIMEOUT=60 # seconds -SENTRY_CONFIG_PY='sentry/sentry.conf.py' -SENTRY_CONFIG_YML='sentry/config.yml' -SYMBOLICATOR_CONFIG_YML='symbolicator/config.yml' -RELAY_CONFIG_YML='relay/config.yml' -RELAY_CREDENTIALS_JSON='relay/credentials.json' -SENTRY_EXTRA_REQUIREMENTS='sentry/requirements.txt' -MINIMIZE_DOWNTIME= -echo $_endgroup - -echo "${_group}Parsing command line ..." -show_help() { - cat < /dev/null - fi -} -trap_with_arg cleanup ERR INT TERM EXIT -echo "${_endgroup}" - -echo "${_group}Checking minimum requirements ..." 
-DOCKER_VERSION=$(docker version --format '{{.Server.Version}}') -COMPOSE_VERSION=$($dc --version | sed 's/docker-compose version \(.\{1,\}\),.*/\1/') -RAM_AVAILABLE_IN_DOCKER=$(docker run --rm busybox free -m 2>/dev/null | awk '/Mem/ {print $2}'); -CPU_AVAILABLE_IN_DOCKER=$(docker run --rm busybox nproc --all); - -# Compare dot-separated strings - function below is inspired by https://stackoverflow.com/a/37939589/808368 -function ver () { echo "$@" | awk -F. '{ printf("%d%03d%03d", $1,$2,$3); }'; } - -# Thanks to https://stackoverflow.com/a/25123013/90297 for the quick `sed` pattern -function ensure_file_from_example { - if [[ -f "$1" ]]; then - echo "$1 already exists, skipped creation." - else - echo "Creating $1..." - cp -n $(echo "$1" | sed 's/\.[^.]*$/.example&/') "$1" - fi -} - -if [[ "$(ver $DOCKER_VERSION)" -lt "$(ver $MIN_DOCKER_VERSION)" ]]; then - echo "FAIL: Expected minimum Docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION" - exit 1 -fi - -if [[ "$(ver $COMPOSE_VERSION)" -lt "$(ver $MIN_COMPOSE_VERSION)" ]]; then - echo "FAIL: Expected minimum docker-compose version to be $MIN_COMPOSE_VERSION but found $COMPOSE_VERSION" - exit 1 -fi - -if [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_HARD" ]]; then - echo "FAIL: Required minimum CPU cores available to Docker is $MIN_CPU_HARD, found $CPU_AVAILABLE_IN_DOCKER" - exit 1 -elif [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_SOFT" ]]; then - echo "WARN: Recommended minimum CPU cores available to Docker is $MIN_CPU_SOFT, found $CPU_AVAILABLE_IN_DOCKER" -fi - -if [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_HARD" ]]; then - echo "FAIL: Required minimum RAM available to Docker is $MIN_RAM_HARD MB, found $RAM_AVAILABLE_IN_DOCKER MB" - exit 1 -elif [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_SOFT" ]]; then - echo "WARN: Recommended minimum RAM available to Docker is $MIN_RAM_SOFT MB, found $RAM_AVAILABLE_IN_DOCKER MB" -fi - -#SSE4.2 required by Clickhouse 
(https://clickhouse.yandex/docs/en/operations/requirements/) -# On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check. https://github.com/ClickHouse/ClickHouse/issues/20#issuecomment-226849297 -IS_KVM=$(docker run --rm busybox grep -c 'Common KVM processor' /proc/cpuinfo || :) -if [[ "$IS_KVM" -eq 0 ]]; then - SUPPORTS_SSE42=$(docker run --rm busybox grep -c sse4_2 /proc/cpuinfo || :) - if [[ "$SUPPORTS_SSE42" -eq 0 ]]; then - echo "FAIL: The CPU your machine is running on does not support the SSE 4.2 instruction set, which is required for one of the services Sentry uses (Clickhouse). See https://git.io/JvLDt for more info." - exit 1 - fi -fi -echo "${_endgroup}" - -echo "${_group}Creating volumes for persistent storage ..." -echo "Created $(docker volume create --name=sentry-data)." -echo "Created $(docker volume create --name=sentry-redis)." -echo "Created $(docker volume create --name=sentry-zookeeper)." -echo "Created $(docker volume create --name=sentry-kafka)." -echo "Created $(docker volume create --name=sentry-clickhouse)." -echo "Created $(docker volume create --name=sentry-symbolicator)." -echo "${_endgroup}" - -echo "${_group}Ensuring files from examples ..." -ensure_file_from_example $SENTRY_CONFIG_PY -ensure_file_from_example $SENTRY_CONFIG_YML -ensure_file_from_example $SENTRY_EXTRA_REQUIREMENTS -ensure_file_from_example $SYMBOLICATOR_CONFIG_YML -ensure_file_from_example $RELAY_CONFIG_YML -echo "${_endgroup}" - -echo "${_group}Generating secret key ..." -if grep -xq "system.secret-key: '!!changeme!!'" $SENTRY_CONFIG_YML ; then - # This is to escape the secret key to be used in sed below - # Note the need to set LC_ALL=C due to BSD tr and sed always trying to decode - # whatever is passed to them. 
Kudos to https://stackoverflow.com/a/23584470/90297 - SECRET_KEY=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-z0-9@#%^&*(-_=+)" | head -c 50 | sed -e 's/[\/&]/\\&/g') - sed -i -e 's/^system.secret-key:.*$/system.secret-key: '"'$SECRET_KEY'"'/' $SENTRY_CONFIG_YML - echo "Secret key written to $SENTRY_CONFIG_YML" -fi -echo "${_endgroup}" - -echo "${_group}Replacing TSDB ..." -replace_tsdb() { - if ( - [[ -f "$SENTRY_CONFIG_PY" ]] && - ! grep -xq 'SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"' "$SENTRY_CONFIG_PY" - ); then - # Do NOT indent the following string as it would be reflected in the end result, - # breaking the final config file. See getsentry/onpremise#624. - tsdb_settings="\ -SENTRY_TSDB = \"sentry.tsdb.redissnuba.RedisSnubaTSDB\" - -# Automatic switchover 90 days after $(date). Can be removed afterwards. -SENTRY_TSDB_OPTIONS = {\"switchover_timestamp\": $(date +%s) + (90 * 24 * 3600)}\ -" - - if grep -q 'SENTRY_TSDB_OPTIONS = ' "$SENTRY_CONFIG_PY"; then - echo "Not attempting automatic TSDB migration due to presence of SENTRY_TSDB_OPTIONS" - else - echo "Attempting to automatically migrate to new TSDB" - # Escape newlines for sed - tsdb_settings="${tsdb_settings//$'\n'/\\n}" - cp "$SENTRY_CONFIG_PY" "$SENTRY_CONFIG_PY.bak" - sed -i -e "s/^SENTRY_TSDB = .*$/${tsdb_settings}/g" "$SENTRY_CONFIG_PY" || true - - if grep -xq 'SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"' "$SENTRY_CONFIG_PY"; then - echo "Migrated TSDB to Snuba. Old configuration file backed up to $SENTRY_CONFIG_PY.bak" - return - fi - - echo "Failed to automatically migrate TSDB. Reverting..." - mv "$SENTRY_CONFIG_PY.bak" "$SENTRY_CONFIG_PY" - echo "$SENTRY_CONFIG_PY restored from backup." - fi - - echo "WARN: Your Sentry configuration uses a legacy data store for time-series data. 
Remove the options SENTRY_TSDB and SENTRY_TSDB_OPTIONS from $SENTRY_CONFIG_PY and add:" - echo "" - echo "$tsdb_settings" - echo "" - echo "For more information please refer to https://github.com/getsentry/onpremise/pull/430" - fi -} - -replace_tsdb -echo "${_endgroup}" - -echo "${_group}Fetching and updating Docker images ..." -# We tag locally built images with an '-onpremise-local' suffix. docker-compose pull tries to pull these too and -# shows a 404 error on the console which is confusing and unnecessary. To overcome this, we add the stderr>stdout -# redirection below and pass it through grep, ignoring all lines having this '-onpremise-local' suffix. -$dc pull -q --ignore-pull-failures 2>&1 | grep -v -- -onpremise-local || true - -# We may not have the set image on the repo (local images) so allow fails -docker pull ${SENTRY_IMAGE} || true; -echo "${_endgroup}" - -echo "${_group}Building and tagging Docker images ..." -echo "" -$dc build --force-rm -echo "" -echo "Docker images built." -echo "${_endgroup}" - -echo "${_group}Turning things off ..." -if [[ -n "$MINIMIZE_DOWNTIME" ]]; then - # Stop everything but relay and nginx - $dc rm -fsv $($dc config --services | grep -v -E '^(nginx|relay)$') -else - # Clean up old stuff and ensure nothing is working while we install/update - # This is for older versions of on-premise: - $dc -p onpremise down -t $STOP_TIMEOUT --rmi local --remove-orphans - # This is for newer versions - $dc down -t $STOP_TIMEOUT --rmi local --remove-orphans -fi -echo "${_endgroup}" - -echo "${_group}Setting up Zookeeper ..." 
-ZOOKEEPER_SNAPSHOT_FOLDER_EXISTS=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/data/version-2 | wc -l | tr -d '[:space:]'') -if [[ "$ZOOKEEPER_SNAPSHOT_FOLDER_EXISTS" -eq 1 ]]; then - ZOOKEEPER_LOG_FILE_COUNT=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/log/version-2/* | wc -l | tr -d '[:space:]'') - ZOOKEEPER_SNAPSHOT_FILE_COUNT=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/data/version-2/* | wc -l | tr -d '[:space:]'') - # This is a workaround for a ZK upgrade bug: https://issues.apache.org/jira/browse/ZOOKEEPER-3056 - if [[ "$ZOOKEEPER_LOG_FILE_COUNT" -gt 0 ]] && [[ "$ZOOKEEPER_SNAPSHOT_FILE_COUNT" -eq 0 ]]; then - $dcr -v $(pwd)/zookeeper:/temp zookeeper bash -c 'cp /temp/snapshot.0 /var/lib/zookeeper/data/version-2/snapshot.0' - $dc run -d -e ZOOKEEPER_SNAPSHOT_TRUST_EMPTY=true zookeeper - fi -fi -echo "${_endgroup}" - -echo "${_group}Bootstrapping and migrating Snuba ..." -$dcr snuba-api bootstrap --no-migrate --force -$dcr snuba-api migrations migrate --force -echo "${_endgroup}" - -echo "${_group}Creating additional Kafka topics ..." -# NOTE: This step relies on `kafka` being available from the previous `snuba-api bootstrap` step -# XXX(BYK): We cannot use auto.create.topics as Confluence and Apache hates it now (and makes it very hard to enable) -EXISTING_KAFKA_TOPICS=$($dcr kafka kafka-topics --list --bootstrap-server kafka:9092 2>/dev/null) -NEEDED_KAFKA_TOPICS="ingest-attachments ingest-transactions ingest-events" -for topic in $NEEDED_KAFKA_TOPICS; do - if ! echo "$EXISTING_KAFKA_TOPICS" | grep -wq $topic; then - $dcr kafka kafka-topics --create --topic $topic --bootstrap-server kafka:9092 - echo "" - fi -done -echo "${_endgroup}" - -echo "${_group}Ensuring proper PostgreSQL version ..." 
-# Very naively check whether there's an existing sentry-postgres volume and the PG version in it -if [[ -n "$(docker volume ls -q --filter name=sentry-postgres)" && "$(docker run --rm -v sentry-postgres:/db busybox cat /db/PG_VERSION 2>/dev/null)" == "9.5" ]]; then - docker volume rm sentry-postgres-new || true - # If this is Postgres 9.5 data, start upgrading it to 9.6 in a new volume - docker run --rm \ - -v sentry-postgres:/var/lib/postgresql/9.5/data \ - -v sentry-postgres-new:/var/lib/postgresql/9.6/data \ - tianon/postgres-upgrade:9.5-to-9.6 - - # Get rid of the old volume as we'll rename the new one to that - docker volume rm sentry-postgres - docker volume create --name sentry-postgres - # There's no rename volume in Docker so copy the contents from old to new name - # Also append the `host all all all trust` line as `tianon/postgres-upgrade:9.5-to-9.6` - # doesn't do that automatically. - docker run --rm -v sentry-postgres-new:/from -v sentry-postgres:/to alpine ash -c \ - "cd /from ; cp -av . /to ; echo 'host all all all trust' >> /to/pg_hba.conf" - # Finally, remove the new old volume as we are all in sentry-postgres now - docker volume rm sentry-postgres-new -fi -echo "${_endgroup}" - -echo "${_group}Setting up database ..." -if [[ -n "$CI" || "$SKIP_USER_PROMPT" == 1 ]]; then - $dcr web upgrade --noinput - echo "" - echo "Did not prompt for user creation due to non-interactive shell." - echo "Run the following command to create one yourself (recommended):" - echo "" - echo " docker-compose run --rm web createuser" - echo "" -else - $dcr web upgrade -fi -echo "${_endgroup}" - -echo "${_group}Migrating file storage ..." -SENTRY_DATA_NEEDS_MIGRATION=$(docker run --rm -v sentry-data:/data alpine ash -c "[ ! 
-d '/data/files' ] && ls -A1x /data | wc -l || true") -if [[ -n "$SENTRY_DATA_NEEDS_MIGRATION" ]]; then - # Use the web (Sentry) image so the file owners are kept as sentry:sentry - # The `\"` escape pattern is to make this compatible w/ Git Bash on Windows. See #329. - $dcr --entrypoint \"/bin/bash\" web -c \ - "mkdir -p /tmp/files; mv /data/* /tmp/files/; mv /tmp/files /data/files; chown -R sentry:sentry /data" -fi -echo "${_endgroup}" - -echo "${_group}Generating Relay credentials ..." -if [[ ! -f "$RELAY_CREDENTIALS_JSON" ]]; then - - # We need the ugly hack below as `relay generate credentials` tries to read the config and the credentials - # even with the `--stdout` and `--overwrite` flags and then errors out when the credentials file exists but - # not valid JSON. We hit this case as we redirect output to the same config folder, creating an empty - # credentials file before relay runs. - $dcr --no-deps -v $(pwd)/$RELAY_CONFIG_YML:/tmp/config.yml relay --config /tmp credentials generate --stdout > "$RELAY_CREDENTIALS_JSON" - echo "Relay credentials written to $RELAY_CREDENTIALS_JSON" - echo "${_endgroup}" -fi - -echo "${_group}Setting up GeoIP integration ..." -source ./install/geoip.sh -echo "${_endgroup}" - -if [[ "$MINIMIZE_DOWNTIME" ]]; then - echo "${_group}Waiting for Sentry to start ..." - # Start the whole setup, except nginx and relay. - $dc up -d --remove-orphans $($dc config --services | grep -v -E '^(nginx|relay)$') - $dc exec -T nginx service nginx reload - - docker run --rm --network="${COMPOSE_PROJECT_NAME}_default" alpine ash \ - -c 'while [[ "$(wget -T 1 -q -O- http://web:9000/_health/)" != "ok" ]]; do sleep 0.5; done' - - # Make sure everything is up. This should only touch relay and nginx - $dc up -d - echo "${_endgroup}" -else - echo "" - echo "-----------------------------------------------------------------" - echo "" - echo "You're all done! 
Run the following command to get Sentry running:" - echo "" - echo " docker-compose up -d" - echo "" - echo "-----------------------------------------------------------------" - echo "" -fi +source "$(dirname $0)/install/_lib.sh" # does a `cd .../install/`, among other things + +source parse-cli.sh +source error-handling.sh +source check-minimum-requirements.sh +source create-docker-volumes.sh +source ensure-files-from-examples.sh +source generate-secret-key.sh +source replace-tsdb.sh +source update-docker-images.sh +source build-docker-images.sh +source turn-things-off.sh +source set-up-zookeeper.sh +source bootstrap-snuba.sh +source create-kafka-topics.sh +source upgrade-postgres.sh +source set-up-and-migrate-database.sh +source migrate-file-storage.sh +source relay-credentials.sh +source geoip.sh +source wrap-up.sh diff --git a/install/_lib.sh b/install/_lib.sh new file mode 100644 index 0000000..0e0ad5b --- /dev/null +++ b/install/_lib.sh @@ -0,0 +1,49 @@ +set -euo pipefail +test "${DEBUG:-}" && set -x + +# Thanks to https://unix.stackexchange.com/a/145654/108960 +log_file="sentry_install_log-`date +'%Y-%m-%d_%H-%M-%S'`.txt" +exec &> >(tee -a "$log_file") + +# Work from the onpremise root, no matter which script is called from where. +if [[ "$(basename $0)" = "install.sh" ]]; then + cd "$(dirname $0)/install/" +else + cd "$(dirname $0)" # assume we're a *-test.sh script +fi + +_ENV="$(realpath ../.env)" + +# Read .env for default values with a tip o' the hat to https://stackoverflow.com/a/59831605/90297 +t=$(mktemp) && export -p > "$t" && set -a && . $_ENV && set +a && . "$t" && rm "$t" && unset t + +if [ "${GITHUB_ACTIONS:-}" = "true" ]; then + _group="::group::" + _endgroup="::endgroup::" +else + _group="β–Ά " + _endgroup="" +fi + +dc="docker-compose --no-ansi" +dcr="$dc run --rm" + +# A couple of the config files are referenced from other subscripts, so they +# get vars, while multiple subscripts call ensure_file_from_example. 
+function ensure_file_from_example { + if [[ -f "$1" ]]; then + echo "$1 already exists, skipped creation." + else + echo "Creating $1..." + cp -n $(echo "$1" | sed 's/\.[^.]*$/.example&/') "$1" + # sed from https://stackoverflow.com/a/25123013/90297 + fi +} +SENTRY_CONFIG_PY='../sentry/sentry.conf.py' +SENTRY_CONFIG_YML='../sentry/config.yml' + +# Increase the default 10 second SIGTERM timeout +# to ensure celery queues are properly drained +# between upgrades as task signatures may change across +# versions +STOP_TIMEOUT=60 # seconds diff --git a/install/_test_setup.sh b/install/_test_setup.sh new file mode 100644 index 0000000..6fdf29e --- /dev/null +++ b/install/_test_setup.sh @@ -0,0 +1,52 @@ +set -euo pipefail +source "$(dirname $0)/_lib.sh" + +rm -rf /tmp/sentry-onpremise-test-sandbox.* +_SANDBOX="$(mktemp -d /tmp/sentry-onpremise-test-sandbox.XXX)" + +report_success() { + echo "$(basename $0) - Success πŸ‘" +} + +teardown() { + test "${DEBUG:-}" || rm -rf "$_SANDBOX" +} + +setup() { + cd .. + + # Clone the local repo into a temp dir. FWIW `git clone --local` breaks for + # me because it depends on hard-linking, which doesn't work across devices, + # and I happen to have my workspace and /tmp on separate devices. + git -c advice.detachedHead=false clone --depth=1 "file://$(pwd)" "$_SANDBOX" + + # Now propagate any local changes from the working copy to the sandbox. This + # provides a pretty nice dev experience: edit the files in the working copy, + # then run `DEBUG=1 some-test.sh` to leave the sandbox up for interactive + # dev/debugging. + git status --porcelain | while read line; do + # $line here is something like `M some-script.sh`. + + local filepath="$(cut -f2 -d' ' <(echo $line))" + local filestatus="$(cut -f1 -d' ' <(echo $line))" + + case $filestatus in + D) + rm "$_SANDBOX/$filepath" + ;; + A | M | AM | ??) + ln -sf "$(realpath $filepath)" "$_SANDBOX/$filepath" + ;; + **) + echo "Wuh? 
$line" + exit 77 + ;; + esac + done + + cd "$_SANDBOX/install" + + trap teardown EXIT +} + +setup diff --git a/install/bootstrap-snuba.sh b/install/bootstrap-snuba.sh new file mode 100644 index 0000000..2952ed0 --- /dev/null +++ b/install/bootstrap-snuba.sh @@ -0,0 +1,6 @@ +echo "${_group}Bootstrapping and migrating Snuba ..." + +$dcr snuba-api bootstrap --no-migrate --force +$dcr snuba-api migrations migrate --force + +echo "${_endgroup}" diff --git a/install/build-docker-images.sh b/install/build-docker-images.sh new file mode 100644 index 0000000..4bb96b5 --- /dev/null +++ b/install/build-docker-images.sh @@ -0,0 +1,8 @@ +echo "${_group}Building and tagging Docker images ..." + +echo "" +$dc build --force-rm +echo "" +echo "Docker images built." + +echo "${_endgroup}" diff --git a/install/check-minimum-requirements.sh b/install/check-minimum-requirements.sh new file mode 100644 index 0000000..4527a22 --- /dev/null +++ b/install/check-minimum-requirements.sh @@ -0,0 +1,53 @@ +echo "${_group}Checking minimum requirements ..." + +MIN_DOCKER_VERSION='19.03.6' +MIN_COMPOSE_VERSION='1.24.1' +MIN_RAM_HARD=3800 # MB +MIN_RAM_SOFT=7800 # MB +MIN_CPU_HARD=2 +MIN_CPU_SOFT=4 + +DOCKER_VERSION=$(docker version --format '{{.Server.Version}}') +COMPOSE_VERSION=$($dc --version | sed 's/docker-compose version \(.\{1,\}\),.*/\1/') +RAM_AVAILABLE_IN_DOCKER=$(docker run --rm busybox free -m 2>/dev/null | awk '/Mem/ {print $2}'); +CPU_AVAILABLE_IN_DOCKER=$(docker run --rm busybox nproc --all); + +# Compare dot-separated strings - function below is inspired by https://stackoverflow.com/a/37939589/808368 +function ver () { echo "$@" | awk -F. 
'{ printf("%d%03d%03d", $1,$2,$3); }'; } + +if [[ "$(ver $DOCKER_VERSION)" -lt "$(ver $MIN_DOCKER_VERSION)" ]]; then + echo "FAIL: Expected minimum Docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION" + exit 1 +fi + +if [[ "$(ver $COMPOSE_VERSION)" -lt "$(ver $MIN_COMPOSE_VERSION)" ]]; then + echo "FAIL: Expected minimum docker-compose version to be $MIN_COMPOSE_VERSION but found $COMPOSE_VERSION" + exit 1 +fi + +if [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_HARD" ]]; then + echo "FAIL: Required minimum CPU cores available to Docker is $MIN_CPU_HARD, found $CPU_AVAILABLE_IN_DOCKER" + exit 1 +elif [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_SOFT" ]]; then + echo "WARN: Recommended minimum CPU cores available to Docker is $MIN_CPU_SOFT, found $CPU_AVAILABLE_IN_DOCKER" +fi + +if [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_HARD" ]]; then + echo "FAIL: Required minimum RAM available to Docker is $MIN_RAM_HARD MB, found $RAM_AVAILABLE_IN_DOCKER MB" + exit 1 +elif [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_SOFT" ]]; then + echo "WARN: Recommended minimum RAM available to Docker is $MIN_RAM_SOFT MB, found $RAM_AVAILABLE_IN_DOCKER MB" +fi + +#SSE4.2 required by Clickhouse (https://clickhouse.yandex/docs/en/operations/requirements/) +# On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check. https://github.com/ClickHouse/ClickHouse/issues/20#issuecomment-226849297 +IS_KVM=$(docker run --rm busybox grep -c 'Common KVM processor' /proc/cpuinfo || :) +if [[ "$IS_KVM" -eq 0 ]]; then + SUPPORTS_SSE42=$(docker run --rm busybox grep -c sse4_2 /proc/cpuinfo || :) + if [[ "$SUPPORTS_SSE42" -eq 0 ]]; then + echo "FAIL: The CPU your machine is running on does not support the SSE 4.2 instruction set, which is required for one of the services Sentry uses (Clickhouse). See https://git.io/JvLDt for more info." 
+ exit 1 + fi +fi + +echo "${_endgroup}" diff --git a/install/create-docker-volumes-test.sh b/install/create-docker-volumes-test.sh new file mode 100755 index 0000000..7cf8969 --- /dev/null +++ b/install/create-docker-volumes-test.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +source "$(dirname $0)/_test_setup.sh" + +expected=7 +count() { + docker volume ls --quiet | grep '^sentry-.*' | wc -l +} + +# Maybe they exist prior, maybe they don't. Script is idempotent. + +before=$(count) +test $before -eq 0 || test $before -eq $expected + +source create-docker-volumes.sh +source create-docker-volumes.sh +source create-docker-volumes.sh + +test $(count) -eq $expected + +report_success diff --git a/install/create-docker-volumes.sh b/install/create-docker-volumes.sh new file mode 100644 index 0000000..620c52a --- /dev/null +++ b/install/create-docker-volumes.sh @@ -0,0 +1,10 @@ +echo "${_group}Creating volumes for persistent storage ..." + +echo "Created $(docker volume create --name=sentry-clickhouse)." +echo "Created $(docker volume create --name=sentry-data)." +echo "Created $(docker volume create --name=sentry-kafka)." +echo "Created $(docker volume create --name=sentry-redis)." +echo "Created $(docker volume create --name=sentry-symbolicator)." +echo "Created $(docker volume create --name=sentry-zookeeper)." + +echo "${_endgroup}" diff --git a/install/create-kafka-topics.sh b/install/create-kafka-topics.sh new file mode 100644 index 0000000..a542cb5 --- /dev/null +++ b/install/create-kafka-topics.sh @@ -0,0 +1,14 @@ +echo "${_group}Creating additional Kafka topics ..." 
+ +# NOTE: This step relies on `kafka` being available from the previous `snuba-api bootstrap` step +# XXX(BYK): We cannot use auto.create.topics as Confluent and Apache hate it now (and makes it very hard to enable) +EXISTING_KAFKA_TOPICS=$($dcr kafka kafka-topics --list --bootstrap-server kafka:9092 2>/dev/null) +NEEDED_KAFKA_TOPICS="ingest-attachments ingest-transactions ingest-events" +for topic in $NEEDED_KAFKA_TOPICS; do + if ! echo "$EXISTING_KAFKA_TOPICS" | grep -wq $topic; then + $dcr kafka kafka-topics --create --topic $topic --bootstrap-server kafka:9092 + echo "" + fi +done + +echo "${_endgroup}" diff --git a/install/docker-aliases.sh b/install/docker-aliases.sh deleted file mode 100755 index e19384b..0000000 --- a/install/docker-aliases.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -dc="docker-compose --no-ansi" -dcr="$dc run --rm" diff --git a/install/ensure-files-from-examples.sh b/install/ensure-files-from-examples.sh new file mode 100644 index 0000000..17958a0 --- /dev/null +++ b/install/ensure-files-from-examples.sh @@ -0,0 +1,8 @@ +echo "${_group}Ensuring files from examples ..." + +ensure_file_from_example $SENTRY_CONFIG_PY +ensure_file_from_example $SENTRY_CONFIG_YML +ensure_file_from_example '../symbolicator/config.yml' +ensure_file_from_example '../sentry/requirements.txt' + +echo "${_endgroup}" diff --git a/install/error-handling.sh b/install/error-handling.sh new file mode 100644 index 0000000..d25ee01 --- /dev/null +++ b/install/error-handling.sh @@ -0,0 +1,35 @@ +echo "${_group}Setting up error handling ..."
+ +# Courtesy of https://stackoverflow.com/a/2183063/90297 +trap_with_arg() { + func="$1" ; shift + for sig ; do + trap "$func $sig "'$LINENO' "$sig" + done +} + +DID_CLEAN_UP=0 +# the cleanup function will be the exit point +cleanup () { + if [[ "$DID_CLEAN_UP" -eq 1 ]]; then + return 0; + fi + DID_CLEAN_UP=1 + + if [[ "$1" != "EXIT" ]]; then + echo "An error occurred, caught SIG$1 on line $2"; + + if [[ -n "$MINIMIZE_DOWNTIME" ]]; then + echo "*NOT* cleaning up, to clean your environment run \"docker-compose stop\"." + else + echo "Cleaning up..." + fi + fi + + if [[ -z "$MINIMIZE_DOWNTIME" ]]; then + $dc stop -t $STOP_TIMEOUT &> /dev/null + fi +} +trap_with_arg cleanup ERR INT TERM EXIT + +echo "${_endgroup}" diff --git a/install/generate-secret-key.sh b/install/generate-secret-key.sh new file mode 100644 index 0000000..de2afba --- /dev/null +++ b/install/generate-secret-key.sh @@ -0,0 +1,12 @@ +echo "${_group}Generating secret key ..." + +if grep -xq "system.secret-key: '!!changeme!!'" $SENTRY_CONFIG_YML ; then + # This is to escape the secret key to be used in sed below + # Note the need to set LC_ALL=C due to BSD tr and sed always trying to decode + # whatever is passed to them. Kudos to https://stackoverflow.com/a/23584470/90297 + SECRET_KEY=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-z0-9@#%^&*(-_=+)" | head -c 50 | sed -e 's/[\/&]/\\&/g') + sed -i -e 's/^system.secret-key:.*$/system.secret-key: '"'$SECRET_KEY'"'/' $SENTRY_CONFIG_YML + echo "Secret key written to $SENTRY_CONFIG_YML" +fi + +echo "${_endgroup}" diff --git a/install/geoip-test.sh b/install/geoip-test.sh new file mode 100755 index 0000000..3d61c11 --- /dev/null +++ b/install/geoip-test.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +source "$(dirname $0)/_test_setup.sh" + +mmdb="../geoip/GeoLite2-City.mmdb" + +# Starts with no mmdb, ends up with empty. +test ! -f $mmdb +source geoip.sh +diff -rub $mmdb $mmdb.empty + +# Doesn't clobber existing, though. 
+echo GARBAGE > $mmdb +source geoip.sh +test "$(cat $mmdb)" = "GARBAGE" + +report_success diff --git a/install/geoip.sh b/install/geoip.sh old mode 100755 new mode 100644 index c90b56d..bc5d84b --- a/install/geoip.sh +++ b/install/geoip.sh @@ -1,36 +1,38 @@ -#!/usr/bin/env bash - -if [[ ! -f 'install.sh' ]]; then echo 'Where are you?'; exit 1; fi - -source ./install/docker-aliases.sh +echo "${_group}Setting up GeoIP integration ..." install_geoip() { - local mmdb='geoip/GeoLite2-City.mmdb' - local conf='geoip/GeoIP.conf' + cd ../geoip + + local mmdb='GeoLite2-City.mmdb' + local conf='GeoIP.conf' local result='Done' echo "Setting up IP address geolocation ..." if [[ ! -f "$mmdb" ]]; then echo -n "Installing (empty) IP address geolocation database ... " cp "$mmdb.empty" "$mmdb" echo "done." else echo "IP address geolocation database already exists." fi if [[ ! -f "$conf" ]]; then echo "IP address geolocation is not configured for updates." echo "See https://develop.sentry.dev/self-hosted/geolocation/ for instructions." result='Error' else echo "IP address geolocation is configured for updates." echo "Updating IP address geolocation database ... " if ! $dcr geoipupdate; then result='Error' fi echo "$result updating IP address geolocation database." fi echo "$result setting up IP address geolocation." + + cd ../install } install_geoip + +echo "${_endgroup}" diff --git a/install/migrate-file-storage.sh b/install/migrate-file-storage.sh new file mode 100644 index 0000000..8623fae --- /dev/null +++ b/install/migrate-file-storage.sh @@ -0,0 +1,11 @@ +echo "${_group}Migrating file storage ..." + +SENTRY_DATA_NEEDS_MIGRATION=$(docker run --rm -v sentry-data:/data alpine ash -c "[ ! -d '/data/files' ] && ls -A1x /data | wc -l || true") +if [[ -n "$SENTRY_DATA_NEEDS_MIGRATION" ]]; then + # Use the web (Sentry) image so the file owners are kept as sentry:sentry + # The `\"` escape pattern is to make this compatible w/ Git Bash on Windows. See #329. 
+ $dcr --entrypoint \"/bin/bash\" web -c \ + "mkdir -p /tmp/files; mv /data/* /tmp/files/; mv /tmp/files /data/files; chown -R sentry:sentry /data" +fi + +echo "${_endgroup}" diff --git a/install/parse-cli.sh b/install/parse-cli.sh new file mode 100644 index 0000000..f1b6218 --- /dev/null +++ b/install/parse-cli.sh @@ -0,0 +1,32 @@ +echo "${_group}Parsing command line ..." + +show_help() { + cat < $cfg +echo MOAR GARBAGE > $creds +source relay-credentials.sh +test "$(cat $cfg)" = "GARBAGE" +test "$(cat $creds)" = "MOAR GARBAGE" + +report_success diff --git a/install/relay-credentials.sh b/install/relay-credentials.sh new file mode 100644 index 0000000..2d62e2b --- /dev/null +++ b/install/relay-credentials.sh @@ -0,0 +1,25 @@ +echo "${_group}Generating Relay credentials ..." + +RELAY_CONFIG_YML="../relay/config.yml" +RELAY_CREDENTIALS_JSON="../relay/credentials.json" + +ensure_file_from_example $RELAY_CONFIG_YML + +if [[ ! -f "$RELAY_CREDENTIALS_JSON" ]]; then + + # We need the ugly hack below as `relay generate credentials` tries to read + # the config and the credentials even with the `--stdout` and `--overwrite` + # flags and then errors out when the credentials file exists but not valid + # JSON. We hit this case as we redirect output to the same config folder, + # creating an empty credentials file before relay runs. + + $dcr \ + --no-deps \ + --volume "$(pwd)/$RELAY_CONFIG_YML:/tmp/config.yml" \ + relay --config /tmp credentials generate --stdout \ + > "$RELAY_CREDENTIALS_JSON" + + echo "Relay credentials written to $RELAY_CREDENTIALS_JSON" +fi + +echo "${_endgroup}" diff --git a/install/replace-tsdb.sh b/install/replace-tsdb.sh new file mode 100644 index 0000000..0716bc1 --- /dev/null +++ b/install/replace-tsdb.sh @@ -0,0 +1,46 @@ +echo "${_group}Replacing TSDB ..." + +replace_tsdb() { + if ( + [[ -f "$SENTRY_CONFIG_PY" ]] && + ! 
grep -xq 'SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"' "$SENTRY_CONFIG_PY" + ); then + # Do NOT indent the following string as it would be reflected in the end result, + # breaking the final config file. See getsentry/onpremise#624. + tsdb_settings="\ +SENTRY_TSDB = \"sentry.tsdb.redissnuba.RedisSnubaTSDB\" + +# Automatic switchover 90 days after $(date). Can be removed afterwards. +SENTRY_TSDB_OPTIONS = {\"switchover_timestamp\": $(date +%s) + (90 * 24 * 3600)}\ +" + + if grep -q 'SENTRY_TSDB_OPTIONS = ' "$SENTRY_CONFIG_PY"; then + echo "Not attempting automatic TSDB migration due to presence of SENTRY_TSDB_OPTIONS" + else + echo "Attempting to automatically migrate to new TSDB" + # Escape newlines for sed + tsdb_settings="${tsdb_settings//$'\n'/\\n}" + cp "$SENTRY_CONFIG_PY" "$SENTRY_CONFIG_PY.bak" + sed -i -e "s/^SENTRY_TSDB = .*$/${tsdb_settings}/g" "$SENTRY_CONFIG_PY" || true + + if grep -xq 'SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"' "$SENTRY_CONFIG_PY"; then + echo "Migrated TSDB to Snuba. Old configuration file backed up to $SENTRY_CONFIG_PY.bak" + return + fi + + echo "Failed to automatically migrate TSDB. Reverting..." + mv "$SENTRY_CONFIG_PY.bak" "$SENTRY_CONFIG_PY" + echo "$SENTRY_CONFIG_PY restored from backup." + fi + + echo "WARN: Your Sentry configuration uses a legacy data store for time-series data. Remove the options SENTRY_TSDB and SENTRY_TSDB_OPTIONS from $SENTRY_CONFIG_PY and add:" + echo "" + echo "$tsdb_settings" + echo "" + echo "For more information please refer to https://github.com/getsentry/onpremise/pull/430" + fi +} + +replace_tsdb + +echo "${_endgroup}" diff --git a/install/set-up-and-migrate-database.sh b/install/set-up-and-migrate-database.sh new file mode 100644 index 0000000..38d4093 --- /dev/null +++ b/install/set-up-and-migrate-database.sh @@ -0,0 +1,15 @@ +echo "${_group}Setting up / migrating database ..." 
+ +if [[ -n "${CI:-}" || "${SKIP_USER_PROMPT:-0}" == 1 ]]; then + $dcr web upgrade --noinput + echo "" + echo "Did not prompt for user creation due to non-interactive shell." + echo "Run the following command to create one yourself (recommended):" + echo "" + echo " docker-compose run --rm web createuser" + echo "" +else + $dcr web upgrade +fi + +echo "${_endgroup}" diff --git a/install/set-up-zookeeper.sh b/install/set-up-zookeeper.sh new file mode 100644 index 0000000..00b633d --- /dev/null +++ b/install/set-up-zookeeper.sh @@ -0,0 +1,14 @@ +echo "${_group}Setting up Zookeeper ..." + +ZOOKEEPER_SNAPSHOT_FOLDER_EXISTS=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/data/version-2 | wc -l | tr -d '[:space:]'') +if [[ "$ZOOKEEPER_SNAPSHOT_FOLDER_EXISTS" -eq 1 ]]; then + ZOOKEEPER_LOG_FILE_COUNT=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/log/version-2/* | wc -l | tr -d '[:space:]'') + ZOOKEEPER_SNAPSHOT_FILE_COUNT=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/data/version-2/* | wc -l | tr -d '[:space:]'') + # This is a workaround for a ZK upgrade bug: https://issues.apache.org/jira/browse/ZOOKEEPER-3056 + if [[ "$ZOOKEEPER_LOG_FILE_COUNT" -gt 0 ]] && [[ "$ZOOKEEPER_SNAPSHOT_FILE_COUNT" -eq 0 ]]; then + $dcr -v $(pwd)/zookeeper:/temp zookeeper bash -c 'cp /temp/snapshot.0 /var/lib/zookeeper/data/version-2/snapshot.0' + $dc run -d -e ZOOKEEPER_SNAPSHOT_TRUST_EMPTY=true zookeeper + fi +fi + +echo "${_endgroup}" diff --git a/install/turn-things-off.sh b/install/turn-things-off.sh new file mode 100644 index 0000000..090dc8d --- /dev/null +++ b/install/turn-things-off.sh @@ -0,0 +1,14 @@ +echo "${_group}Turning things off ..." 
+ +if [[ -n "$MINIMIZE_DOWNTIME" ]]; then + # Stop everything but relay and nginx + $dc rm -fsv $($dc config --services | grep -v -E '^(nginx|relay)$') +else + # Clean up old stuff and ensure nothing is working while we install/update + # This is for older versions of on-premise: + $dc -p onpremise down -t $STOP_TIMEOUT --rmi local --remove-orphans + # This is for newer versions + $dc down -t $STOP_TIMEOUT --rmi local --remove-orphans +fi + +echo "${_endgroup}" diff --git a/install/update-docker-images.sh b/install/update-docker-images.sh new file mode 100644 index 0000000..e6d232c --- /dev/null +++ b/install/update-docker-images.sh @@ -0,0 +1,13 @@ +echo "${_group}Fetching and updating Docker images ..." + +# We tag locally built images with an '-onpremise-local' suffix. docker-compose +# pull tries to pull these too and shows a 404 error on the console which is +# confusing and unnecessary. To overcome this, we add the stderr>stdout +# redirection below and pass it through grep, ignoring all lines having this +# '-onpremise-local' suffix. +$dc pull -q --ignore-pull-failures 2>&1 | grep -v -- -onpremise-local || true + +# We may not have the set image on the repo (local images) so allow fails +docker pull ${SENTRY_IMAGE} || true; + +echo "${_endgroup}" diff --git a/install/upgrade-postgres.sh b/install/upgrade-postgres.sh new file mode 100644 index 0000000..1faccb8 --- /dev/null +++ b/install/upgrade-postgres.sh @@ -0,0 +1,24 @@ +echo "${_group}Ensuring proper PostgreSQL version ..." 
+ +# Very naively check whether there's an existing sentry-postgres volume and the PG version in it +if [[ -n "$(docker volume ls -q --filter name=sentry-postgres)" && "$(docker run --rm -v sentry-postgres:/db busybox cat /db/PG_VERSION 2>/dev/null)" == "9.5" ]]; then + docker volume rm sentry-postgres-new || true + # If this is Postgres 9.5 data, start upgrading it to 9.6 in a new volume + docker run --rm \ + -v sentry-postgres:/var/lib/postgresql/9.5/data \ + -v sentry-postgres-new:/var/lib/postgresql/9.6/data \ + tianon/postgres-upgrade:9.5-to-9.6 + + # Get rid of the old volume as we'll rename the new one to that + docker volume rm sentry-postgres + docker volume create --name sentry-postgres + # There's no rename volume in Docker so copy the contents from old to new name + # Also append the `host all all all trust` line as `tianon/postgres-upgrade:9.5-to-9.6` + # doesn't do that automatically. + docker run --rm -v sentry-postgres-new:/from -v sentry-postgres:/to alpine ash -c \ + "cd /from ; cp -av . /to ; echo 'host all all all trust' >> /to/pg_hba.conf" + # Finally, remove the new old volume as we are all in sentry-postgres now + docker volume rm sentry-postgres-new +fi + +echo "${_endgroup}" diff --git a/install/wrap-up.sh b/install/wrap-up.sh new file mode 100644 index 0000000..2671a3a --- /dev/null +++ b/install/wrap-up.sh @@ -0,0 +1,25 @@ +if [[ "$MINIMIZE_DOWNTIME" ]]; then + echo "${_group}Waiting for Sentry to start ..." + + # Start the whole setup, except nginx and relay. + $dc up -d --remove-orphans $($dc config --services | grep -v -E '^(nginx|relay)$') + $dc exec -T nginx service nginx reload + + docker run --rm --network="${COMPOSE_PROJECT_NAME}_default" alpine ash \ + -c 'while [[ "$(wget -T 1 -q -O- http://web:9000/_health/)" != "ok" ]]; do sleep 0.5; done' + + # Make sure everything is up. 
This should only touch relay and nginx + $dc up -d + + echo "${_endgroup}" +else + echo "" + echo "-----------------------------------------------------------------" + echo "" + echo "You're all done! Run the following command to get Sentry running:" + echo "" + echo " docker-compose up -d" + echo "" + echo "-----------------------------------------------------------------" + echo "" +fi diff --git a/relay/config.example.yml b/relay/config.example.yml index 0d70d0a..8538bd7 100644 --- a/relay/config.example.yml +++ b/relay/config.example.yml @@ -1,15 +1,13 @@ relay: upstream: "http://web:9000/" host: 0.0.0.0 port: 3000 logging: level: WARN processing: enabled: true kafka_config: - {name: "bootstrap.servers", value: "kafka:9092"} - {name: "message.max.bytes", value: 50000000} #50MB or bust redis: redis://redis:6379 geoip_path: "/geoip/GeoLite2-City.mmdb" -http: - _client: "reqwest" diff --git a/scripts/post-release.sh b/scripts/post-release.sh index 652a792..a05afe6 100755 --- a/scripts/post-release.sh +++ b/scripts/post-release.sh @@ -1,10 +1,10 @@ #!/bin/bash set -eu SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" cd $SCRIPT_DIR/.. # Bring master back to nightlies after merge from release branch - +git checkout master && git pull SYMBOLICATOR_VERSION=nightly ./scripts/bump-version.sh '' 'nightly' git diff --quiet || git commit -anm 'build: Set master version to nightly' && git pull --rebase && git push diff --git a/sentry/config.example.yml b/sentry/config.example.yml index 9595a73..2989299 100644 --- a/sentry/config.example.yml +++ b/sentry/config.example.yml @@ -1,106 +1,104 @@ # While a lot of configuration in Sentry can be changed via the UI, for all # new-style config (as of 8.0) you can also declare values here in this file # to enforce defaults or to ensure they cannot be changed via the UI. For more # information see the Sentry documentation. 
############### # Mail Server # ############### # mail.backend: 'smtp' # Use dummy if you want to disable email entirely mail.host: 'smtp' # mail.port: 25 # mail.username: '' # mail.password: '' # mail.use-tls: false # The email address to send on behalf of # mail.from: 'root@localhost' # If you'd like to configure email replies, enable this. # mail.enable-replies: true # When email-replies are enabled, this value is used in the Reply-To header # mail.reply-hostname: '' # If you're using mailgun for inbound mail, set your API key and configure a # route to forward to /api/hooks/mailgun/inbound/ # Also don't forget to set `mail.enable-replies: true` above. # mail.mailgun-api-key: '' ################### # System Settings # ################### # If this file ever becomes compromised, it's important to generate a new key. # Changing this value will result in all current sessions being invalidated. # A new key can be generated with `$ sentry config generate-secret-key` system.secret-key: '!!changeme!!' # The ``redis.clusters`` setting is used, unsurprisingly, to configure Redis # clusters. These clusters can be then referred to by name when configuring # backends such as the cache, digests, or TSDB backend. # redis.clusters: # default: # hosts: # 0: # host: 127.0.0.1 # port: 6379 ################ # File storage # ################ # Uploaded media uses these `filestore` settings. The available # backends are either `filesystem` or `s3`. 
filestore.backend: 'filesystem' filestore.options: location: '/data/files' dsym.cache-path: '/data/dsym-cache' releasefile.cache-path: '/data/releasefile-cache' # filestore.backend: 's3' # filestore.options: # access_key: 'AKIXXXXXX' # secret_key: 'XXXXXXX' # bucket_name: 's3-bucket-name' system.internal-url-prefix: 'http://web:9000' symbolicator.enabled: true symbolicator.options: url: "http://symbolicator:3021" transaction-events.force-disable-internal-project: true ###################### # GitHub Integration # ###################### # github-login.extended-permissions: ['repo'] # github-app.id: GITHUB_APP_ID # github-app.name: 'GITHUB_APP_NAME' # github-app.webhook-secret: 'GITHUB_WEBHOOK_SECRET' # Use only if configured in GitHub # github-app.client-id: 'GITHUB_CLIENT_ID' # github-app.client-secret: 'GITHUB_CLIENT_SECRET' # github-app.private-key: | # -----BEGIN RSA PRIVATE KEY----- # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # privatekeyprivatekeyprivatekeyprivatekey # -----END RSA PRIVATE KEY----- ##################### # Slack Integration # ##################### # Refer to https://develop.sentry.dev/integrations/slack/ for setup instructions. # slack.client-id: <'client id'> # slack.client-secret: # slack.signing-secret: -## If you made your slack bot before july 2020 set legacy-app to True -slack.legacy-app: False ## If legacy-app is True use verfication-token instead of signing-secret # slack.verification-token: