diff --git a/docker-compose.yml b/docker-compose.yml
index 4a2644c..d9d28c4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,271 +1,272 @@
 version: "3.4"
 x-restart-policy: &restart_policy
   restart: unless-stopped
 x-sentry-defaults: &sentry_defaults
   <<: *restart_policy
   image: "$SENTRY_IMAGE"
   depends_on:
     - redis
     - postgres
     - memcached
     - smtp
     - snuba-api
     - snuba-consumer
     - snuba-outcomes-consumer
     - snuba-sessions-consumer
     - snuba-transactions-consumer
     - snuba-replacer
     - symbolicator
     - kafka
   entrypoint: "/etc/sentry/entrypoint.sh"
   command: ["run", "web"]
   environment:
     PYTHONUSERBASE: "/data/custom-packages"
     SENTRY_CONF: "/etc/sentry"
     SNUBA: "http://snuba-api:1218"
     # Leaving the value empty to just pass whatever is set
     # on the host system (or in the .env file)
     SENTRY_EVENT_RETENTION_DAYS:
   volumes:
     - "sentry-data:/data"
     - "./sentry:/etc/sentry"
     - "./geoip:/geoip:ro"
 x-snuba-defaults: &snuba_defaults
   <<: *restart_policy
   depends_on:
     - redis
     - clickhouse
     - kafka
   image: "$SNUBA_IMAGE"
   environment:
     SNUBA_SETTINGS: docker
     CLICKHOUSE_HOST: clickhouse
     DEFAULT_BROKERS: "kafka:9092"
     REDIS_HOST: redis
     UWSGI_MAX_REQUESTS: "10000"
     UWSGI_DISABLE_LOGGING: "true"
     # Leaving the value empty to just pass whatever is set
     # on the host system (or in the .env file)
     SENTRY_EVENT_RETENTION_DAYS:
 services:
   smtp:
     <<: *restart_policy
     image: tianon/exim4
     volumes:
       - "sentry-smtp:/var/spool/exim4"
       - "sentry-smtp-log:/var/log/exim4"
   memcached:
     <<: *restart_policy
     image: "memcached:1.5-alpine"
   redis:
     <<: *restart_policy
     image: "redis:5.0-alpine"
     volumes:
       - "sentry-redis:/data"
     ulimits:
       nofile:
         soft: 10032
         hard: 10032
   postgres:
     <<: *restart_policy
     image: "postgres:9.6"
     environment:
       POSTGRES_HOST_AUTH_METHOD: "trust"
     volumes:
       - "sentry-postgres:/var/lib/postgresql/data"
   zookeeper:
     <<: *restart_policy
     image: "confluentinc/cp-zookeeper:5.5.0"
     environment:
       ZOOKEEPER_CLIENT_PORT: "2181"
       CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
       ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN"
       ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN"
     volumes:
       - "sentry-zookeeper:/var/lib/zookeeper/data"
       - "sentry-zookeeper-log:/var/lib/zookeeper/log"
       - "sentry-secrets:/etc/zookeeper/secrets"
   kafka:
     <<: *restart_policy
     depends_on:
       - zookeeper
     image: "confluentinc/cp-kafka:5.5.0"
     environment:
       KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
       KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092"
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
       KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
       KAFKA_LOG_RETENTION_HOURS: "24"
       KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust
       KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too
       CONFLUENT_SUPPORT_METRICS_ENABLE: "false"
       KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN"
       KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN"
       KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN"
     volumes:
       - "sentry-kafka:/var/lib/kafka/data"
       - "sentry-kafka-log:/var/lib/kafka/log"
       - "sentry-secrets:/etc/kafka/secrets"
   clickhouse:
     <<: *restart_policy
     image: "yandex/clickhouse-server:20.3.9.70"
     ulimits:
       nofile:
         soft: 262144
         hard: 262144
     volumes:
       - "sentry-clickhouse:/var/lib/clickhouse"
       - "sentry-clickhouse-log:/var/log/clickhouse-server"
       - type: bind
         read_only: true
         source: ./clickhouse/config.xml
         target: /etc/clickhouse-server/config.d/sentry.xml
     environment:
       # This limits Clickhouse's memory to 30% of the host memory
       # If you have high volume and your search return incomplete results
       # You might want to change this to a higher value (and ensure your host has enough memory)
       MAX_MEMORY_USAGE_RATIO: 0.3
   geoipupdate:
     image: "maxmindinc/geoipupdate:latest"
     # Override the entrypoint in order to avoid using envvars for config.
     # Futz with settings so we can keep mmdb and conf in same dir on host
     # (image looks for them in separate dirs by default).
     entrypoint: ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"]
     volumes:
       - "./geoip:/sentry"
   snuba-api:
     <<: *snuba_defaults
   # Kafka consumer responsible for feeding events into Clickhouse
   snuba-consumer:
     <<: *snuba_defaults
     command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding outcomes into Clickhouse
   # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
   # since we did not do a proper migration
   snuba-outcomes-consumer:
     <<: *snuba_defaults
     command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding session data into Clickhouse
   snuba-sessions-consumer:
     <<: *snuba_defaults
     command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding transactions data into Clickhouse
   snuba-transactions-consumer:
     <<: *snuba_defaults
     command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --commit-log-topic=snuba-commit-log
   snuba-replacer:
     <<: *snuba_defaults
     command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3
   snuba-subscription-consumer-events:
     <<: *snuba_defaults
     command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60
   snuba-subscription-consumer-transactions:
     <<: *snuba_defaults
     command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=transactions_group --delay-seconds=60 --schedule-ttl=60
   snuba-cleanup:
     <<: *snuba_defaults
     image: snuba-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SNUBA_IMAGE"
     command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
   symbolicator:
     <<: *restart_policy
     image: "$SYMBOLICATOR_IMAGE"
     volumes:
       - "sentry-symbolicator:/data"
       - type: bind
         read_only: true
         source: ./symbolicator
         target: /etc/symbolicator
     command: run -c /etc/symbolicator/config.yml
   symbolicator-cleanup:
     <<: *restart_policy
     image: symbolicator-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SYMBOLICATOR_IMAGE"
     command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
     volumes:
       - "sentry-symbolicator:/data"
   web:
     <<: *sentry_defaults
   cron:
     <<: *sentry_defaults
     command: run cron
   worker:
     <<: *sentry_defaults
     command: run worker
   ingest-consumer:
     <<: *sentry_defaults
     command: run ingest-consumer --all-consumer-types
   post-process-forwarder:
     <<: *sentry_defaults
     # Increase `--commit-batch-size 1` below to deal with high-load environments.
     command: run post-process-forwarder --commit-batch-size 1
   subscription-consumer-events:
     <<: *sentry_defaults
     command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results
   subscription-consumer-transactions:
     <<: *sentry_defaults
     command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results
   sentry-cleanup:
     <<: *sentry_defaults
     image: sentry-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: "$SENTRY_IMAGE"
+    entrypoint: "/entrypoint.sh"
     command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
   nginx:
     <<: *restart_policy
     ports:
       - "$SENTRY_BIND:80/tcp"
     image: "nginx:1.16"
     volumes:
       - type: bind
         read_only: true
         source: ./nginx
         target: /etc/nginx
     depends_on:
       - web
       - relay
   relay:
     <<: *restart_policy
     image: "$RELAY_IMAGE"
     volumes:
       - type: bind
         read_only: true
         source: ./relay
         target: /work/.relay
       - type: bind
         read_only: true
         source: ./geoip
         target: /geoip
     depends_on:
       - kafka
       - redis
 volumes:
   sentry-data:
     external: true
   sentry-postgres:
     external: true
   sentry-redis:
     external: true
   sentry-zookeeper:
     external: true
   sentry-kafka:
     external: true
   sentry-clickhouse:
     external: true
   sentry-symbolicator:
     external: true
   sentry-secrets:
   sentry-smtp:
   sentry-zookeeper-log:
   sentry-kafka-log:
   sentry-smtp-log:
   sentry-clickhouse-log:
diff --git a/test.sh b/test.sh
index b4028dc..4c6ca2f 100755
--- a/test.sh
+++ b/test.sh
@@ -1,115 +1,119 @@
 #!/usr/bin/env bash
 set -e
 
 echo "::group::Setting up variables and helpers ..."
 export SENTRY_TEST_HOST="${SENTRY_TEST_HOST:-http://localhost:9000}"
 TEST_USER='test@example.com'
 TEST_PASS='test123TEST'
 COOKIE_FILE=$(mktemp)
 
 # Courtesy of https://stackoverflow.com/a/2183063/90297
 trap_with_arg() {
   func="$1" ; shift
   for sig ; do
     trap "$func $sig "'$LINENO' "$sig"
   done
 }
 
 DID_CLEAN_UP=0
 # the cleanup function will be the exit point
 cleanup () {
   if [ "$DID_CLEAN_UP" -eq 1 ]; then
     return 0;
   fi
   DID_CLEAN_UP=1
 
   if [ "$1" != "EXIT" ]; then
     echo "An error occurred, caught SIG$1 on line $2";
   fi
 
   echo "Cleaning up..."
   rm $COOKIE_FILE
   echo "Done."
 }
 trap_with_arg cleanup ERR INT TERM EXIT
 echo "::endgroup::"
 
 echo "::group::Starting Sentry for tests ..."
 # Disable beacon for e2e tests
 echo 'SENTRY_BEACON=False' >> sentry/sentry.conf.py
 docker-compose run --rm web createuser --superuser --email $TEST_USER --password $TEST_PASS || true
 docker-compose up -d
 printf "Waiting for Sentry to be up";
 timeout 60 bash -c 'until $(curl -Isf -o /dev/null $SENTRY_TEST_HOST); do printf '.'; sleep 0.5; done'
 echo "::endgroup::"
 
 echo "::group::Running tests ..."
 get_csrf_token () { awk '$6 == "sc" { print $7 }' $COOKIE_FILE; }
 sentry_api_request () { curl -s -H 'Accept: application/json; charset=utf-8' -H "Referer: $SENTRY_TEST_HOST" -H 'Content-Type: application/json' -H "X-CSRFToken: $(get_csrf_token)" -b "$COOKIE_FILE" -c "$COOKIE_FILE" "$SENTRY_TEST_HOST/api/0/$1" ${@:2}; }
 
 login () {
   INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null $SENTRY_TEST_HOST -w %{url_effective})
   if [ "$INITIAL_AUTH_REDIRECT" != "$SENTRY_TEST_HOST/auth/login/sentry/" ]; then
     echo "Initial /auth/login/ redirect failed, exiting..."
echo "$INITIAL_AUTH_REDIRECT" exit -1 fi CSRF_TOKEN_FOR_LOGIN=$(curl $SENTRY_TEST_HOST -sL -c "$COOKIE_FILE" | awk -F "'" ' /csrfmiddlewaretoken/ { print $4 "=" $6; exit; }') curl -sL --data-urlencode 'op=login' --data-urlencode "username=$TEST_USER" --data-urlencode "password=$TEST_PASS" --data-urlencode "$CSRF_TOKEN_FOR_LOGIN" "$SENTRY_TEST_HOST/auth/login/sentry/" -H "Referer: $SENTRY_TEST_HOST/auth/login/sentry/" -b "$COOKIE_FILE" -c "$COOKIE_FILE"; } LOGIN_RESPONSE=$(login); declare -a LOGIN_TEST_STRINGS=( '"isAuthenticated":true' '"username":"test@example.com"' '"isSuperuser":true' ) for i in "${LOGIN_TEST_STRINGS[@]}" do echo "Testing '$i'..." echo "$LOGIN_RESPONSE" | grep "$i[,}]" >& /dev/null echo "Pass." done echo "::endgroup::" echo "::group::Running moar tests !!!" # Set up initial/required settings (InstallWizard request) sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","mail.from":"root@localhost","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null SENTRY_DSN=$(sentry_api_request "projects/sentry/internal/keys/" | awk 'BEGIN { RS=",|:{\n"; FS="\""; } $2 == "public" && $4 ~ "^http" { print $4; exit; }') # We ignore the protocol and the host as we already know those DSN_PIECES=(`echo $SENTRY_DSN | sed -ne 's|^https\?://\([0-9a-z]\+\)@[^/]\+/\([0-9]\+\)$|\1\n\2|p'`) SENTRY_KEY=${DSN_PIECES[0]} PROJECT_ID=${DSN_PIECES[1]} TEST_EVENT_ID=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-f0-9" | head -c 32) # Thanks @untitaker - https://forum.sentry.io/t/how-can-i-post-with-curl-a-sentry-event-which-authentication-credentials/4759/2?u=byk echo "Creating test event..." curl -sf --data '{"event_id": "'"$TEST_EVENT_ID"'","level":"error","message":"a failure","extra":{"object":"42"}}' -H 'Content-Type: application/json' -H "X-Sentry-Auth: Sentry sentry_version=7, sentry_key=$SENTRY_KEY, sentry_client=test-bash/0.1" "$SENTRY_TEST_HOST/api/$PROJECT_ID/store/" -o /dev/null EVENT_PATH="projects/sentry/internal/events/$TEST_EVENT_ID/" export -f sentry_api_request get_csrf_token export SENTRY_TEST_HOST COOKIE_FILE EVENT_PATH printf "Getting the test event back" timeout 30 bash -c 'until $(sentry_api_request "$EVENT_PATH" -Isf -X GET -o /dev/null); do printf '.'; sleep 0.5; done' echo ""; EVENT_RESPONSE=$(sentry_api_request "$EVENT_PATH") declare -a EVENT_TEST_STRINGS=( '"eventID":"'"$TEST_EVENT_ID"'"' '"message":"a failure"' '"title":"a failure"' '"object":"42"' ) for i in "${EVENT_TEST_STRINGS[@]}" do echo "Testing '$i'..." echo "$EVENT_RESPONSE" | grep "$i[,}]" >& /dev/null echo "Pass." done echo "::endgroup::" + +echo "::group::Ensure cleanup crons are working ..." +docker-compose ps | grep -q -- "-cleanup_.\+[[:space:]]\+Up[[:space:]]\+" +echo "::endgroup::"