diff --git a/docker-compose.yml b/docker-compose.yml
index 4a082d8..4754ebc 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,241 +1,253 @@
 version: '3.4'
 x-restart-policy: &restart_policy
   restart: unless-stopped
 x-sentry-defaults: &sentry_defaults
   << : *restart_policy
   build:
     context: ./sentry
     args:
       - SENTRY_IMAGE
       - SENTRY_PYTHON3
   image: sentry-onpremise-local
   depends_on:
     - redis
     - postgres
     - memcached
     - smtp
     - snuba-api
     - snuba-consumer
     - snuba-outcomes-consumer
     - snuba-sessions-consumer
     - snuba-transactions-consumer
     - snuba-replacer
     - symbolicator
     - kafka
   environment:
     SENTRY_CONF: '/etc/sentry'
     SNUBA: 'http://snuba-api:1218'
   volumes:
     - 'sentry-data:/data'
     - './sentry:/etc/sentry'
 x-snuba-defaults: &snuba_defaults
   << : *restart_policy
   depends_on:
     - redis
     - clickhouse
     - kafka
   image: '$SNUBA_IMAGE'
   environment:
     SNUBA_SETTINGS: docker
     CLICKHOUSE_HOST: clickhouse
     DEFAULT_BROKERS: 'kafka:9092'
     REDIS_HOST: redis
     UWSGI_MAX_REQUESTS: '10000'
     UWSGI_DISABLE_LOGGING: 'true'
 services:
   smtp:
     << : *restart_policy
     image: tianon/exim4
     volumes:
       - 'sentry-smtp:/var/spool/exim4'
       - 'sentry-smtp-log:/var/log/exim4'
   memcached:
     << : *restart_policy
     image: 'memcached:1.5-alpine'
   redis:
     << : *restart_policy
     image: 'redis:5.0-alpine'
     volumes:
       - 'sentry-redis:/data'
     ulimits:
       nofile:
         soft: 10032
         hard: 10032
   postgres:
     << : *restart_policy
     image: 'postgres:9.6'
     environment:
       POSTGRES_HOST_AUTH_METHOD: 'trust'
     volumes:
       - 'sentry-postgres:/var/lib/postgresql/data'
   zookeeper:
     << : *restart_policy
     image: 'confluentinc/cp-zookeeper:5.5.0'
     environment:
       ZOOKEEPER_CLIENT_PORT: '2181'
       CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
       ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'WARN'
       ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: 'WARN'
     volumes:
       - 'sentry-zookeeper:/var/lib/zookeeper/data'
       - 'sentry-zookeeper-log:/var/lib/zookeeper/log'
       - 'sentry-secrets:/etc/zookeeper/secrets'
   kafka:
     << : *restart_policy
     depends_on:
       - zookeeper
     image: 'confluentinc/cp-kafka:5.5.0'
     environment:
       KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
       KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1'
       KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: '1'
       KAFKA_LOG_RETENTION_HOURS: '24'
       KAFKA_MESSAGE_MAX_BYTES: '50000000' #50MB or bust
       KAFKA_MAX_REQUEST_SIZE: '50000000' #50MB on requests apparently too
       CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
       KAFKA_LOG4J_LOGGERS: 'kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN'
       KAFKA_LOG4J_ROOT_LOGLEVEL: 'WARN'
       KAFKA_TOOLS_LOG4J_LOGLEVEL: 'WARN'
     volumes:
       - 'sentry-kafka:/var/lib/kafka/data'
       - 'sentry-kafka-log:/var/lib/kafka/log'
       - 'sentry-secrets:/etc/kafka/secrets'
   clickhouse:
     << : *restart_policy
     image: 'yandex/clickhouse-server:20.3.9.70'
     ulimits:
       nofile:
         soft: 262144
         hard: 262144
     volumes:
       - 'sentry-clickhouse:/var/lib/clickhouse'
       - 'sentry-clickhouse-log:/var/log/clickhouse-server'
       - type: bind
         read_only: true
         source: ./clickhouse/config.xml
         target: /etc/clickhouse-server/config.d/sentry.xml
     environment:
       # This limits Clickhouse's memory to 30% of the host memory
       # If you have high volume and your search return incomplete results
       # You might want to change this to a higher value (and ensure your host has enough memory)
       MAX_MEMORY_USAGE_RATIO: 0.3
   snuba-api:
     << : *snuba_defaults
   # Kafka consumer responsible for feeding events into Clickhouse
   snuba-consumer:
     << : *snuba_defaults
     command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding outcomes into Clickhouse
   # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
   # since we did not do a proper migration
   snuba-outcomes-consumer:
     << : *snuba_defaults
     command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding session data into Clickhouse
   snuba-sessions-consumer:
     << : *snuba_defaults
     command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
   # Kafka consumer responsible for feeding transactions data into Clickhouse
   snuba-transactions-consumer:
     << : *snuba_defaults
     command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750
   snuba-replacer:
     << : *snuba_defaults
     command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3
+  snuba-subscription-consumer-events:
+    << : *snuba_defaults
+    command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60
+  snuba-subscription-consumer-transactions:
+    << : *snuba_defaults
+    command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=snuba-transactions-consumers --delay-seconds=60 --schedule-ttl=60
   snuba-cleanup:
     << : *snuba_defaults
     image: snuba-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: '$SNUBA_IMAGE'
     command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
   symbolicator:
     << : *restart_policy
     image: '$SYMBOLICATOR_IMAGE'
     volumes:
       - 'sentry-symbolicator:/data'
       - type: bind
         read_only: true
         source: ./symbolicator
         target: /etc/symbolicator
     command: run -c /etc/symbolicator/config.yml
   symbolicator-cleanup:
     << : *restart_policy
     image: symbolicator-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: '$SYMBOLICATOR_IMAGE'
     command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
     volumes:
       - 'sentry-symbolicator:/data'
   web:
     << : *sentry_defaults
   cron:
     << : *sentry_defaults
     command: run cron
   worker:
     << : *sentry_defaults
     command: run worker
   ingest-consumer:
     << : *sentry_defaults
     command: run ingest-consumer --all-consumer-types
   post-process-forwarder:
     << : *sentry_defaults
     # Increase `--commit-batch-size 1` below to deal with high-load environments.
     command: run post-process-forwarder --commit-batch-size 1
+  subscription-consumer-events:
+    << : *sentry_defaults
+    command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results
+  subscription-consumer-transactions:
+    << : *sentry_defaults
+    command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results
   sentry-cleanup:
     << : *sentry_defaults
     image: sentry-cleanup-onpremise-local
     build:
       context: ./cron
       args:
         BASE_IMAGE: 'sentry-onpremise-local'
     command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
   nginx:
     << : *restart_policy
     ports:
       - '$SENTRY_BIND:80/tcp'
     image: 'nginx:1.16'
     volumes:
       - type: bind
         read_only: true
         source: ./nginx
         target: /etc/nginx
     depends_on:
       - web
       - relay
   relay:
     << : *restart_policy
     image: '$RELAY_IMAGE'
     volumes:
       - type: bind
         read_only: true
         source: ./relay
         target: /work/.relay
     depends_on:
       - kafka
       - redis
 volumes:
   sentry-data:
     external: true
   sentry-postgres:
     external: true
   sentry-redis:
     external: true
   sentry-zookeeper:
     external: true
   sentry-kafka:
     external: true
   sentry-clickhouse:
     external: true
   sentry-symbolicator:
     external: true
   sentry-secrets:
   sentry-smtp:
   sentry-zookeeper-log:
   sentry-kafka-log:
   sentry-smtp-log:
   sentry-clickhouse-log:
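The four services added above (snuba-subscription-consumer-events, snuba-subscription-consumer-transactions, subscription-consumer-events and subscription-consumer-transactions) inherit the existing x-snuba-defaults / x-sentry-defaults anchors, so they pick up the same restart policy, image, environment and dependencies as the services already defined. If you need to tune them on a particular install, a docker-compose.override.yml is one way to do it without editing this file; the sketch below is a hypothetical local override, and the --commit-batch-size value in it is an assumption for illustration, not a recommendation from this change.

# docker-compose.override.yml -- hypothetical local override, not part of this change
version: '3.4'
services:
  subscription-consumer-events:
    # replaces the command defined in docker-compose.yml for this service
    command: run query-subscription-consumer --commit-batch-size 100 --topic events-subscription-results
  subscription-consumer-transactions:
    command: run query-subscription-consumer --commit-batch-size 100 --topic transactions-subscription-results

Compose merges override files on top of docker-compose.yml, and a service's command is replaced rather than concatenated, so the full command line has to be repeated in the override.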
diff --git a/sentry/sentry.conf.example.py b/sentry/sentry.conf.example.py
index 01b0925..ca217d1 100644
--- a/sentry/sentry.conf.example.py
+++ b/sentry/sentry.conf.example.py
@@ -1,266 +1,268 @@
 # This file is just Python, with a touch of Django which means
 # you can inherit and tweak settings to your hearts content.

 from sentry.conf.server import *  # NOQA


 # Generously adapted from pynetlinux: https://git.io/JJmga
 def get_internal_network():
     import ctypes
     import fcntl
     import math
     import socket
     import struct

     iface = b"eth0"
     sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     ifreq = struct.pack(b"16sH14s", iface, socket.AF_INET, b"\x00" * 14)

     try:
         ip = struct.unpack(
             b"!I", struct.unpack(b"16sH2x4s8x", fcntl.ioctl(sockfd, 0x8915, ifreq))[2]
         )[0]
         netmask = socket.ntohl(
             struct.unpack(b"16sH2xI8x", fcntl.ioctl(sockfd, 0x891B, ifreq))[2]
         )
     except IOError:
         return ()
     base = socket.inet_ntoa(struct.pack(b"!I", ip & netmask))
     netmask_bits = 32 - int(round(math.log(ctypes.c_uint32(~netmask).value + 1, 2), 1))
     return "{0:s}/{1:d}".format(base, netmask_bits)


 INTERNAL_SYSTEM_IPS = (get_internal_network(),)


 DATABASES = {
     "default": {
         "ENGINE": "sentry.db.postgres",
         "NAME": "postgres",
         "USER": "postgres",
         "PASSWORD": "",
         "HOST": "postgres",
         "PORT": "",
     }
 }

 # You should not change this setting after your database has been created
 # unless you have altered all schemas first
 SENTRY_USE_BIG_INTS = True

 # If you're expecting any kind of real traffic on Sentry, we highly recommend
 # configuring the CACHES and Redis settings

 ###########
 # General #
 ###########

 # Instruct Sentry that this install intends to be run by a single organization
 # and thus various UI optimizations should be enabled.
 SENTRY_SINGLE_ORGANIZATION = True

 SENTRY_OPTIONS["system.event-retention-days"] = int(
     env("SENTRY_EVENT_RETENTION_DAYS", "90")
 )

 #########
 # Redis #
 #########

 # Generic Redis configuration used as defaults for various things including:
 # Buffers, Quotas, TSDB

 SENTRY_OPTIONS["redis.clusters"] = {
     "default": {
         "hosts": {0: {"host": "redis", "password": "", "port": "6379", "db": "0"}}
     }
 }

 #########
 # Queue #
 #########

 # See https://develop.sentry.dev/services/queue/ for more
 # information on configuring your queue broker and workers. Sentry relies
 # on a Python framework called Celery to manage queues.

 rabbitmq_host = None
 if rabbitmq_host:
     BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format(
         username="guest", password="guest", host=rabbitmq_host, vhost="/"
     )
 else:
     BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format(
         **SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0]
     )


 #########
 # Cache #
 #########

 # Sentry currently utilizes two separate mechanisms. While CACHES is not a
 # requirement, it will optimize several high throughput patterns.

 CACHES = {
     "default": {
         "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
         "LOCATION": ["memcached:11211"],
         "TIMEOUT": 3600,
     }
 }

 # A primary cache is required for things such as processing events
 SENTRY_CACHE = "sentry.cache.redis.RedisCache"

 DEFAULT_KAFKA_OPTIONS = {
     "bootstrap.servers": "kafka:9092",
     "message.max.bytes": 50000000,
     "socket.timeout.ms": 1000,
 }

 SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
 SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS}

 KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS

 ###############
 # Rate Limits #
 ###############

 # Rate limits apply to notification handlers and are enforced per-project
 # automatically.

 SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"

 ##################
 # Update Buffers #
 ##################

 # Buffers (combined with queueing) act as an intermediate layer between the
 # database and the storage API. They will greatly improve efficiency on large
 # numbers of the same events being sent to the API in a short amount of time.
 # (read: if you send any kind of real data to Sentry, you should enable buffers)

 SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"

 ##########
 # Quotas #
 ##########

 # Quotas allow you to rate limit individual projects or the Sentry install as
 # a whole.

 SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"

 ########
 # TSDB #
 ########

 # The TSDB is used for building charts as well as making things like per-rate
 # alerts possible.

 SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"

 #########
 # SNUBA #
 #########

 SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
 SENTRY_SEARCH_OPTIONS = {}
 SENTRY_TAGSTORE_OPTIONS = {}

 ###########
 # Digests #
 ###########

 # The digest backend powers notification summaries.

 SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend"

 ##############
 # Web Server #
 ##############

 SENTRY_WEB_HOST = "0.0.0.0"
 SENTRY_WEB_PORT = 9000
 SENTRY_WEB_OPTIONS = {
     "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT),
     "protocol": "uwsgi",
     # This is needed in order to prevent https://git.io/fj7Lw
     "uwsgi-socket": None,
     "so-keepalive": True,
     # Keep this between 15s-75s as that's what Relay supports
     "http-keepalive": 15,
     "http-chunked-input": True,
     # the number of web workers
     "workers": 3,
     "threads": 4,
     "memory-report": False,
     # Some stuff so uwsgi will cycle workers sensibly
     "max-requests": 100000,
     "max-requests-delta": 500,
     "max-worker-lifetime": 86400,
     # Duplicate options from sentry default just so we don't get
     # bit by sentry changing a default value that we depend on.
     "thunder-lock": True,
     "log-x-forwarded-for": False,
     "buffer-size": 32768,
     "limit-post": 209715200,
     "disable-logging": True,
     "reload-on-rss": 600,
     "ignore-sigpipe": True,
     "ignore-write-errors": True,
     "disable-write-exception": True,
 }

 ###########
 # SSL/TLS #
 ###########

 # If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
 # header and enable the settings below

 # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
 # SESSION_COOKIE_SECURE = True
 # CSRF_COOKIE_SECURE = True
 # SOCIAL_AUTH_REDIRECT_IS_HTTPS = True

 # End of SSL/TLS settings

 ############
 # Features #
 ############

 SENTRY_FEATURES["projects:sample-events"] = False
 SENTRY_FEATURES.update(
     {
         feature: True
         for feature in (
             "organizations:discover",
             "organizations:events",
             "organizations:global-views",
+            "organizations:incidents",
             "organizations:integrations-issue-basic",
             "organizations:integrations-issue-sync",
             "organizations:invite-members",
+            "organizations:metric-alert-builder-aggregate",
             "organizations:sso-basic",
             "organizations:sso-rippling",
             "organizations:sso-saml2",
             "organizations:performance-view",
             "organizations:advanced-search",
             "projects:custom-inbound-filters",
             "projects:data-forwarding",
             "projects:discard-groups",
             "projects:plugins",
             "projects:rate-limits",
             "projects:servicehooks",
         )
     }
 )

 #########################
 # Bitbucket Integration #
 ########################

 # BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY'
 # BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'