diff --git a/conf/scheduler.yml b/conf/scheduler.yml
new file mode 100644
index 0000000..f9054c4
--- /dev/null
+++ b/conf/scheduler.yml
@@ -0,0 +1,9 @@
+scheduler:
+  cls: postgresql
+  db: postgresql:///?service=swh
+
+celery:
+  broker_heartbeat: null
+  task_broker: amqp://guest:guest@amqp
+  broker_transport_options:
+    max_retries: 1
diff --git a/conf/vault-worker.yml b/conf/vault-worker.yml
new file mode 100644
index 0000000..e078372
--- /dev/null
+++ b/conf/vault-worker.yml
@@ -0,0 +1,18 @@
+storage:
+  cls: remote
+  url: http://storage:5002/
+
+vault:
+  cls: remote
+  url: http://vault:5005/
+
+celery:
+  broker_heartbeat: null
+  task_broker: amqp://guest:guest@amqp//
+  task_modules:
+    - swh.vault.cooking_tasks
+  task_queues:
+    - swh.vault.cooking_tasks.SWHBatchCookingTask
+    - swh.vault.cooking_tasks.SWHCookingTask
+
+max_bundle_size: 536870912
diff --git a/conf/vault.yml b/conf/vault.yml
new file mode 100644
index 0000000..b4e858a
--- /dev/null
+++ b/conf/vault.yml
@@ -0,0 +1,16 @@
+vault:
+  cls: postgresql
+  db: postgresql:///?service=swh
+  storage:
+    cls: remote
+    url: http://storage:5002/
+  scheduler:
+    cls: remote
+    url: http://scheduler:5008/
+  cache:
+    cls: pathslicing
+    root: /srv/softwareheritage/vault
+    slicing: 0:5
+  smtp:
+    port: 1025
+    host: mailhog
diff --git a/env/celery-worker.env b/env/celery-worker.env
new file mode 100644
index 0000000..373f41d
--- /dev/null
+++ b/env/celery-worker.env
@@ -0,0 +1,4 @@
+CONCURRENCY=1
+MAX_TASKS_PER_CHILD=10
+LOGLEVEL=DEBUG
+SWH_SCHEDULER_INSTANCE=http://scheduler:5008
diff --git a/env/common-python.env b/env/common-python.env
new file mode 100644
index 0000000..f527015
--- /dev/null
+++ b/env/common-python.env
@@ -0,0 +1,14 @@
+# Force Python's stdout to be unbuffered so it shows up in docker logs immediately:
+PYTHONUNBUFFERED=1
+
+# Send statsd probes to prometheus-statsd-exporter so they are visible in
+# http://localhost:5081/grafana
+STATSD_HOST=prometheus-statsd-exporter
+STATSD_PORT=9125
+
+# Uncomment and set this to a Sentry DSN to report errors to Sentry:
+# SWH_SENTRY_DSN=
+
+# By default, the main config file is expected at the path below;
+# override this if you want to put it somewhere else
+# SWH_CONFIG_FILENAME=/etc/softwareheritage/config.yml
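The two env files above are meant to be shared across services through compose `env_file` entries (see mirror.yml below). A quick sanity check that a running task actually picked them up — a sketch; the task name assumes the stack is deployed as `swh-mirror`, as in the deployment example at the end of this patch:

    # print the variables injected from env/common-python.env and env/celery-worker.env
    docker exec $(docker ps -q -f name=swh-mirror_vault-worker) \
        env | grep -E 'STATSD_|CONCURRENCY|MAX_TASKS_PER_CHILD|SWH_'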
diff --git a/images/Dockerfile b/images/Dockerfile
index bd92b6d..6acb152 100644
--- a/images/Dockerfile
+++ b/images/Dockerfile
@@ -1,131 +1,137 @@
 ARG debianversion=buster
 FROM debian:${debianversion:-buster} as swh-common
 LABEL maintainer="Software Heritage "
 ENV PROJECT_NAME swh-base
 
 RUN export DEBIAN_FRONTEND=noninteractive && \
     apt-get update && apt-get upgrade -y && \
     apt-get install -y \
       apt-transport-https \
       curl \
       lsb-release \
       wait-for-it
 
 RUN echo deb http://deb.debian.org/debian/ $(lsb_release -sc)-backports main \
     > /etc/apt/sources.list.d/backports.list
 RUN echo deb [trusted=yes] https://debian.softwareheritage.org/ $(lsb_release -sc)-swh main \
     > /etc/apt/sources.list.d/softwareheritage.list
 
 RUN mkdir /etc/softwareheritage
 RUN mkdir -p /var/run/gunicorn/swh
 RUN mkdir -p /var/lib/swh
 RUN mkdir -p /srv/softwareheritage
 
 ENV SWH_CONFIG_FILENAME=/etc/softwareheritage/config.yml
 ENV LC_ALL=C.UTF-8
 
 ##################################
 # BASE services
 ##################################
 FROM swh-common as swh-base
 ARG SWH_VER
 ENV SWH_VER=${SWH_VER}
 
 RUN export DEBIAN_FRONTEND=noninteractive && \
     apt-get update && \
     apt-get install -y \
       -t $(lsb_release -sc)-backports \
       --no-install-recommends \
+      # git is required for the vault-worker... maybe use a dedicated image for this
+      git \
       gunicorn \
       postgresql-client \
       python3-dulwich \
+      python3-swh.indexer \
+      python3-swh.indexer.storage \
       python3-swh.journal \
       python3-swh.objstorage \
       python3-swh.objstorage.cloud \
       # python3-swh.objstorage.rados \
       python3-swh.scheduler \
+      python3-swh.search \
       python3-swh.storage \
+      python3-swh.vault \
       zstd \
     && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
-## still missing: vault, deposit
+## still missing: deposit
 
 COPY conf/logconfig.ini /etc/gunicorn/logconfig.ini
 COPY conf/gunicorn.cfg /etc/gunicorn/swh.cfg
 COPY tools/*.sh /srv/softwareheritage/utils/
 COPY tools/*.sql /srv/softwareheritage/utils/
 COPY tools/*.py /srv/softwareheritage/utils/
 RUN chmod +x /srv/softwareheritage/utils/*.sh
 
 COPY base/entrypoint.sh /
 ENTRYPOINT ["/entrypoint.sh"]
 
 ##################################
 # WEB
 ##################################
 FROM swh-common as swh-web
 ARG SWH_VER
 ENV SWH_VER=${SWH_VER}
 ENV DJANGO_SETTINGS_MODULE=swh.web.settings.production
 
 RUN export DEBIAN_FRONTEND=noninteractive && \
     apt-get update && \
     apt-get install -y --no-install-recommends \
       -t $(lsb_release -sc)-backports \
       gunicorn \
       postgresql-client \
       python3-magic \
       python3-swh.web \
     && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
 COPY conf/logconfig.ini /etc/gunicorn/logconfig.ini
 COPY conf/gunicorn.cfg /etc/gunicorn/swh.cfg
 COPY tools/*.sh /srv/softwareheritage/utils/
 COPY tools/*.sql /srv/softwareheritage/utils/
 RUN chmod +x /srv/softwareheritage/utils/*.sh
 COPY web/entrypoint.sh /entrypoint.sh
 ENTRYPOINT ["/entrypoint.sh"]
 
 ##################################
 # Mirror (replayer)
 ##################################
 FROM swh-common as swh-replayer
 ARG SWH_VER
 ENV SWH_VER=${SWH_VER}
 
 RUN export DEBIAN_FRONTEND=noninteractive && \
     apt-get update && \
     apt-get install -y \
       -t $(lsb_release -sc)-backports \
       --no-install-recommends \
       python3-swh.journal \
       python3-swh.objstorage \
       python3-swh.objstorage.replayer \
       python3-swh.storage \
     && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
 COPY tools/*.sh /srv/softwareheritage/utils/
 COPY tools/*.sql /srv/softwareheritage/utils/
 RUN chmod +x /srv/softwareheritage/utils/*.sh
 COPY replayer/entrypoint.sh /
 ENTRYPOINT ["/entrypoint.sh"]
 
 ###
 # Test image
 ###
 FROM swh-base as swh-test
 COPY restore_kafka.py /
 #ENTRYPOINT ["python3", "-u", "/restore_kafka.py"]
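The `swh-base` stage now ships git plus the vault, indexer, search and scheduler packages, so the same base image can back all the new services below. To rebuild it locally — a sketch; it assumes the build context is the `images/` directory, as the relative COPY paths suggest:

    # build only the swh-base stage; the tag mirrors how mirror.yml resolves its images
    docker build --target swh-base \
        -t softwareheritage/base:${SWH_IMAGE_TAG:-latest} images/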
diff --git a/images/base/entrypoint.sh b/images/base/entrypoint.sh
old mode 100644
new mode 100755
index 72d2f2a..44c40d3
--- a/images/base/entrypoint.sh
+++ b/images/base/entrypoint.sh
@@ -1,66 +1,102 @@
 #!/bin/bash
 
 set -e
 
 source /srv/softwareheritage/utils/pgsql.sh
 
 # generate the config file from the 'template'
 if [ -f /etc/softwareheritage/config.yml.tmpl ]; then
     # I know... I know!
     eval "echo \"`cat /etc/softwareheritage/config.yml.tmpl`\"" > \
         /etc/softwareheritage/config.yml
 fi
 
 # generate the pgservice file if any
 if [ -f /run/secrets/postgres-password ]; then
     POSTGRES_PASSWORD_FILE=/run/secrets/postgres-password
     setup_pgsql
 fi
 
 # ensure root dirs used for pathslicer objstorage exist (if any)
 if [ -v SWH_CONFIG_FILENAME ]; then
     python3 /srv/softwareheritage/utils/init_pathslicer_root.py
 fi
 
 # For debugging purposes
 echo "### CONFIG FILE ###"
 cat /etc/softwareheritage/config.yml | grep -v password || true
 echo "###################"
 echo "Arguments: $@"
 
 case "$1" in
     "shell")
-    exec bash -i
-    ;;
-    *)
-    if [ -v POSTGRES_DB ]; then
-        wait_pgsql template1
-
-        echo Database setup
-        if ! check_pgsql_db_created; then
-            echo Creating database and extensions...
-            swh db create --db-name ${POSTGRES_DB} $1
-        fi
-        echo Initializing the database...
-        echo " step 1: init-admin"
-        swh db init-admin --db-name ${POSTGRES_DB} $1
-        echo " step 2: init"
-        swh db init --flavor ${FLAVOR:-default} $1
-        echo " step 3: upgrade"
-        swh db upgrade --non-interactive $1
-    fi
-
-    echo "Starting the SWH $1 RPC server"
-    exec gunicorn3 \
-        --bind 0.0.0.0:${PORT:-5000} \
-        --bind unix:/var/run/gunicorn/swh/$1.sock \
-        --threads ${GUNICORN_THREADS:-4} \
-        --workers ${GUNICORN_WORKERS:-16} \
-        --log-level "${LOG_LEVEL:-WARNING}" \
-        --timeout ${GUNICORN_TIMEOUT:-3600} \
-        --statsd-host=prometheus-statsd-exporter:9125 \
-        --statsd-prefix=service.app.$1 \
-        "swh.$1.api.server:make_app_from_configfile()"
-    ;;
+        exec bash -i
+        ;;
+
+    "celery-worker")
+        shift
+
+        # the 2 lines below should be adapted, but for now we do not have
+        # easy access to the rabbitmq host:port from here
+        ## echo Waiting for RabbitMQ to start
+        ## wait-for-it amqp:5672 -s --timeout=0
+
+        echo Starting the swh Celery worker for ${SWH_WORKER_INSTANCE}
+        exec python3 -m celery \
+            --app=swh.scheduler.celery_backend.config.app \
+            worker \
+            --pool=prefork --events \
+            --concurrency=${CONCURRENCY} \
+            --max-tasks-per-child=${MAX_TASKS_PER_CHILD} \
+            -Ofair --loglevel=${LOGLEVEL:-INFO} \
+            --hostname "${SWH_WORKER_INSTANCE}@%h"
+        ;;
+
+    "rpc-server")
+        shift
+        if [ -v POSTGRES_DB ]; then
+            wait_pgsql template1
+
+            echo Database setup
+            if ! check_pgsql_db_created; then
+                echo Creating database and extensions...
+                swh db create --db-name ${POSTGRES_DB} $1
+            fi
+            echo Initializing the database...
+            echo " step 1: init-admin"
+            swh db init-admin --db-name ${POSTGRES_DB} $1
+            echo " step 2: init"
+            swh db init --flavor ${FLAVOR:-default} $1
+            echo " step 3: upgrade"
+            swh db upgrade --non-interactive $1
+        fi
+
+        echo "Starting the SWH $1 RPC server"
+        exec gunicorn3 \
+            --bind 0.0.0.0:${PORT:-5000} \
+            --bind unix:/var/run/gunicorn/swh/$1.sock \
+            --threads ${GUNICORN_THREADS:-4} \
+            --workers ${GUNICORN_WORKERS:-16} \
+            --log-level "${LOG_LEVEL:-WARNING}" \
+            --timeout ${GUNICORN_TIMEOUT:-3600} \
+            --statsd-host=prometheus-statsd-exporter:9125 \
+            --statsd-prefix=service.app.$1 \
+            "swh.$1.api.server:make_app_from_configfile()"
+        ;;
+
+    "scheduler")
+        shift
+        wait_pgsql
+
+        wait-for-it scheduler:5008 -s --timeout=0
+
+        echo "Starting the swh-scheduler $1"
+        exec wait-for-it amqp:5672 -s --timeout=0 -- \
+            swh --log-level ${LOGLEVEL:-INFO} scheduler -C ${SWH_CONFIG_FILENAME} $@
+        ;;
+
+    *)
+        exec $@
+        ;;
 esac
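The entrypoint now dispatches on its first argument instead of always assuming an RPC server, and falls back to exec'ing unknown commands as-is. Illustrative invocations — a sketch; rpc-server additionally needs the config file and, for database setup, the POSTGRES_* variables, while celery-worker expects CONCURRENCY and MAX_TASKS_PER_CHILD (see env/celery-worker.env):

    docker run --rm -it softwareheritage/base shell            # interactive shell
    docker run --rm softwareheritage/base rpc-server vault     # gunicorn RPC server for swh.vault
    docker run --rm -e SWH_WORKER_INSTANCE=vault \
        softwareheritage/base celery-worker                    # Celery worker
    docker run --rm softwareheritage/base echo ok              # fallback: exec'ed as-is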
diff --git a/mirror.yml b/mirror.yml
index eb3119b..268f384 100644
--- a/mirror.yml
+++ b/mirror.yml
@@ -1,302 +1,481 @@
 version: "3.8"
 
 services:
   memcache:
     # used by the web app
     image: memcached
     deploy:
       replicas: 1
 
   db-storage:
     # the main storage database
     image: postgres:13
     deploy:
       # we want only one replica of this service in the whole cluster
       replicas: 1
       # possible workaround to prevent dropped idle connections (making the
       # pg pool fail to work after a while)
       endpoint_mode: dnsrr
       placement:
         max_replicas_per_node: 1
         constraints:
           - node.labels.org.softwareheritage.mirror.volumes.storage-db == true
     command: ['-c', 'shared_buffers=4GB', '-c', 'effective_cache_size=4GB', '-c', 'random_page_cost=1.5', '-c', 'max_wal_size=4GB']
     environment:
       POSTGRES_PASSWORD_FILE: /run/secrets/postgres-password
       POSTGRES_USER: swh
       POSTGRES_DB:
       # unset POSTGRES_DB: we're handling db creation ourselves in the backend
     volumes:
       - "storage-db:/var/lib/postgresql/data:rw,Z"
     secrets:
       - source: swh-mirror-db-postgres-password
         target: postgres-password
         uid: '999'
         mode: 0400
 
   db-web:
     # the database for the web application
     image: postgres:13
     deploy:
       # we want only one replica of this service in the whole cluster
       replicas: 1
       endpoint_mode: dnsrr
       placement:
         max_replicas_per_node: 1
         constraints:
           - node.labels.org.softwareheritage.mirror.volumes.web-db == true
     command: ['-c', 'shared_buffers=4GB', '-c', 'effective_cache_size=4GB', '-c', 'random_page_cost=1.5', '-c', 'max_wal_size=4GB']
     environment:
       POSTGRES_PASSWORD_FILE: /run/secrets/postgres-password
       POSTGRES_USER: swh
       POSTGRES_DB: swh-web
     volumes:
       - "web-db:/var/lib/postgresql/data:rw,Z"
     secrets:
       - source: swh-mirror-web-postgres-password
         target: postgres-password
         uid: '999'
         mode: 0400
 
   web:
     # the web app; serves both the web navigation interface and the public web API
     image: softwareheritage/web:${SWH_IMAGE_TAG:-latest}
     configs:
       - source: web
         target: /etc/softwareheritage/config.yml
     command: serve
     environment:
       PORT: "5004"
       PGHOST: db-web
       PGUSER: swh
       POSTGRES_DB: swh-web
     depends_on:
       - db-web
       - memcache
     secrets:
       - source: swh-mirror-web-postgres-password
         target: postgres-password
         mode: 0400
 
   objstorage:
     # the swh-objstorage backend service; this example configuration uses a simple
     # filesystem-based pathslicing implementation of the swh-objstorage: see
     # https://docs.softwareheritage.org/devel/apidoc/swh.objstorage.backends.pathslicing.html
     image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
     deploy:
       # needed to allow actual and dynamic load balancing
       endpoint_mode: dnsrr
       # a real-life replicas value should be in the 16 to 64 range
       replicas: 1
       placement:
         # note: if using a local volume, you need to pin the objstorage
         # instances to the node hosting the volume, e.g. the manager; if
         # using a remote/distributed objstorage backend (seaweedfs, cloud,
         # etc.), remove this placement constraint
         constraints:
           - node.labels.org.softwareheritage.mirror.volumes.objstorage == true
     volumes:
       - "objstorage:/srv/softwareheritage/objects:rw,Z"
     configs:
       - source: objstorage
         target: /etc/softwareheritage/config.yml
+    env_file:
+      - ./env/common-python.env
     environment:
       PORT: "5003"
-      STATSD_HOST: prometheus-statsd-exporter
-      STATSD_PORT: 9125
-    command: objstorage
+    command: ["rpc-server", "objstorage"]
 
   storage:
     # the swh-storage backend service; using postgresql (db-storage) as backend
     image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
     deploy:
       # needed to allow actual and dynamic load balancing
       endpoint_mode: dnsrr
       # a real-life replicas value should be in the 16 to 64 range; however
       # we recommend keeping 1 in this stack deploy file so that an upgrade
       # of the base image that comes with a database migration script is
       # applied in a consistent way
       replicas: 1
     configs:
       - source: storage
         target: /etc/softwareheritage/config.yml
     environment:
       PGHOST: db-storage
       PGUSER: swh
       POSTGRES_DB: swh-storage
       FLAVOR: mirror
       PORT: "5002"
-      STATSD_HOST: prometheus-statsd-exporter
-      STATSD_PORT: 9125
-    command: storage
-    depends_on:
-      - db-storage
+    env_file:
+      - ./env/common-python.env
     secrets:
       - source: swh-mirror-db-postgres-password
         target: postgres-password
         mode: 0400
+    command: ["rpc-server", "storage"]
+    depends_on:
+      - db-storage
 
   nginx:
     image: nginx
     configs:
       - source: nginx
         target: /etc/nginx/nginx.conf
     ports:
       - "5081:5081/tcp"
     deploy:
       mode: global
 
   prometheus:
     image: prom/prometheus
     depends_on:
       - prometheus-statsd-exporter
     command:
       # Needed for the reverse-proxy
       - "--web.external-url=/prometheus"
       - "--config.file=/etc/prometheus/prometheus.yml"
     configs:
       - source: prometheus
         target: /etc/prometheus/prometheus.yml
     deploy:
       # we want only one replica of this service in the whole cluster
       replicas: 1
       placement:
         max_replicas_per_node: 1
         constraints:
           - node.labels.org.softwareheritage.mirror.monitoring == true
 
   prometheus-statsd-exporter:
     image: prom/statsd-exporter
     command:
       - "--statsd.mapping-config=/etc/prometheus/statsd-mapping.yml"
     configs:
       - source: prometheus-statsd-exporter
         target: /etc/prometheus/statsd-mapping.yml
     deploy:
       # we want only one replica of this service in the whole cluster
       replicas: 1
       placement:
         max_replicas_per_node: 1
         constraints:
           - node.labels.org.softwareheritage.mirror.monitoring == true
 
   grafana:
     image: grafana/grafana
     depends_on:
       - prometheus
     environment:
       GF_SERVER_ROOT_URL: http://localhost:5081/grafana
     configs:
       - source: grafana-provisioning-datasources-prometheus
         target: /etc/grafana/provisioning/datasources/prometheus.yaml
       - source: grafana-provisioning-dashboards-all
         target: /etc/grafana/provisioning/dashboards/all.yaml
       - source: grafana-dashboards-backend-stats
         target: /var/lib/grafana/dashboards/backend-stats.json
       - source: grafana-dashboards-content-replayer
         target: /var/lib/grafana/dashboards/content-replayer.json
       - source: grafana-dashboards-graph-replayer
         target: /var/lib/grafana/dashboards/graph-replayer.json
     deploy:
       # we want only one replica of this service in the whole cluster
       replicas: 1
       placement:
         max_replicas_per_node: 1
         constraints:
           - node.labels.org.softwareheritage.mirror.monitoring == true
 
+## replayer services
+
   redis:
     image: redis:6.2.6
     deploy:
       # we want only one replica of this service in the whole cluster
       replicas: 1
       placement:
         max_replicas_per_node: 1
         constraints:
           - node.labels.org.softwareheritage.mirror.volumes.redis == true
     command:
       - redis-server
       - --save 60 1
       - --loglevel warning
     volumes:
       - redis:/data
 
   graph-replayer:
     image: softwareheritage/replayer:${SWH_IMAGE_TAG:-latest}
     deploy:
       # replayers are not started by default; once the remainder of the stack
       # is running as expected, bump this value; expected real-life values
       # should be something in the range [16, 64] (staging) or [16, 256]
       # (production) depending on your hardware capabilities; note that there
       # is no need to go above the number of partitions on the kafka cluster
       # (hence the 64 and 256 upper limits, depending on the execution
       # environment).
       replicas: 0
+      restart_policy:
+        condition: "none"
+    env_file:
+      - ./env/common-python.env
     environment:
-      STATSD_HOST: prometheus-statsd-exporter
-      STATSD_PORT: 9125
-      STATSD_TAGS: 'role:content-replayer,hostname:${HOSTNAME}'
+      STATSD_TAGS: 'role:graph-replayer,hostname:${HOSTNAME}'
     configs:
       - source: graph-replayer
         target: /etc/softwareheritage/config.yml
     command:
       - graph-replayer
     depends_on:
       - storage
       - redis
 
   content-replayer:
     image: softwareheritage/replayer:${SWH_IMAGE_TAG:-latest}
     deploy:
       # do not start replayers by default; see above
       replicas: 0
+    env_file:
+      - ./env/common-python.env
     environment:
-      STATSD_HOST: prometheus-statsd-exporter
-      STATSD_PORT: 9125
       STATSD_TAGS: 'role:content-replayer,hostname:${HOSTNAME}'
     configs:
       - source: content-replayer
         target: /etc/softwareheritage/config.yml
     command:
       - content-replayer
     depends_on:
       - objstorage
       - redis
 
+## secondary services
+
+  amqp:
+    image: rabbitmq:3.6-management
+    ports:
+      - 5072:5672
+
+### vault services
+
+  db-vault:
+    # the database for the vault rpc server
+    image: postgres:13
+    deploy:
+      # we want only one replica of this service in the whole cluster
+      replicas: 1
+      endpoint_mode: dnsrr
+      placement:
+        max_replicas_per_node: 1
+        constraints:
+          - node.labels.org.softwareheritage.mirror.volumes.vault-db == true
+    command: ['-c', 'shared_buffers=4GB', '-c', 'effective_cache_size=4GB', '-c', 'random_page_cost=1.5', '-c', 'max_wal_size=4GB']
+    environment:
+      POSTGRES_PASSWORD_FILE: /run/secrets/postgres-password
+      POSTGRES_USER: swh
+      POSTGRES_DB: swh-vault
+    volumes:
+      - "vault-db:/var/lib/postgresql/data:rw,Z"
+    secrets:
+      - source: swh-mirror-vault-postgres-password
+        target: postgres-password
+        uid: '999'
+        mode: 0400
+
+  vault:
+    image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
+    deploy:
+      replicas: 1
+    env_file:
+      - ./env/common-python.env
+    configs:
+      - source: vault
+        target: /etc/softwareheritage/config.yml
+    environment:
+      PGHOST: db-vault
+      PGUSER: swh
+      POSTGRES_DB: swh-vault
+      PORT: "5005"
+    command: ["rpc-server", "vault"]
+    secrets:
+      - source: swh-mirror-vault-postgres-password
+        target: postgres-password
+        uid: '999'
+        mode: 0400
+
+  vault-worker:
+    image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
+    deploy:
+      replicas: 1
+    env_file:
+      - ./env/common-python.env
+      - ./env/celery-worker.env
+    environment:
+      SWH_WORKER_INSTANCE: vault
+    configs:
+      - source: vault-worker
+        target: /etc/softwareheritage/config.yml
+    command:
+      - celery-worker
+
+  # the vault really needs an SMTP server to talk to
+  mailhog:
+    image: mailhog/mailhog
+
+### scheduler services
+
+  db-scheduler:
+    # the database for the scheduler rpc server
+    image: postgres:13
+    deploy:
+      # we want only one replica of this service in the whole cluster
+      replicas: 1
+      endpoint_mode: dnsrr
+      placement:
+        max_replicas_per_node: 1
+        constraints:
+          - node.labels.org.softwareheritage.mirror.volumes.scheduler-db == true
+    command: ['-c', 'shared_buffers=4GB', '-c', 'effective_cache_size=4GB', '-c', 'random_page_cost=1.5', '-c', 'max_wal_size=4GB']
+    environment:
+      POSTGRES_PASSWORD_FILE: /run/secrets/postgres-password
+      POSTGRES_USER: swh
+      POSTGRES_DB: swh-scheduler
+    volumes:
+      - "scheduler-db:/var/lib/postgresql/data:rw,Z"
+    secrets:
+      - source: swh-mirror-scheduler-postgres-password
+        target: postgres-password
+        uid: '999'
+        mode: 0400
+
+  scheduler:
+    image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
+    deploy:
+      replicas: 1
+    configs:
+      - source: scheduler
+        target: /etc/softwareheritage/config.yml
+    env_file:
+      - ./env/common-python.env
+    environment:
+      PGHOST: db-scheduler
+      PGUSER: swh
+      POSTGRES_DB: swh-scheduler
+      PORT: "5008"
+    command: ["rpc-server", "scheduler"]
+    secrets:
+      - source: swh-mirror-scheduler-postgres-password
+        target: postgres-password
+        uid: '999'
+        mode: 0400
+
+  scheduler-listener:
+    image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
+    deploy:
+      replicas: 1
+    configs:
+      - source: scheduler
+        target: /etc/softwareheritage/config.yml
+    env_file:
+      - ./env/common-python.env
+    environment:
+      SWH_WORKER_INSTANCE: scheduler
+      PGHOST: db-scheduler
+      PGUSER: swh
+      POSTGRES_DB: swh-scheduler
+    command: ["scheduler", "start-listener"]
+    secrets:
+      - source: swh-mirror-scheduler-postgres-password
+        target: postgres-password
+        uid: '999'
+        mode: 0400
+
+  scheduler-runner:
+    image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
+    deploy:
+      replicas: 1
+    configs:
+      - source: scheduler
+        target: /etc/softwareheritage/config.yml
+    env_file:
+      - ./env/common-python.env
+    environment:
+      SWH_WORKER_INSTANCE: scheduler
+      PGHOST: db-scheduler
+      PGUSER: swh
+      POSTGRES_DB: swh-scheduler
+    command: ["scheduler", "start-runner", "--period", "10"]
+    secrets:
+      - source: swh-mirror-scheduler-postgres-password
+        target: postgres-password
+        uid: '999'
+        mode: 0400
+
+
 volumes:
   objstorage:
+  redis:
+  scheduler-db:
   storage-db:
+  vault-db:
   web-db:
-  redis:
 
 secrets:
   swh-mirror-db-postgres-password:
     external: true
   swh-mirror-web-postgres-password:
     external: true
+  swh-mirror-vault-postgres-password:
+    external: true
+  swh-mirror-scheduler-postgres-password:
+    external: true
 
 configs:
   storage:
     file: conf/storage.yml
   objstorage:
     file: conf/objstorage.yml
   nginx:
     file: conf/nginx.conf
+  scheduler:
+    file: conf/scheduler.yml
+  vault:
+    file: conf/vault.yml
+  vault-worker:
+    file: conf/vault-worker.yml
   web:
     file: conf/web.yml
   content-replayer:
     file: conf/content-replayer.yml
   graph-replayer:
     file: conf/graph-replayer.yml
   prometheus:
     file: conf/prometheus.yml
   prometheus-statsd-exporter:
     file: conf/prometheus-statsd-mapping.yml
   grafana-provisioning-datasources-prometheus:
     file: conf/grafana/provisioning/datasources/prometheus.yaml
   grafana-provisioning-dashboards-all:
     file: conf/grafana/provisioning/dashboards/all.yaml
   grafana-dashboards-graph-replayer:
     file: conf/grafana/dashboards/graph-replayer.json
   grafana-dashboards-content-replayer:
     file: conf/grafana/dashboards/content-replayer.json
   grafana-dashboards-backend-stats:
     file: conf/grafana/dashboards/backend-stats.json
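With this in place, bringing up the extended stack amounts to creating the two new external secrets and redeploying; the replayers are then scaled up explicitly since they start at 0 replicas. A sketch — the `swh-mirror` stack name and the password values are placeholders:

    # the new secrets are declared external, so they must exist before deploying
    echo -n 'change-me' | docker secret create swh-mirror-vault-postgres-password -
    echo -n 'change-me' | docker secret create swh-mirror-scheduler-postgres-password -

    docker stack deploy -c mirror.yml swh-mirror

    # once the stack is healthy, start the replayers (0 replicas by default)
    docker service scale swh-mirror_graph-replayer=16 swh-mirror_content-replayer=16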