diff --git a/mirror.yml b/mirror.yml
index 8391901..eb3119b 100644
--- a/mirror.yml
+++ b/mirror.yml
@@ -1,299 +1,302 @@
version: "3.8"
services:
memcache:
# used by the web app
image: memcached
deploy:
replicas: 1
db-storage:
# the main storage database
image: postgres:13
deploy:
# we want only one replica of this service in the whole cluster
replicas: 1
+ # possible workaround to prevent idle connections from being dropped (which makes the pg connection pool fail to work after a while)
+ endpoint_mode: dnsrr
placement:
max_replicas_per_node: 1
constraints:
- node.labels.org.softwareheritage.mirror.volumes.storage-db == true
command: ['-c', 'shared_buffers=4GB', '-c', 'effective_cache_size=4GB', '-c', 'random_page_cost=1.5', '-c', 'max_wal_size=4GB']
environment:
POSTGRES_PASSWORD_FILE: /run/secrets/postgres-password
POSTGRES_USER: swh
POSTGRES_DB:
# unset POSTGRES_DB: we're handling db creation ourselves in the backend
volumes:
- "storage-db:/var/lib/postgresql/data:rw,Z"
secrets:
- source: swh-mirror-db-postgres-password
target: postgres-password
uid: '999'
mode: 0400
db-web:
# the database for the web application
image: postgres:13
deploy:
# we want only one replica of this service in the whole cluster
replicas: 1
+ endpoint_mode: dnsrr
placement:
max_replicas_per_node: 1
constraints:
- node.labels.org.softwareheritage.mirror.volumes.web-db == true
command: ['-c', 'shared_buffers=4GB', '-c', 'effective_cache_size=4GB', '-c', 'random_page_cost=1.5', '-c', 'max_wal_size=4GB']
environment:
POSTGRES_PASSWORD_FILE: /run/secrets/postgres-password
POSTGRES_USER: swh
POSTGRES_DB: swh-web
volumes:
- "web-db:/var/lib/postgresql/data:rw,Z"
secrets:
- source: swh-mirror-web-postgres-password
target: postgres-password
uid: '999'
mode: 0400
web:
# the web app; serves both the web navigation interface and the public web API
image: softwareheritage/web:${SWH_IMAGE_TAG:-latest}
configs:
- source: web
target: /etc/softwareheritage/config.yml
command: serve
environment:
PORT: "5004"
PGHOST: db-web
PGUSER: swh
POSTGRES_DB: swh-web
depends_on:
- db-web
- memcache
secrets:
- source: swh-mirror-web-postgres-password
target: postgres-password
mode: 0400
objstorage:
# the swh-objstorage backend service; this example configuration uses a simple
# filesystem-based pathslicing implementation of the swh-objstorage: see
# https://docs.softwareheritage.org/devel/apidoc/swh.objstorage.backends.pathslicing.html
image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
deploy:
# needed to allow actual and dynamic load balancing
endpoint_mode: dnsrr
# a real-life replicas value should be in the 16 to 64 range
replicas: 1
placement:
# note: if using a local volume, you need to pin the objstorage
# instances to the node hosting the volume, e.g. the manager;
# if using a remote/distributed objstorage backend (seaweedfs, cloud,
# etc.) instead, remove this placement constraint
constraints:
- node.labels.org.softwareheritage.mirror.volumes.objstorage == true
volumes:
- "objstorage:/srv/softwareheritage/objects:rw,Z"
configs:
- source: objstorage
target: /etc/softwareheritage/config.yml
environment:
PORT: "5003"
STATSD_HOST: prometheus-statsd-exporter
STATSD_PORT: 9125
command: objstorage
storage:
# the swh-storage backend service; using postgresql (db-storage) as backend
image: softwareheritage/base:${SWH_IMAGE_TAG:-latest}
deploy:
# needed to allow actual and dynamic load balancing
endpoint_mode: dnsrr
# a real-life replicas value should be in the 16 to 64 range;
# however we recommend keeping 1 in this stack deploy file so that
# an upgrade of the base image that ships a database migration script
# is applied in a consistent way
replicas: 1
configs:
- source: storage
target: /etc/softwareheritage/config.yml
environment:
PGHOST: db-storage
PGUSER: swh
POSTGRES_DB: swh-storage
FLAVOR: mirror
PORT: "5002"
STATSD_HOST: prometheus-statsd-exporter
STATSD_PORT: 9125
command: storage
depends_on:
- db-storage
secrets:
- source: swh-mirror-db-postgres-password
target: postgres-password
mode: 0400
nginx:
image: nginx
configs:
- source: nginx
target: /etc/nginx/nginx.conf
ports:
- "5081:5081/tcp"
deploy:
mode: global
prometheus:
image: prom/prometheus
depends_on:
- prometheus-statsd-exporter
command:
# Needed for the reverse-proxy
- "--web.external-url=/prometheus"
- "--config.file=/etc/prometheus/prometheus.yml"
configs:
- source: prometheus
target: /etc/prometheus/prometheus.yml
deploy:
# we want only one replica of this service in the whole cluster
replicas: 1
placement:
max_replicas_per_node: 1
constraints:
- node.labels.org.softwareheritage.mirror.monitoring == true
prometheus-statsd-exporter:
image: prom/statsd-exporter
command:
- "--statsd.mapping-config=/etc/prometheus/statsd-mapping.yml"
configs:
- source: prometheus-statsd-exporter
target: /etc/prometheus/statsd-mapping.yml
deploy:
# we want only one replica of this service in the whole cluster
replicas: 1
placement:
max_replicas_per_node: 1
constraints:
- node.labels.org.softwareheritage.mirror.monitoring == true
grafana:
image: grafana/grafana
depends_on:
- prometheus
environment:
GF_SERVER_ROOT_URL: http://localhost:5081/grafana
configs:
- source: grafana-provisioning-datasources-prometheus
target: /etc/grafana/provisioning/datasources/prometheus.yaml
- source: grafana-provisioning-dashboards-all
target: /etc/grafana/provisioning/dashboards/all.yaml
- source: grafana-dashboards-backend-stats
target: /var/lib/grafana/dashboards/backend-stats.json
- source: grafana-dashboards-content-replayer
target: /var/lib/grafana/dashboards/content-replayer.json
- source: grafana-dashboards-graph-replayer
target: /var/lib/grafana/dashboards/graph-replayer.json
deploy:
# we want only one replica of this service in the whole cluster
replicas: 1
placement:
max_replicas_per_node: 1
constraints:
- node.labels.org.softwareheritage.mirror.monitoring == true
redis:
image: redis:6.2.6
deploy:
# we want only one replica of this service in the whole cluster
replicas: 1
placement:
max_replicas_per_node: 1
constraints:
- node.labels.org.softwareheritage.mirror.volumes.redis == true
command:
- redis-server
- --save 60 1
- --loglevel warning
volumes:
- redis:/data
graph-replayer:
image: softwareheritage/replayer:${SWH_IMAGE_TAG:-latest}
deploy:
# do not start replayers by default; once the remainder of the stack is
# running as expected, bump this value. Expected real-life values should
# be something in the range [16, 64] (staging) or [16, 256] (production)
# depending on your hardware capabilities; note that there is no need to
# go above the number of partitions on the kafka cluster (hence the 64
# and 256 upper limits, depending on the execution environment).
replicas: 0
environment:
STATSD_HOST: prometheus-statsd-exporter
STATSD_PORT: 9125
STATSD_TAGS: 'role:graph-replayer,hostname:${HOSTNAME}'
configs:
- source: graph-replayer
target: /etc/softwareheritage/config.yml
command:
- graph-replayer
depends_on:
- storage
- redis
content-replayer:
image: softwareheritage/replayer:${SWH_IMAGE_TAG:-latest}
deploy:
# do not start replayers by default; see above
replicas: 0
environment:
STATSD_HOST: prometheus-statsd-exporter
STATSD_PORT: 9125
STATSD_TAGS: 'role:content-replayer,hostname:${HOSTNAME}'
configs:
- source: content-replayer
target: /etc/softwareheritage/config.yml
command:
- content-replayer
depends_on:
- objstorage
- redis
volumes:
objstorage:
storage-db:
web-db:
redis:
secrets:
swh-mirror-db-postgres-password:
external: true
swh-mirror-web-postgres-password:
external: true
configs:
storage:
file: conf/storage.yml
objstorage:
file: conf/objstorage.yml
nginx:
file: conf/nginx.conf
web:
file: conf/web.yml
content-replayer:
file: conf/content-replayer.yml
graph-replayer:
file: conf/graph-replayer.yml
prometheus:
file: conf/prometheus.yml
prometheus-statsd-exporter:
file: conf/prometheus-statsd-mapping.yml
grafana-provisioning-datasources-prometheus:
file: conf/grafana/provisioning/datasources/prometheus.yaml
grafana-provisioning-dashboards-all:
file: conf/grafana/provisioning/dashboards/all.yaml
grafana-dashboards-graph-replayer:
file: conf/grafana/dashboards/graph-replayer.json
grafana-dashboards-content-replayer:
file: conf/grafana/dashboards/content-replayer.json
grafana-dashboards-backend-stats:
file: conf/grafana/dashboards/backend-stats.json
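
Note that the graph-replayer and content-replayer services above are deployed with replicas: 0. A minimal sketch of how those replicas could be bumped once the rest of the stack is running is shown below, using a second compose file passed to docker stack deploy alongside mirror.yml; the override file name, the stack name swh-mirror and the replica count of 16 are illustrative assumptions, not part of this repository (a one-off docker service scale swh-mirror_graph-replayer=16 would achieve the same effect):

# mirror-override.yml -- hypothetical override file, deployed with e.g.:
#   docker stack deploy -c mirror.yml -c mirror-override.yml swh-mirror
version: "3.8"
services:
  graph-replayer:
    deploy:
      # stay within the replica ranges recommended in the mirror.yml comments
      replicas: 16
  content-replayer:
    deploy:
      replicas: 16

Before the first deployment, the two external secrets (swh-mirror-db-postgres-password and swh-mirror-web-postgres-password) must be created with docker secret create, and the node.labels.org.softwareheritage.mirror.* labels set with docker node update --label-add; otherwise the placement constraints above will leave the pinned services unscheduled.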