diff --git a/kubernetes/29-web-db.yml b/kubernetes/29-web-db.yml
index 5fdfb94..fe3ef2e 100644
--- a/kubernetes/29-web-db.yml
+++ b/kubernetes/29-web-db.yml
@@ -1,141 +1,141 @@
 ---
 apiVersion: v1
 kind: PersistentVolume
 metadata:
   name: web-db-pv
 spec:
   capacity:
     storage: 10Gi
   volumeMode: Filesystem
   accessModes:
     - ReadWriteOnce
   persistentVolumeReclaimPolicy: Delete
   storageClassName: web-db
   local:
     path: /srv/softwareheritage-kube/dev/web-db
   nodeAffinity:
     required:
       nodeSelectorTerms:
         - matchExpressions:
             # TODO adapt for your needs
             - key: kubernetes.io/os
               operator: In
               values:
                 - linux
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
   name: web-db-pvc
 spec:
   accessModes:
     - ReadWriteOnce
   storageClassName: web-db
   resources:
     requests:
       storage: 10Gi
 ---
 ## TODO Change this to your real postgresql password
 apiVersion: v1
 kind: Secret
 metadata:
   name: web-db
 type: Opaque
 stringData:
   POSTGRES_PASSWORD: swh
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: web-db
 data:
   # property-like keys; each key maps to a simple value
   POSTGRES_USER: swh
   POSTGRES_DB: swh-web
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: web-db
   labels:
     app: web-db
 spec:
   replicas: 1
   selector:
     matchLabels:
       app: web-db
   template:
     metadata:
       labels:
         app: web-db
     spec:
       containers:
         - name: web-db
           image: postgres:13.0
           imagePullPolicy: Always
           ports:
             - containerPort: 5432
           args:
             - "-c"
             - "shared_buffers=128MB"
             - "-c"
             - "effective_cache_size=128MB"
             - "-c"
             - "random_page_cost=1.5"
             - "-c"
             - "max_wal_size=128MB"
           env:
             - name: POSTGRES_USER
               valueFrom:
                 configMapKeyRef:
                   name: web-db
                   key: POSTGRES_USER
             - name: POSTGRES_PASSWORD
               valueFrom:
                 secretKeyRef:
                   name: web-db
                   key: POSTGRES_PASSWORD
             - name: POSTGRES_DB
               valueFrom:
                 configMapKeyRef:
                   name: web-db
                   key: POSTGRES_DB
           volumeMounts:
             - mountPath: "/var/lib/postgresql/data"
               name: web-db-pvc
           resources:
             requests:
-              memory: "128Mi"
-              cpu: "50m"
+              memory: "50Mi"
+              cpu: "10m"
             limits:
-              memory: "256Mi"
-              cpu: "200m"
+              memory: "100Mi"
+              cpu: "50m"
       volumes:
         - name: web-db-pvc
           persistentVolumeClaim:
             claimName: web-db-pvc
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: web-db
 spec:
   type: ClusterIP
   selector:
     app: web-db
   ports:
     - port: 5432
       targetPort: 5432
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: web-db-dev
 spec:
   type: NodePort
   selector:
     app: web-db
   ports:
     - port: 5437
       targetPort: 5432  # internal port of the service
diff --git a/kubernetes/45-listers.yml b/kubernetes/45-listers.yml
index 3380257..374d169 100644
--- a/kubernetes/45-listers.yml
+++ b/kubernetes/45-listers.yml
@@ -1,137 +1,137 @@
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: listers
 data:
   config.yml: |
     scheduler:
       cls: remote
       url: http://scheduler:5008/
     celery:
       task_broker: amqp://guest:guest@amqp//
       task_queues:
         - swh.lister.bitbucket.tasks.FullBitBucketRelister
         - swh.lister.bitbucket.tasks.IncrementalBitBucketLister
         - swh.lister.bitbucket.tasks.RangeBitBucketLister
         - swh.lister.cgit.tasks.CGitListerTask
         - swh.lister.cran.tasks.CRANListerTask
         - swh.lister.debian.tasks.DebianListerTask
         - swh.lister.gitea.tasks.FullGiteaRelister
         - swh.lister.gitea.tasks.IncrementalGiteaLister
         - swh.lister.gitea.tasks.RangeGiteaLister
         - swh.lister.github.tasks.FullGitHubRelister
         - swh.lister.github.tasks.IncrementalGitHubLister
         - swh.lister.github.tasks.RangeGitHubLister
         - swh.lister.gitlab.tasks.FullGitLabRelister
         - swh.lister.gitlab.tasks.IncrementalGitLabLister
         - swh.lister.gitlab.tasks.RangeGitLabLister
         - swh.lister.gnu.tasks.GNUListerTask
         - swh.lister.npm.tasks.NpmIncrementalListerTask
         - swh.lister.npm.tasks.NpmListerTask
         - swh.lister.launchpad.tasks.IncrementalLaunchpadLister
         - swh.lister.launchpad.tasks.FullLaunchpadLister
         - swh.lister.packagist.tasks.PackagistListerTask
         - swh.lister.phabricator.tasks.FullPhabricatorLister
         - swh.lister.phabricator.tasks.IncrementalPhabricatorLister
         - swh.lister.pypi.tasks.PyPIListerTask
   entrypoint-init.sh: |
     #!/bin/bash
     set -e
     # echo Waiting for RabbitMQ to start
     wait-for-it amqp:5672 -s --timeout=0
     # echo Register task types in scheduler database
     wait-for-it scheduler:5008 -s --timeout=0
     swh scheduler --url http://scheduler:5008 task-type register
   entrypoint.sh: |
     #!/bin/bash
     set -e
     echo Starting the swh listers
     exec python -m celery \
       --app=swh.scheduler.celery_backend.config.app \
       worker \
       --pool=prefork --events \
       --concurrency=${CONCURRENCY} \
       --max-tasks-per-child=${MAX_TASKS_PER_CHILD} \
       -Ofair --loglevel=${LOGLEVEL} \
       --hostname "${HOSTNAME}"
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: listers
   labels:
     app: listers
 spec:
   replicas: 1
   selector:
     matchLabels:
       app: listers
   strategy:
     type: RollingUpdate
     rollingUpdate:
       maxSurge: 1
   template:
     metadata:
       labels:
         app: listers
     spec:
       initContainers:
         - name: listers-init
           image: swh/listers:latest
           imagePullPolicy: Always
           command:
             - /entrypoint.sh
           volumeMounts:
             - name: config
               mountPath: /etc/softwareheritage/config.yml
               subPath: config.yml
               readOnly: true
             - name: config
               mountPath: /entrypoint.sh
               subPath: entrypoint-init.sh
               readOnly: true
       containers:
         - name: listers
           image: swh/listers:latest
           imagePullPolicy: Always
           command:
             - /entrypoint.sh
           resources:
             requests:
-              memory: "128Mi"
-              cpu: "200m"
+              memory: "100Mi"
+              cpu: "20m"
             limits:
-              memory: "256Mi"
-              cpu: "256m"
+              memory: "150Mi"
+              cpu: "100m"
           env:
             - name: CONCURRENCY
               value: "1"
             - name: MAX_TASKS_PER_CHILD
               value: "5"
             - name: LOGLEVEL
               value: "INFO"
             - name: SWH_CONFIG_FILENAME
               value: /etc/softwareheritage/config.yml
           volumeMounts:
             - name: config
               mountPath: /etc/softwareheritage/config.yml
               subPath: config.yml
               readOnly: true
             - name: config
               mountPath: /entrypoint.sh
               subPath: entrypoint.sh
               readOnly: true
       volumes:
         - name: config
           configMap:
             name: listers
             defaultMode: 0777
diff --git a/kubernetes/55-search.yml b/kubernetes/55-search.yml
index d88f396..b8ed8d9 100644
--- a/kubernetes/55-search.yml
+++ b/kubernetes/55-search.yml
@@ -1,148 +1,148 @@
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: search
 data:
   config.yml: |
     search:
       cls: elasticsearch
       hosts:
         - elasticsearch:9200
   entrypoint-init.sh: |
     #!/bin/bash
     set -e
     wait-for-it elasticsearch:9200 -s --timeout=0
     echo "Waiting for ElasticSearch cluster to be up"
     cat << EOF | python3
     import elasticsearch
     es = elasticsearch.Elasticsearch(['elasticsearch:9200'])
     es.cluster.health(wait_for_status='yellow')
     EOF
     echo "Initialize elasticsearch index"
     swh search -C $SWH_CONFIG_FILENAME initialize
   entrypoint.sh: |
     #!/bin/bash
     set -e
     exec gunicorn --bind 0.0.0.0:5010 \
       --reload \
       --threads 4 \
       --workers 2 \
       --log-level DEBUG \
       --timeout 3600 \
       --config 'python:swh.core.api.gunicorn_config' \
       'swh.search.api.server:make_app_from_configfile()'
     ;;
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: search
   labels:
     app: search
 spec:
   replicas: 1
   selector:
     matchLabels:
       app: search
   strategy:
     type: RollingUpdate
     rollingUpdate:
       maxSurge: 1
   template:
     metadata:
       labels:
         app: search
     spec:
       initContainers:
         - name: search-init
           image: swh/search:latest
           imagePullPolicy: Always
           command:
             - /entrypoint.sh
           env:
             - name: SWH_CONFIG_FILENAME
               value: /etc/softwareheritage/config.yml
           volumeMounts:
             - name: config
               mountPath: /etc/softwareheritage/config.yml
               subPath: config.yml
               readOnly: true
             - name: config
               mountPath: /entrypoint.sh
               subPath: entrypoint-init.sh
               readOnly: true
       containers:
         - name: search
           image: swh/search:latest
           imagePullPolicy: Always
           command:
             - /entrypoint.sh
           ports:
             - containerPort: 5010
           env:
             - name: PORT
               value: "5010"
             - name: STATSD_HOST
               value: "prometheus-statsd-exporter"
             - name: STATSD_PORT
               value: "9125"
             - name: SWH_CONFIG_FILENAME
               value: /etc/softwareheritage/config.yml
           volumeMounts:
             - name: config
               mountPath: /etc/softwareheritage/config.yml
               subPath: config.yml
               readOnly: true
             - name: config
               mountPath: /entrypoint.sh
               subPath: entrypoint.sh
               readOnly: true
           resources:
             requests:
-              memory: "512Mi"
-              cpu: "250m"
+              memory: "100Mi"
+              cpu: "50m"
             limits:
-              memory: "1024Mi"
-              cpu: "500m"
+              memory: "200Mi"
+              cpu: "100m"
       volumes:
         - name: config
           configMap:
             name: search
             defaultMode: 0777
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: search
 spec:
   type: ClusterIP
   selector:
     app: search
   ports:
     - port: 5010
       targetPort: 5010
 ---
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   name: search
 spec:
   rules:
     - host: search.default
       http:
         paths:
           - path: /
             pathType: Prefix
             backend:
               service:
                 name: search
                 port:
                   number: 5010