Related to T4144
Details
- Reviewers
vsellier - Group Reviewers
System administrators - Maniphest Tasks
- T4144: Elastic worker infrastructure
- Commits
- R260:eb5ce179e46d: Deploy graphql application to staging cluster
`helm install` in dry-run mode renders without errors:
$ helm install --values values-swh-application-versions.yaml --values swh/values/default.yaml --values swh/values/staging.yaml swh-test swh --dry-run --debug --set namespace=test 2>&1 install.go:178: [debug] Original chart version: "" install.go:195: [debug] CHART PATH: /home/tony/work/swh/sysadm-environment/swh-charts/swh NAME: swh-test LAST DEPLOYED: Thu Aug 25 18:22:05 2022 NAMESPACE: default STATUS: pending-install REVISION: 1 TEST SUITE: None USER-SUPPLIED VALUES: graphql: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: swh/rpc operator: In values: - "true" enabled: true gunicorn: Workers: 2 threads: 4 timeout: 3600 ingress: enabled: true httpPath: / logLevel: DEBUG replicas: 1 storageUrl: http://webapp.internal.staging.swh.network:5002/ loaders: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: swh/worker operator: In values: - "true" amqp: host: scheduler0.internal.staging.swh.network deployments: git: autoScaling: maxReplicaCount: 3 minReplicacount: 1 queueThreshold: 5 queues: - swh.loader.git.tasks.UpdateGitRepository - swh.loader.git.tasks.LoadDiskGitRepository - swh.loader.git.tasks.UncompressAndLoadDiskGitRepository requestedCpu: 200m requestedMemory: 256Mi sentrySwhPackage: swh.loader.git enabled: true storage: host: storage.internal.staging.swh.network port: 5002 namespace: test sentry: environment: staging statsd_exporter: enabled: true storage_replayer: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: node-role.kubernetes.io/etcd operator: NotIn values: - "true" cassandra: consistencyLevel: LOCAL_QUORUM keySpace: swh journalBrokers: secretName: storage-replayer-broker-secret maxMessagesBytes: "524288000" storageClass: cassandra swh_graphql_image: softwareheritage/graphql swh_graphql_image_version: latest swh_loaders_image: 
softwareheritage/loaders swh_loaders_image_version: "2022-05-17" swh_storage_replayer_image: softwareheritage/storage-replayer swh_storage_replayer_image_version: "20220819.1" COMPUTED VALUES: graphql: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: swh/rpc operator: In values: - "true" enabled: true gunicorn: Workers: 2 threads: 4 timeout: 3600 ingress: enabled: true httpPath: / logLevel: DEBUG replicas: 1 storageUrl: http://webapp.internal.staging.swh.network:5002/ loaders: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: swh/worker operator: In values: - "true" amqp: host: scheduler0.internal.staging.swh.network deployments: git: autoScaling: maxReplicaCount: 3 minReplicacount: 1 queueThreshold: 5 queues: - swh.loader.git.tasks.UpdateGitRepository - swh.loader.git.tasks.LoadDiskGitRepository - swh.loader.git.tasks.UncompressAndLoadDiskGitRepository requestedCpu: 200m requestedMemory: 256Mi sentrySwhPackage: swh.loader.git enabled: true storage: host: storage.internal.staging.swh.network port: 5002 namespace: test sentry: environment: staging statsd_exporter: enabled: true image: prom/statsd-exporter imageVersion: v0.22.7 storage_replayer: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: node-role.kubernetes.io/etcd operator: NotIn values: - "true" cassandra: consistencyLevel: LOCAL_QUORUM initKeyspace: false keySpace: swh seeds: - seed1 - seed2 deployments: null enabled: false journalBrokers: hosts: - broker1 - broker2 secretName: storage-replayer-broker-secret user: myuser maxMessagesBytes: "524288000" storageClass: cassandra swh_graphql_image: softwareheritage/graphql swh_graphql_image_version: latest swh_loaders_image: softwareheritage/loaders swh_loaders_image_version: "2022-05-17" swh_storage_replayer_image: softwareheritage/storage-replayer 
swh_storage_replayer_image_version: "20220819.1" HOOKS: MANIFEST: --- # Source: swh/templates/graphql/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: graphql namespace: test data: config.yml: | storage: cls: remote url: http://webapp.internal.staging.swh.network:5002/ debug: yes server-type: wsgi --- # Source: swh/templates/loaders/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: loader-git namespace: test data: config.yml: | storage: cls: pipeline steps: - cls: buffer min_batch_size: content: 1000 content_bytes: 52428800 directory: 1000 directory_entries: 12000 revision: 1000 revision_parents: 2000 revision_bytes: 52428800 release: 1000 release_bytes: 52428800 extid: 1000 - cls: filter - cls: retry - cls: remote url: http://storage-loaders:5002/ celery: task_broker: ##amqp_host## task_queues: - swh.loader.git.tasks.UpdateGitRepository - swh.loader.git.tasks.LoadDiskGitRepository - swh.loader.git.tasks.UncompressAndLoadDiskGitRepository entrypoint.sh: | #!/bin/bash set -e # Create the full config filename cat /etc/softwareheritage/config.yml > $SWH_CONFIG_FILENAME # contains required credentials for git loader (with metadata loader inside) # ignored by the other loaders cat /etc/credentials/metadata-fetcher/data >> $SWH_CONFIG_FILENAME # Install the rabbitmq host information sed -i 's,##amqp_host##,'$RABBITMQ_HOST',g' $SWH_CONFIG_FILENAME echo Starting the swh Celery worker exec python -m celery \ --app=swh.scheduler.celery_backend.config.app \ worker \ --pool=prefork \ --concurrency=${CONCURRENCY} \ --max-tasks-per-child=${MAX_TASKS_PER_CHILD} \ -Ofair --loglevel=${LOGLEVEL} \ --without-gossip \ --without-mingle \ --hostname "${HOSTNAME}" --- # Source: swh/templates/statsd-exporter/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: prometheus-statsd-exporter namespace: test data: config.yml: | defaults: timer_type: histogram buckets: - .005 - .01 - .025 - .05 - .1 - .25 - .5 - .75 - 1 - 2 - 5 - 10 - 15 - 30 - 45 - 60 - 120 - 
300 - 600 - 900 - 1800 - 2700 - 3600 - 7200 mappings: - match: "(.*_percent)" name: "${1}" match_type: regex observer_type: histogram histogram_options: buckets: - 0.0 - 0.05 - 0.1 - 0.15 - 0.2 - 0.25 - 0.3 - 0.35 - 0.4 - 0.45 - 0.5 - 0.55 - 0.6 - 0.65 - 0.7 - 0.75 - 0.8 - 0.85 - 0.9 - 0.95 - 1. --- # Source: swh/templates/graphql/service.yaml apiVersion: v1 kind: Service metadata: name: graphql namespace: test spec: type: ClusterIP selector: app: graphql ports: - port: 5013 targetPort: 5013 --- # Source: swh/templates/loaders/services.yaml apiVersion: v1 kind: Service metadata: name: storage-loaders namespace: test spec: type: ExternalName externalName: storage.internal.staging.swh.network --- # Source: swh/templates/statsd-exporter/service.yaml apiVersion: v1 kind: Service metadata: name: prometheus-statsd-exporter namespace: test labels: app: prometheus-statsd-exporter spec: type: ClusterIP selector: app: prometheus-statsd-exporter ports: - name: statsd port: 9125 targetPort: 9125 protocol: UDP - name: http port: 9102 targetPort: 9102 --- # Source: swh/templates/graphql/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: graphql namespace: test labels: app: graphql spec: replicas: 1 selector: matchLabels: app: graphql strategy: type: RollingUpdate rollingUpdate: maxSurge: 1 template: metadata: labels: app: graphql annotations: # Force a rollout upgrade if the configuration changes checksum/config: 5216eb171588b91c17a38ea0f3c304eaa8de49fcc65bd97cd819c321ef17afd0 spec: containers: - name: graphql image: softwareheritage/graphql:latest imagePullPolicy: Always ports: - containerPort: 5013 env: - name: PORT value: "5013" - name: THREADS value: "4" - name: WORKERS value: - name: LOG_LEVEL value: "DEBUG" - name: TIMEOUT value: "3600" volumeMounts: - name: config mountPath: /etc/swh/config.yml subPath: config.yml readOnly: true volumes: - name: config configMap: name: graphql defaultMode: 0444 --- # Source: swh/templates/loaders/deployment.yaml 
apiVersion: apps/v1 kind: Deployment metadata: name: loader-git namespace: test labels: app: loader-git spec: selector: matchLabels: app: loader-git strategy: type: RollingUpdate rollingUpdate: maxSurge: 1 template: metadata: labels: app: loader-git spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: swh/worker operator: In values: - "true" containers: - name: loaders image: softwareheritage/loaders:2022-05-17 imagePullPolicy: Always command: - /entrypoint.sh resources: requests: memory: 256Mi cpu: 200m limits: memory: "4000Mi" cpu: "1200m" lifecycle: preStop: exec: command: ["kill", "1"] env: - name: STATSD_HOST value: prometheus-statsd-exporter - name: STATSD_PORT value: "9125" - name: CONCURRENCY value: "1" - name: MAX_TASKS_PER_CHILD value: "5" - name: LOGLEVEL value: "INFO" - name: SWH_CONFIG_FILENAME # FIXME: built by entrypoint.sh, determine how to properly declare this value: /tmp/config.yml - name: SWH_SENTRY_ENVIRONMENT value: staging - name: SWH_MAIN_PACKAGE value: swh.loader.git - name: SWH_SENTRY_DSN valueFrom: secretKeyRef: name: loader-git-sentry-secrets key: sentry-dsn # 'name' secret must exist & include key "host" optional: false - name: RABBITMQ_HOST valueFrom: secretKeyRef: name: amqp-access-credentials key: host # 'name' secret must exist & include key "host" optional: false volumeMounts: - name: config mountPath: /etc/softwareheritage/config.yml subPath: config.yml readOnly: true - name: config mountPath: /entrypoint.sh subPath: entrypoint.sh readOnly: true - name: metadata-fetcher-credentials mountPath: /etc/credentials/metadata-fetcher readOnly: true - mountPath: /tmp name: tmp-volume volumes: - name: config configMap: name: loader-git defaultMode: 0777 - name: tmp-volume emptyDir: {} - name: metadata-fetcher-credentials secret: secretName: metadata-fetcher-credentials --- # Source: swh/templates/statsd-exporter/deployment.yaml apiVersion: apps/v1 kind: Deployment 
metadata: name: prometheus-statsd-exporter namespace: test labels: app: prometheus-statsd-exporter spec: replicas: 1 selector: matchLabels: app: prometheus-statsd-exporter template: metadata: labels: app: prometheus-statsd-exporter spec: containers: - name: prometheus-statsd-exporter image: prom/statsd-exporter:v0.22.7 imagePullPolicy: Always args: - "--statsd.mapping-config=/etc/prometheus/statsd-mapping.yml" ports: - containerPort: 9125 volumeMounts: - name: config mountPath: /etc/prometheus/statsd-mapping.yml subPath: config.yml readOnly: true volumes: - name: config configMap: name: prometheus-statsd-exporter --- # Source: swh/templates/graphql/ingress.yaml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: namespace: test name: graphql-ingress annotations: nginx.ingress.kubernetes.io/rewrite-target: / spec: rules: - http: paths: - path: / pathType: Prefix backend: service: name: graphql port: number: 5013 --- # Source: swh/templates/loaders/keda-autoscaling.yaml apiVersion: keda.sh/v1alpha1 kind: ScaledObject metadata: name: loader-git-operators namespace: test spec: scaleTargetRef: apiVersion: apps/v1 # Optional. Default: apps/v1 kind: Deployment # Optional. Default: Deployment # Mandatory. Must be in same namespace as ScaledObject name: loader-git # envSourceContainerName: {container-name} # Optional. Default: # .spec.template.spec.containers[0] pollingInterval: 30 # Optional. Default: 30 seconds cooldownPeriod: 300 # Optional. Default: 300 seconds idleReplicaCount: 0 # Optional. Must be less than # minReplicaCount minReplicaCount: 0 maxReplicaCount: 3 fallback: # Optional. Section to specify fallback # options failureThreshold: 3 # Mandatory if fallback section is # included replicas: 6 # Mandatory if fallback section is # included advanced: # Optional. Section to specify advanced # options restoreToOriginalReplicaCount: false # Optional. Default: false horizontalPodAutoscalerConfig: # Optional. 
Section to specify HPA # related options behavior: # Optional. Use to modify HPA's scaling # behavior scaleDown: stabilizationWindowSeconds: 60 # default 300 policies: - type: Percent value: 2 periodSeconds: 15 triggers: - type: rabbitmq authenticationRef: name: amqp-authentication metadata: host: host # Optional. If not specified, it must be done # by using TriggerAuthentication. protocol: auto # Optional. Specifies protocol to use, # either amqp or http, or auto to # autodetect based on the `host` value. # Default value is auto. mode: QueueLength # QueueLength or MessageRate # message backlog or publish/sec. # target per instance value: "5" queueName: swh.loader.git.tasks.UpdateGitRepository vhostName: / # Optional. If not specified, use the vhost in the # `host` connection string. Alternatively, you can # use existing environment variables to read # configuration from: See details in "Parameter # list" section hostFromEnv: RABBITMQ_HOST% - type: rabbitmq authenticationRef: name: amqp-authentication metadata: host: host # Optional. If not specified, it must be done # by using TriggerAuthentication. protocol: auto # Optional. Specifies protocol to use, # either amqp or http, or auto to # autodetect based on the `host` value. # Default value is auto. mode: QueueLength # QueueLength or MessageRate # message backlog or publish/sec. # target per instance value: "5" queueName: swh.loader.git.tasks.LoadDiskGitRepository vhostName: / # Optional. If not specified, use the vhost in the # `host` connection string. Alternatively, you can # use existing environment variables to read # configuration from: See details in "Parameter # list" section hostFromEnv: RABBITMQ_HOST% - type: rabbitmq authenticationRef: name: amqp-authentication metadata: host: host # Optional. If not specified, it must be done # by using TriggerAuthentication. protocol: auto # Optional. Specifies protocol to use, # either amqp or http, or auto to # autodetect based on the `host` value. 
# Default value is auto. mode: QueueLength # QueueLength or MessageRate # message backlog or publish/sec. # target per instance value: "5" queueName: swh.loader.git.tasks.UncompressAndLoadDiskGitRepository vhostName: / # Optional. If not specified, use the vhost in the # `host` connection string. Alternatively, you can # use existing environment variables to read # configuration from: See details in "Parameter # list" section hostFromEnv: RABBITMQ_HOST% --- # Source: swh/templates/statsd-exporter/servicemonitor.yaml apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: swh-statsd-exporter namespace: test spec: endpoints: - path: /metrics port: http interval: 10s selector: matchLabels: app: prometheus-statsd-exporter namespaceSelector: any: true --- # Source: swh/templates/loaders/keda-autoscaling.yaml apiVersion: keda.sh/v1alpha1 kind: TriggerAuthentication metadata: name: amqp-authentication namespace: test spec: secretTargetRef: # Optional. - parameter: host name: amqp-access-credentials key: host
Diff Detail
- Repository
- R260 Helm charts for swh packages
- Branch
- master
- Lint
No Linters Available - Unit
No Unit Test Coverage - Build Status
Buildable 31102 Build 48656: arc lint + arc unit
Event Timeline
swh/values/default.yaml | ||
---|---|---|
38 | I've adapted one node to have such label: $ kubectl get nodes --show-labels | grep rpc rancher-node-staging-worker1 Ready worker 25h v1.22.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=rancher-node-staging-worker1,kubernetes.io/os=linux,node-role.kubernetes.io/worker=true,node_type=generic,swh/rpc=true |