diff --git a/sysadmin/T3592-elastic-workers/worker/README.md b/sysadmin/T3592-elastic-workers/worker/README.md
index d2b99cc..b66d765 100644
--- a/sysadmin/T3592-elastic-workers/worker/README.md
+++ b/sysadmin/T3592-elastic-workers/worker/README.md
@@ -1,136 +1,135 @@
 # Goal
 
 - autoscaling workers depending on repositories to load and allocated resources.
 
 # keda
 
 This uses KEDA - K(ubernetes) E(vents)-D(riven) A(utoscaling):
 ```
 $ helm repo add kedacore https://kedacore.github.io/charts
 $ helm repo update
 swhworker@poc-rancher:~$ kubectl create namespace keda
 namespace/keda created
 swhworker@poc-rancher:~$ helm install keda kedacore/keda --namespace keda
 NAME: keda
 LAST DEPLOYED: Fri Oct  8 09:48:40 2021
 NAMESPACE: keda
 STATUS: deployed
 REVISION: 1
 TEST SUITE: None
 ```
 source: https://keda.sh/docs/2.4/deploy/
 
 # helm
 
 We use helm to ease the cluster application management.
 
 # Install
 
 Install the worker declaration from this directory in the cluster
 ```
 $ export KUBECONFIG=export KUBECONFIG=staging-workers.yaml
 $ TYPE=git; REL=workers-$TYPE; \
   helm install -f ./loader-$TYPE.staging.values.yaml $REL ../worker
 $ TYPE=pypi; REL=workers-$TYPE; \
   helm install -f ./loader-$TYPE.staging.values.yaml $REL ../worker
 ```
 
 Where:
 ```
 $ cat ../loader-git.staging.values.yaml
 # Default values for worker.
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
 
 amqp:
   host: scheduler0.internal.staging.swh.network
   queue_threshold: 10  # spawn worker per increment of `value` messages
   queues:
       - swh.loader.git.tasks.UpdateGitRepository
       - swh.loader.git.tasks.LoadDiskGitRepository
       - swh.loader.git.tasks.UncompressAndLoadDiskGitRepository
 
 storage:
   host: storage1.internal.staging.swh.network
 
 loader:
   name: loaders
   type: git
 ```
 
 # List
 
 List currently deployed applications:
 
 ```
 $ helm list
 helm list
 NAME            NAMESPACE       REVISION        UPDATED                                         STATUS          CHART           APP VERSION
 workers-bzr     default         1               2022-04-29 12:59:32.111950055 +0200 CEST        deployed        worker-0.1.0    1.16.0
 workers-git     default         4               2022-04-29 12:50:12.322826487 +0200 CEST        deployed        worker-0.1.0    1.16.0
 workers-pypi    default         1               2022-04-29 12:51:22.506259018 +0200 CEST        deployed        worker-0.1.0    1.16.0
 ```
 
 # Upgrade
 
 When adapting the worker definition, you can apply the changes by upgrading the
 deployed application:
 
 ```
 $ TYPE=git; REL=workers-$TYPE; \
   helm upgrade -f ./loader-$TYPE.staging.values.yaml $REL ../worker
 ```
 
 # Secrets
 
 The current work requires credentials (installed as secret within the cluster):
 - metadata fetcher credentials `metadata-fetcher-credentials`
 - amqp credentials ``
 
 More details describing the secrets:
 ```
 $ kubectl describe secret metadata-fetcher-credentials
 ```
 
 Installed through:
 
 ```
 $ TYPE=git
 $ kubectl -f $SECRET_FILE apply
 # for secret file in {
 # loader-$TYPE-metadata-fetcher-credentials.yaml
 # loader-$TYPE-sentry-secrets.yaml
 # amqp-access-credentials.yaml
 # }
 $ cat loader-$TYPE-metadata-fetcher-credentials.yaml
 apiVersion: v1
 kind: Secret
 metadata:
   name: metadata-fetcher-credentials
 type: Opaque
 stringData:
   data: |
     metadata_fetcher_credentials:
       github:
         github:
         - username: <redacted>
           password: <redacted>
         - ...
 $ cat amqp-access-credentials.yaml
 apiVersion: v1
 kind: Secret
 metadata:
   name: amqp-access-credentials
 type: Opaque
-data:
-  username: <base64-encoded-pass>  # output of: echo -n 'redacted-pass' | base64
-  password: <base64-encoded-pass>
+stringData:
+  host: amqp://<redacted>:<redacted>@scheduler0.internal.staging.swh.network:5672/%2f
 $ cat loaders-$TYPE-sentry-secrets.yaml
 apiVersion: v1
 kind: Secret
 metadata:
   name: loaders-$TYPE-sentry-secrets
 type: Opaque
 stringData:
   sentry-dsn: https://<redacted>@sentry.softwareheritage.org/8
 ```
diff --git a/sysadmin/T3592-elastic-workers/worker/templates/autoscale.yaml b/sysadmin/T3592-elastic-workers/worker/templates/autoscale.yaml
index 6aba442..c53530a 100644
--- a/sysadmin/T3592-elastic-workers/worker/templates/autoscale.yaml
+++ b/sysadmin/T3592-elastic-workers/worker/templates/autoscale.yaml
@@ -1,59 +1,73 @@
+---
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+  name: amqp-authentication
+  namespace: default # must be same namespace as the ScaledObject
+spec:
+  secretTargetRef: # Optional.
+  - parameter: host
+    name: amqp-access-credentials
+    key: host
+
 ---
 apiVersion: keda.sh/v1alpha1
 kind: ScaledObject
 metadata:
   name: loaders-{{ .Values.loader.name }}-{{ .Values.loader.type }}-operators
 spec:
   scaleTargetRef:
     apiVersion:    apps/v1     # Optional. Default: apps/v1
     kind:          Deployment  # Optional. Default: Deployment
     # Mandatory. Must be in same namespace as ScaledObject
     name:          {{ .Values.loader.name }}-{{ .Values.loader.type }}
     # envSourceContainerName: {container-name} # Optional. Default:
                                                # .spec.template.spec.containers[0]
   pollingInterval:  30                         # Optional. Default: 30 seconds
   cooldownPeriod:   300                        # Optional. Default: 300 seconds
   idleReplicaCount: 0                          # Optional. Must be less than
                                                # minReplicaCount
   minReplicaCount:  {{ .Values.swh.loader.replicas.min }} # Optional. Default: 0
   maxReplicaCount:  {{ .Values.swh.loader.replicas.max }} # Optional. Default: 100
   fallback:                                    # Optional. Section to specify fallback
                                                # options
     failureThreshold: 3                        # Mandatory if fallback section is
                                                # included
     replicas: 6                                # Mandatory if fallback section is
                                                # included
   advanced:                                    # Optional. Section to specify advanced
                                                # options
     restoreToOriginalReplicaCount: false       # Optional. Default: false
     horizontalPodAutoscalerConfig:             # Optional. Section to specify HPA
                                                # related options
       behavior:                                # Optional. Use to modify HPA's scaling
                                                # behavior
         scaleDown:
           stabilizationWindowSeconds: 60       # default 300
           policies:
           - type: Percent
             value: 2
             periodSeconds: 15
   triggers:
     {{- range .Values.amqp.queues }}
   - type: rabbitmq
+    authenticationRef:
+      name: amqp-authentication
     metadata:
-      host: amqp://{{ $.Values.amqp.username }}:{{ $.Values.amqp.password }}@{{ $.Values.amqp.host }}//
+      # host: intentionally omitted — supplied by TriggerAuthentication 'amqp-authentication'
                                      # Optional. If not specified, it must be done
                                      # by using TriggerAuthentication.
       protocol: auto                 # Optional. Specifies protocol to use,
                                      # either amqp or http, or auto to
                                      # autodetect based on the `host` value.
                                      # Default value is auto.
       mode: QueueLength              # QueueLength or MessageRate
       value: {{ $.Values.amqp.queue_threshold | quote }} # message backlog or publish/sec.
                                                  # target per instance
       queueName: {{ . }}
       vhostName: /                   # Optional. If not specified, use the vhost in the
                                      # `host` connection string. Alternatively, you can
                                      # use existing environment variables to read
                                      # configuration from: See details in "Parameter
                                      # list" section hostFromEnv: RABBITMQ_HOST%
     {{- end }}
diff --git a/sysadmin/T3592-elastic-workers/worker/templates/config-map.yaml b/sysadmin/T3592-elastic-workers/worker/templates/config-map.yaml
index bb18004..6030ca8 100644
--- a/sysadmin/T3592-elastic-workers/worker/templates/config-map.yaml
+++ b/sysadmin/T3592-elastic-workers/worker/templates/config-map.yaml
@@ -1,60 +1,56 @@
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: {{ .Values.loader.name }}-{{ .Values.loader.type }}
 data:
   config.yml: |
     storage:
       cls: pipeline
       steps:
       - cls: buffer
         min_batch_size:
           content: 1000
-          content_bytes: 52428800  # 50 MB
+          content_bytes: 52428800  # 50 MB
           directory: 1000
           directory_entries: 12000
           revision: 1000
           revision_parents: 2000
           revision_bytes: 52428800
           release: 1000
           release_bytes: 52428800
           extid: 1000
       - cls: filter
       - cls: retry
       - cls: remote
         url: http://{{ .Values.storage.host }}:5002/
 
     celery:
-      task_broker: amqp://##username##:##password##@{{ .Values.amqp.host }}//
+      task_broker: ##amqp_host##
       task_queues:
     {{- range .Values.amqp.queues }}
       - {{ . }}
     {{- end }}
   entrypoint.sh: |
     #!/bin/bash
 
     set -e
 
     # Create the full config filename
     cat /etc/softwareheritage/config.yml > $SWH_CONFIG_FILENAME
     # contains required credentials for git loader (with metadata loader inside)
     # ignored by the other loaders
     cat /etc/credentials/metadata-fetcher/data >> $SWH_CONFIG_FILENAME
 
-    # Work around configuration setup
-    amqp_username=$(cat /etc/credentials/amqp/username)
-    amqp_password=$(cat /etc/credentials/amqp/password)
-
-    sed -i 's/##username##/'$amqp_username'/g' $SWH_CONFIG_FILENAME
-    sed -i 's/##password##/'$amqp_password'/g' $SWH_CONFIG_FILENAME
+    # Install the rabbitmq host information
+    sed -i "s,##amqp_host##,${RABBITMQ_HOST},g" $SWH_CONFIG_FILENAME
 
     echo Starting the swh Celery worker
     exec python -m celery \
                 --app=swh.scheduler.celery_backend.config.app \
                 worker \
                 --pool=prefork \
                 --concurrency=${CONCURRENCY} \
                 --max-tasks-per-child=${MAX_TASKS_PER_CHILD} \
                 -Ofair --loglevel=${LOGLEVEL} \
                 --hostname "${HOSTNAME}"
diff --git a/sysadmin/T3592-elastic-workers/worker/templates/deployment.yaml b/sysadmin/T3592-elastic-workers/worker/templates/deployment.yaml
index b830ab2..0ad690f 100644
--- a/sysadmin/T3592-elastic-workers/worker/templates/deployment.yaml
+++ b/sysadmin/T3592-elastic-workers/worker/templates/deployment.yaml
@@ -1,89 +1,90 @@
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: {{ .Values.loader.name }}-{{ .Values.loader.type }}
   labels:
     app: {{ .Values.loader.name }}-{{ .Values.loader.type }}
 spec:
   replicas: {{ .Values.swh.loader.replicas.min }}
   selector:
     matchLabels:
       app: {{ .Values.loader.name }}-{{ .Values.loader.type }}
   strategy:
     type: RollingUpdate
     rollingUpdate:
       maxSurge: 1
   template:
     metadata:
       labels:
         app: {{ .Values.loader.name }}-{{ .Values.loader.type }}
     spec:
       containers:
       - name: loaders
         image: {{ .Values.swh.loader.image }}:{{ .Values.swh.loader.version }}
         imagePullPolicy: Always
         command:
           - /entrypoint.sh
         resources:
           requests:
             memory: "256Mi"
             cpu: "200m"
           limits:
             memory: "4000Mi"
             cpu: "1200m"
         lifecycle:
           preStop:
            exec:
              command: ["kill", "1"]
         env:
         - name: CONCURRENCY
           value: "1"
         - name: MAX_TASKS_PER_CHILD
           value: "5"
         - name: LOGLEVEL
           value: "INFO"
         - name: SWH_CONFIG_FILENAME
           # FIXME: built by entrypoint.sh, determine how to properly declare this
           value: /tmp/config.yml
         - name: SWH_SENTRY_ENVIRONMENT
           value: {{ .Values.sentry.environment }}
         - name: SWH_MAIN_PACKAGE
           value: {{ .Values.sentry.swhpackage }}
         - name: SWH_SENTRY_DSN
           valueFrom:
             secretKeyRef:
               name: {{ .Values.loader.name }}-{{ .Values.loader.type }}-sentry-secrets
               key: sentry-dsn
-              # 'name' secret must exist & include key "sentry-dsn"
+              # 'name' secret must exist & include key "sentry-dsn"
+              optional: false
+        - name: RABBITMQ_HOST
+          valueFrom:
+            secretKeyRef:
+              name: amqp-access-credentials
+              key: host
+              # 'name' secret must exist & include key "host"
               optional: false
         volumeMounts:
           - name: config
             mountPath: /etc/softwareheritage/config.yml
             subPath: config.yml
             readOnly: true
           - name: config
             mountPath: /entrypoint.sh
             subPath: entrypoint.sh
             readOnly: true
           - name: metadata-fetcher-credentials
             mountPath: /etc/credentials/metadata-fetcher
             readOnly: true
-          - name: amqp-access-credentials
-            mountPath: /etc/credentials/amqp
-            readOnly: true
           - mountPath: /tmp
             name: tmp-volume
       volumes:
         - name: config
           configMap:
             name: {{ .Values.loader.name }}-{{ .Values.loader.type }}
             defaultMode: 0777
         - name: tmp-volume
           emptyDir: {}
         - name: metadata-fetcher-credentials
           secret:
             secretName: metadata-fetcher-credentials
-        - name: amqp-access-credentials
-          secret:
-            secretName: amqp-access-credentials