diff --git a/swh/values/cassandra-replay.yaml b/swh/values/cassandra-replay.yaml
index 4630ae6..1cefb8d 100644
--- a/swh/values/cassandra-replay.yaml
+++ b/swh/values/cassandra-replay.yaml
@@ -1,149 +1,149 @@
 namespace: cassandra-replay
 
 sentry:
   environment: production
 
 storage_replayer:
   enabled: true
   affinity:
     nodeAffinity:
       requiredDuringSchedulingIgnoredDuringExecution:
         nodeSelectorTerms:
         - matchExpressions:
           - key: "swh/replayer"
             operator: In
             values:
             - "true"
   journalBrokers:
     hosts:
     - kafka1.internal.softwareheritage.org:9094
     - kafka2.internal.softwareheritage.org:9094
     - kafka3.internal.softwareheritage.org:9094
     - kafka4.internal.softwareheritage.org:9094
     user: swh-cassandra-replayer-prod
   cassandra:
     initKeyspace: true # only to bootstrap a new cassandra database
     seeds:
     - cassandra01.internal.softwareheritage.org
     - cassandra02.internal.softwareheritage.org
     - cassandra03.internal.softwareheritage.org
     - cassandra04.internal.softwareheritage.org
     - cassandra05.internal.softwareheritage.org
     - cassandra06.internal.softwareheritage.org
   deployments:
     # content:
     #   objects:
     #   - content
     #   requestedCpu: 425m
     #   requestedMemory: 200Mi
     #   autoScaling:
     #     maxReplicaCount: 0
     #     minReplicaCount: 0
-    directory:
-      objects:
-      - directory
-      batchSize: 250
-      requestedCpu: 350m
-      requestedMemory: 250Mi
-      autoScaling:
-        maxReplicaCount: 20
-      specific_options:
-        directory_entries_insert_algo: batch
+    # directory:
+    #   objects:
+    #   - directory
+    #   batchSize: 250
+    #   requestedCpu: 350m
+    #   requestedMemory: 250Mi
+    #   autoScaling:
+    #     maxReplicaCount: 20
+    #   specific_options:
+    #     directory_entries_insert_algo: batch
     extid:
       objects:
       - extid
       batchSize: 1000
       # Full replay
       #requestedCpu: 400m
       requestedMemory: 200Mi
       #Follow up consumption
       requestedCpu: 50m
       autoScaling:
         maxReplicaCount: 5
     metadata:
       objects:
       - metadata_authority
       - metadata_fetcher
       # follow up consumption
       requestedCpu: 50m
       requestedMemory: 100Mi
       autoScaling:
         maxReplicaCount: 5
     raw-extrinsic-metadata:
       objects:
       - raw_extrinsic_metadata
       batchSize: 250
       # Full replay
       #requestedCpu: 400m
       requestedMemory: 200Mi
       # follow up consumption
       requestedCpu: 50m
       autoScaling:
         maxReplicaCount: 5
     origin:
       objects:
       - origin
       batchSize: 1000
       # Full replay
       #requestedCpu: 400m
       requestedMemory: 200Mi
       #Follow up consumption
       requestedCpu: 50m
       autoScaling:
         maxReplicaCount: 5
     origin-visit:
       objects:
       - origin_visit
       batchSize: 1000
       requestedCpu: 400m
       requestedMemory: 400Mi
       autoScaling:
-        maxReplicaCount: 20
-    origin-visit-status:
-      objects:
-      - origin_visit_status
-      batchSize: 1000
-      requestedCpu: 500m
-      requestedMemory: 300Mi
-      autoScaling:
-        maxReplicaCount: 20
+        maxReplicaCount: 50
+    # origin-visit-status:
+    #   objects:
+    #   - origin_visit_status
+    #   batchSize: 1000
+    #   requestedCpu: 500m
+    #   requestedMemory: 300Mi
+    #   autoScaling:
+    #     maxReplicaCount: 20
     release:
       objects:
       - release
       batchSize: 1000
       privileged: true
       # Full replay
       #requestedCpu: 600m
       requestedMemory: 300Mi
       # follow up consumption
       requestedCpu: 50m
       autoScaling:
         maxReplicaCount: 5
     revision:
       objects:
       - revision
       batchSize: 1000
       privileged: true
       requestedCpu: 1000m
       requestedMemory: 300Mi
       autoScaling:
         maxReplicaCount: 10
     skipped-content:
       objects:
       - skipped_content
       batchSize: 100
       # Full replay
       #requestedCpu: 300m
       requestedMemory: 400Mi
       # follow up consumption
       requestedCpu: 50m
       autoScaling:
         maxReplicaCount: 5
-    # snapshot:
-    #   objects:
-    #   - snapshot
-    #   batchSize: 250
-    #   requestedCpu: 400m
-    #   requestedMemory: 250Mi
-    #   autoScaling:
-    #     maxReplicaCount: 20
+    snapshot:
+      objects:
+      - snapshot
+      batchSize: 250
+      requestedCpu: 400m
+      requestedMemory: 250Mi
+      autoScaling:
+        maxReplicaCount: 10