There is currently an attempt to configure the backfiller through puppet, via the role `profile::swh::deploy::journal::backfill` applied on `storage1.staging`.
It should be adapted to (a sketch of the adapted configuration follows the current one below):
- use a read-only database/storage configuration
- not configure the storage's `journal_writer`
- use a remote objstorage
- perhaps adapt the client id to `swh.backfiller.something`
```
root@storage1:/etc/softwareheritage/journal# cat backfill.yml
---
storage:
  cls: local
  args:
    db: host=db1.internal.staging.swh.network port=5432 user=swh dbname=swh password=xxxxxxx
    objstorage:
      cls: pathslicing
      args:
        root: "/srv/softwareheritage/objects"
        slicing: 0:1/1:5
    journal_writer:
      cls: kafka
      args:
        brokers:
          - journal0.internal.staging.swh.network
        prefix: swh.journal.objects
        client_id: swh.storage.journal_writer.storage1
        anonymize: true
        producer_config:
          message.max.bytes: 1000000000
journal_writer:
  cls: kafka
  args:
    brokers:
      - journal0.internal.staging.swh.network
    prefix: swh.journal.objects
    client_id: swh.storage.journal_writer.storage1
    anonymize: true
    producer_config:
      message.max.bytes: 1000000000
```
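For reference, a minimal sketch of what the adapted configuration could look like, under the assumptions above: the remote objstorage URL, the read-only database credentials, and the `swh.backfiller.storage1` client id are illustrative placeholders, not confirmed values.

```
---
storage:
  cls: local
  args:
    # read-only database access (exact read-only user/credentials to be decided)
    db: host=db1.internal.staging.swh.network port=5432 user=swh dbname=swh password=xxxxxxx
    # remote objstorage instead of the local pathslicing one; placeholder URL
    objstorage:
      cls: remote
      args:
        url: http://objstorage0.internal.staging.swh.network:5003/
  # no journal_writer nested in the storage: the backfiller only uses the top-level one
journal_writer:
  cls: kafka
  args:
    brokers:
      - journal0.internal.staging.swh.network
    prefix: swh.journal.objects
    # adapted client id, following the swh.backfiller.something pattern (placeholder)
    client_id: swh.backfiller.storage1
    anonymize: true
    producer_config:
      message.max.bytes: 1000000000
```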