diff --git a/site-modules/profile/files/elasticsearch/elasticsearch_close_index.py b/site-modules/profile/files/elasticsearch/elasticsearch_close_index.py
new file mode 100644
index 00000000..6695314b
--- /dev/null
+++ b/site-modules/profile/files/elasticsearch/elasticsearch_close_index.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+import click
+import datetime
+import iso8601
+
+import elasticsearch
+
+
+@click.command()
+@click.option('--host', '-h', 'hosts',
+              multiple=True,
+              help="Elasticsearch node instances")
+@click.option('--timeout', '-t', default=1200)
+@click.option('--freeze-after-days', '-f', default=7)
+@click.option('--close-after-days', '-c', default=30)
+def main(hosts, timeout, freeze_after_days, close_after_days):
+    """Janitor script to manage (freeze or close) indices when the
+       respective date thresholds are exceeded.
+
+    """
+    today = datetime.date.today()
+    # Cutoff dates are loop-invariant: compute them once up front.
+    freeze_cutoff = today - datetime.timedelta(days=freeze_after_days)
+    close_cutoff = today - datetime.timedelta(days=close_after_days)
+
+    if not hosts:
+        raise ValueError("Provide a list of elasticsearch nodes")
+
+    es = elasticsearch.Elasticsearch(hosts=hosts, timeout=timeout)
+
+    # h='i,sth,status': index name, search-throttled flag, open/close status
+    for line in sorted(es.cat.indices(h='i,sth,status').splitlines()):
+        index, throttled, status = line.split()
+        throttled = throttled == 'true'
+        # already frozen and not open: nothing left to do for this index
+        if throttled and status != 'open':
+            continue
+        # ignore dot-prefixed indexes (e.g. kibana settings)
+        if index.startswith('.'):
+            continue
+        # only handle dated indices, named <something>-YYYY.MM.DD
+        date = index.split('-')[-1]
+        if not date.startswith('20'):
+            continue
+        date = date.replace('.', '-')
+        date = iso8601.parse_date(date).date()
+        info = es.indices.get(index)[index]
+        shards = int(info['settings']['index']['number_of_shards'])
+
+        if not throttled and date < freeze_cutoff:
+            print('freezing', index)
+            es.indices.freeze(index, wait_for_active_shards=shards)
+            status = 'open'
+
+        if status == 'open' and date < close_cutoff:
+            print('closing', index)
+            es.indices.close(index)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/site-modules/profile/manifests/elasticsearch/index_janitor.pp b/site-modules/profile/manifests/elasticsearch/index_janitor.pp
new file mode 100644
index 00000000..12071458
--- /dev/null
+++ b/site-modules/profile/manifests/elasticsearch/index_janitor.pp
@@ -0,0 +1,38 @@
+# Elasticsearch index janitor
+#
+# Deploys a small python script which freezes/closes old elasticsearch
+# indices, and schedules it through cron on the including node.
+class profile::elasticsearch::index_janitor {
+  $elasticsearch_nodes = lookup('swh::elasticsearch::storage_nodes')
+
+  $script_name = 'elasticsearch_close_index.py'
+  $script_path = "/usr/local/bin/${script_name}"
+
+  # Runtime dependencies of the janitor script
+  $dependencies = ['python3-click', 'python3-elasticsearch', 'python3-iso8601']
+
+  package {$dependencies:
+    ensure => present,
+  }
+
+  file {$script_path:
+    ensure  => present,
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0755',
+    source  => "puppet:///modules/profile/elasticsearch/${script_name}",
+    require => Package[$dependencies],
+  }
+
+  # Build the repeated "--host <fqdn>" argument list from the node list
+  $es_hosts = $elasticsearch_nodes.map |$node| { $node['host'] }
+  $flag_es_hosts = join($es_hosts, ' --host ')
+
+  profile::cron::d {'elasticsearch-close-index':
+    target  => 'elasticsearch',
+    command => "chronic sh -c '${script_path} --host ${flag_es_hosts} --timeout 1200'",
+    user    => 'root',
+    minute  => 'fqdn_rand',
+    hour    => 'fqdn_rand',
+  }
+}
diff --git a/site-modules/role/manifests/swh_logstash_instance.pp b/site-modules/role/manifests/swh_logstash_instance.pp
index 10b51e56..02ccadd6 100644
--- a/site-modules/role/manifests/swh_logstash_instance.pp
+++ b/site-modules/role/manifests/swh_logstash_instance.pp
@@ -1,3 +1,5 @@
 class role::swh_logstash_instance inherits role::swh_base {
   include profile::logstash
+  # Logstash node elected to close indices, to avoid unbalancing the cluster
+  include profile::elasticsearch::index_janitor
 }