diff --git a/swh/indexer/metadata.py b/swh/indexer/metadata.py
index 8d17ea2..672afa0 100644
--- a/swh/indexer/metadata.py
+++ b/swh/indexer/metadata.py
@@ -1,336 +1,310 @@
 # Copyright (C) 2017-2018  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
-import click
-import logging
 from copy import deepcopy
 
 from swh.indexer.indexer import ContentIndexer, RevisionIndexer, OriginIndexer
 from swh.indexer.origin_head import OriginHeadIndexer
 from swh.indexer.metadata_dictionary import MAPPINGS
 from swh.indexer.metadata_detector import detect_metadata
 from swh.indexer.metadata_detector import extract_minimal_metadata_dict
 from swh.indexer.storage import INDEXER_CFG_KEY
 
 from swh.model import hashutil
 
 
 class ContentMetadataIndexer(ContentIndexer):
     """Content-level indexer
 
     This indexer is in charge of:
 
     - filtering out content already indexed in content_metadata
     - reading content from the objstorage with the content's sha1 id
     - computing translated_metadata for the given context
     - using the metadata_dictionary as the 'swh-metadata-translator' tool
     - storing the result in the content_metadata table
 
     """
-    # Note: This used when the content metadata indexer is used alone
-    # (not the case for example in the case of the RevisionMetadataIndexer)
-    CONFIG_BASE_FILENAME = 'indexer/content_metadata'
-
     def filter(self, ids):
         """Filter out known sha1s and return only missing ones.
         """
         yield from self.idx_storage.content_metadata_missing((
             {
                 'id': sha1,
                 'indexer_configuration_id': self.tool['id'],
             } for sha1 in ids
         ))
 
     def index(self, id, data, log_suffix='unknown revision'):
         """Index sha1s' content and store result.
 
         Args:
             id (bytes): content's identifier
             data (bytes): raw content in bytes
             log_suffix (str): suffix appended to the log messages
 
         Returns:
             dict: dictionary representing a content_metadata entry. If the
             translation was not successful, None is returned instead.
 
         """
         result = {
             'id': id,
             'indexer_configuration_id': self.tool['id'],
             'translated_metadata': None
         }
         try:
             mapping_name = self.tool['tool_configuration']['context']
             log_suffix += ', content_id=%s' % hashutil.hash_to_hex(id)
             result['translated_metadata'] = \
                 MAPPINGS[mapping_name](log_suffix).translate(data)
         except Exception:
             self.log.exception(
                 "Problem during metadata translation "
                 "for content %s" % hashutil.hash_to_hex(id))
         if result['translated_metadata'] is None:
             return None
         return result
 
     def persist_index_computations(self, results, policy_update):
         """Persist the results in storage.
 
         Args:
             results ([dict]): list of content_metadata, dict with the
               following keys:
               - id (bytes): content's identifier (sha1)
               - translated_metadata (jsonb): detected metadata
             policy_update ([str]): either 'update-dups' or 'ignore-dups' to
               respectively update duplicates or ignore them
 
         """
         self.idx_storage.content_metadata_add(
             results, conflict_update=(policy_update == 'update-dups'))
 
 
 class RevisionMetadataIndexer(RevisionIndexer):
     """Revision-level indexer
 
     This indexer is in charge of:
 
     - filtering out revisions already indexed in the revision_metadata table
       with the defined computation tool
     - retrieving all file entries of the revision's root directory
     - using metadata_detector to find the filenames that may contain metadata
     - computing the metadata translation if necessary and possible
       (depending on the tool)
     - sending sha1s to content indexing when possible
     - storing the results for the revision
 
     """
-    CONFIG_BASE_FILENAME = 'indexer/revision_metadata'
-
     ADDITIONAL_CONFIG = {
         'tools': ('dict', {
             'name': 'swh-metadata-detector',
             'version': '0.0.2',
             'configuration': {
                 'type': 'local',
                 'context': list(MAPPINGS),
             },
         }),
     }
 
     def filter(self, sha1_gits):
         """Filter out known sha1s and return only missing ones.
 
         """
         yield from self.idx_storage.revision_metadata_missing((
             {
                 'id': sha1_git,
                 'indexer_configuration_id': self.tool['id'],
             } for sha1_git in sha1_gits
         ))
 
     def index(self, rev):
         """Index rev by processing it and organizing result.
 
         use metadata_detector to iterate on filenames
 
         - if one filename detected -> sends file to content indexer
         - if multiple file detected -> translation needed at revision level
 
         Args:
             rev (dict): revision artifact from storage
 
         Returns:
             dict: dictionary representing a revision_metadata, with keys:
 
             - id (bytes): rev's identifier (sha1_git)
             - indexer_configuration_id (int): identifier of the tool used
             - mappings ([str]): list of source metadata mappings used
             - translated_metadata (dict): metadata translated to the
               CodeMeta vocabulary
 
         """
         result = {
             'id': rev['id'],
             'indexer_configuration_id': self.tool['id'],
             'mappings': None,
             'translated_metadata': None
         }
 
         try:
             root_dir = rev['directory']
             dir_ls = self.storage.directory_ls(root_dir, recursive=False)
             files = [entry for entry in dir_ls if entry['type'] == 'file']
             detected_files = detect_metadata(files)
             (mappings, metadata) = self.translate_revision_metadata(
                 detected_files,
                 log_suffix='revision=%s' % hashutil.hash_to_hex(rev['id']))
             result['mappings'] = mappings
             result['translated_metadata'] = metadata
         except Exception as e:
             self.log.exception(
                 'Problem when indexing rev: %r', e)
         return result
 
     def persist_index_computations(self, results, policy_update):
         """Persist the results in storage.
 
         Args:
             results ([dict]): list of revision_metadata, dict with the
               following keys:
               - id (bytes): revision's identifier (sha1_git)
               - mappings ([str]): list of source metadata mappings used
               - translated_metadata (dict): metadata translated to the
                 CodeMeta vocabulary
             policy_update ([str]): either 'update-dups' or 'ignore-dups' to
               respectively update duplicates or ignore them
 
         """
         # TODO: add functions in storage to keep data in revision_metadata
         self.idx_storage.revision_metadata_add(
             results, conflict_update=(policy_update == 'update-dups'))
 
     def translate_revision_metadata(self, detected_files, log_suffix):
         """
         Translate the metadata from the detected files, handling each
         detected context in turn:
 
         Args:
             detected_files (dict): dictionary mapping context names (e.g.,
               "npm", "authors") to lists of sha1s
             log_suffix (str): suffix appended to the log messages
 
         Returns:
             (List[str], dict): list of mappings used and dict with
             translated metadata according to the CodeMeta vocabulary
 
         """
         used_mappings = [MAPPINGS[context].name for context in detected_files]
         translated_metadata = []
         tool = {
                 'name': 'swh-metadata-translator',
                 'version': '0.0.2',
                 'configuration': {
                     'type': 'local',
                     'context': None
                 },
             }
         # TODO: iterate on each context, on each file
         # -> get raw_contents
         # -> translate each content
         config = {
             k: self.config[k]
             for k in [INDEXER_CFG_KEY, 'objstorage', 'storage']
         }
         config['tools'] = [tool]
         for context in detected_files.keys():
             cfg = deepcopy(config)
             cfg['tools'][0]['configuration']['context'] = context
             c_metadata_indexer = ContentMetadataIndexer(config=cfg)
             # sha1s that are in content_metadata table
             sha1s_in_storage = []
             metadata_generator = self.idx_storage.content_metadata_get(
                 detected_files[context])
             for c in metadata_generator:
                 # extracting translated_metadata
                 sha1 = c['id']
                 sha1s_in_storage.append(sha1)
                 local_metadata = c['translated_metadata']
                 # local metadata is aggregated
                 if local_metadata:
                     translated_metadata.append(local_metadata)
 
             sha1s_filtered = [item for item in detected_files[context]
                               if item not in sha1s_in_storage]
 
             if sha1s_filtered:
                 # content indexing
                 try:
                     c_metadata_indexer.run(sha1s_filtered,
                                            policy_update='ignore-dups',
                                            log_suffix=log_suffix)
                     # aggregate metadata translated on the fly by the
                     # content indexer
                     for result in c_metadata_indexer.results:
                         local_metadata = result['translated_metadata']
                         translated_metadata.append(local_metadata)
 
                 except Exception:
                     self.log.exception(
                         "Exception while indexing metadata on contents")
 
         # transform translated_metadata into a minimal set with swh-metadata-detector
         min_metadata = extract_minimal_metadata_dict(translated_metadata)
         return (used_mappings, min_metadata)
 
 
 class OriginMetadataIndexer(OriginIndexer):
-    CONFIG_BASE_FILENAME = 'indexer/origin_intrinsic_metadata'
-
-    ADDITIONAL_CONFIG = {
-        'tools': ('list', [])
-    }
+    ADDITIONAL_CONFIG = RevisionMetadataIndexer.ADDITIONAL_CONFIG
 
     USE_TOOLS = False
 
-    def __init__(self):
-        super().__init__()
-        self.origin_head_indexer = OriginHeadIndexer()
-        self.revision_metadata_indexer = RevisionMetadataIndexer()
+    def __init__(self, config, **kwargs):
+        super().__init__(config=config, **kwargs)
+        self.origin_head_indexer = OriginHeadIndexer(config=config)
+        self.revision_metadata_indexer = RevisionMetadataIndexer(config=config)
 
     def index_list(self, origins):
         head_rev_ids = []
         origins_with_head = []
         for origin in origins:
             head_result = self.origin_head_indexer.index(origin)
             if head_result:
                 origins_with_head.append(origin)
                 head_rev_ids.append(head_result['revision_id'])
 
         head_revs = list(self.storage.revision_get(head_rev_ids))
         assert len(head_revs) == len(head_rev_ids)
 
         results = []
         for (origin, rev) in zip(origins_with_head, head_revs):
             if not rev:
                 self.log.warning('Missing head revision of origin %r',
                                  origin)
                 continue
 
             rev_metadata = self.revision_metadata_indexer.index(rev)
             orig_metadata = {
                 'from_revision': rev_metadata['id'],
                 'origin_id': origin['id'],
                 'metadata': rev_metadata['translated_metadata'],
                 'mappings': rev_metadata['mappings'],
                 'indexer_configuration_id':
                     rev_metadata['indexer_configuration_id'],
             }
             results.append((orig_metadata, rev_metadata))
         return results
 
     def persist_index_computations(self, results, policy_update):
         conflict_update = (policy_update == 'update-dups')
 
         # Deduplicate revisions
         rev_metadata = []
         orig_metadata = []
         for (orig_item, rev_item) in results:
             if rev_item not in rev_metadata:
                 rev_metadata.append(rev_item)
             if orig_item not in orig_metadata:
                 orig_metadata.append(orig_item)
 
         self.idx_storage.revision_metadata_add(
             rev_metadata, conflict_update=conflict_update)
 
         self.idx_storage.origin_intrinsic_metadata_add(
             orig_metadata, conflict_update=conflict_update)
-
-
-@click.command()
-@click.option('--revs', '-i',
-              help='Default sha1_git to lookup', multiple=True)
-def main(revs):
-    _git_sha1s = list(map(hashutil.hash_to_bytes, revs))
-    rev_metadata_indexer = RevisionMetadataIndexer()
-    rev_metadata_indexer.run(_git_sha1s, 'update-dups')
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-    main()
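
With CONFIG_BASE_FILENAME and the click entry point removed above, these indexers no
longer load their own configuration file; OriginMetadataIndexer now receives an explicit
config dict and forwards it to the OriginHeadIndexer and RevisionMetadataIndexer it
creates. A minimal sketch of driving the indexer after this change, assuming remote
backends (the storage/objstorage/indexer_storage values below are illustrative
assumptions, not part of this diff):

    from swh.indexer.metadata import OriginMetadataIndexer

    # Illustrative configuration; adapt the backend URLs to your deployment.
    config = {
        'storage': {'cls': 'remote', 'args': {'url': 'http://localhost:5002/'}},
        'objstorage': {'cls': 'remote', 'args': {'url': 'http://localhost:5003/'}},
        'indexer_storage': {'cls': 'remote', 'args': {'url': 'http://localhost:5007/'}},
    }

    # The same config is propagated to the sub-indexers built in __init__.
    indexer = OriginMetadataIndexer(config=config)
    indexer.run(['git+https://github.com/librariesio/yarn-parser'])
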
diff --git a/swh/indexer/origin_head.py b/swh/indexer/origin_head.py
index 0a95cc9..57bfb45 100644
--- a/swh/indexer/origin_head.py
+++ b/swh/indexer/origin_head.py
@@ -1,162 +1,154 @@
 # Copyright (C) 2018  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import re
 import click
 import logging
 
 from swh.indexer.indexer import OriginIndexer
 
 
 class OriginHeadIndexer(OriginIndexer):
     """Origin-level indexer.
 
     This indexer is in charge of looking up the revision that acts as the
     "head" of an origin.
 
     In git, this is usually the commit pointed to by the 'master' branch."""
 
-    ADDITIONAL_CONFIG = {
-        'tools': ('dict', {
-            'name': 'origin-metadata',
-            'version': '0.0.1',
-            'configuration': {},
-        }),
-    }
-
-    CONFIG_BASE_FILENAME = 'indexer/origin_head'
+    USE_TOOLS = False
 
     def persist_index_computations(self, results, policy_update):
         """Do nothing. The indexer's results are not persistent, they
         should only be piped to another indexer."""
         pass
 
     # Dispatch
 
     def index(self, origin):
         origin_id = origin['id']
         latest_snapshot = self.storage.snapshot_get_latest(origin_id)
         method = getattr(self, '_try_get_%s_head' % origin['type'], None)
         if method is None:
             method = self._try_get_head_generic
         rev_id = method(latest_snapshot)
         if rev_id is None:
             return None
         result = {
                 'origin_id': origin_id,
                 'revision_id': rev_id,
                 }
         return result
 
     # VCSs
 
     def _try_get_vcs_head(self, snapshot):
         try:
             if isinstance(snapshot, dict):
                 branches = snapshot['branches']
                 if branches[b'HEAD']['target_type'] == 'revision':
                     return branches[b'HEAD']['target']
         except KeyError:
             return None
 
     _try_get_hg_head = _try_get_git_head = _try_get_vcs_head
 
     # Tarballs
 
     _archive_filename_re = re.compile(
             rb'^'
             rb'(?P<pkgname>.*)[-_]'
             rb'(?P<version>[0-9]+(\.[0-9])*)'
             rb'(?P<preversion>[-+][a-zA-Z0-9.~]+?)?'
             rb'(?P<extension>(\.[a-zA-Z0-9]+)+)'
             rb'$')
 
     @classmethod
     def _parse_version(cls, filename):
         """Extracts the release version from an archive filename,
         to get an ordering whose maximum is likely to be the last
         version of the software
 
         >>> OriginHeadIndexer._parse_version(b'foo')
         (-inf,)
         >>> OriginHeadIndexer._parse_version(b'foo.tar.gz')
         (-inf,)
         >>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1.tar.gz')
         (0, 0, 1, 0)
         >>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1-beta2.tar.gz')
         (0, 0, 1, -1, 'beta2')
         >>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1+foobar.tar.gz')
         (0, 0, 1, 1, 'foobar')
         """
         res = cls._archive_filename_re.match(filename)
         if res is None:
             return (float('-infinity'),)
         version = [int(n) for n in res.group('version').decode().split('.')]
         if res.group('preversion') is None:
             version.append(0)
         else:
             preversion = res.group('preversion').decode()
             if preversion.startswith('-'):
                 version.append(-1)
                 version.append(preversion[1:])
             elif preversion.startswith('+'):
                 version.append(1)
                 version.append(preversion[1:])
             else:
                 assert False, res.group('preversion')
         return tuple(version)
 
     def _try_get_ftp_head(self, snapshot):
         archive_names = list(snapshot['branches'])
         max_archive_name = max(archive_names, key=self._parse_version)
         r = self._try_resolve_target(snapshot['branches'], max_archive_name)
         return r
 
     # Generic
 
     def _try_get_head_generic(self, snapshot):
         # Works on 'deposit', 'svn', and 'pypi'.
         try:
             if isinstance(snapshot, dict):
                 branches = snapshot['branches']
         except KeyError:
             return None
         else:
             return (
                     self._try_resolve_target(branches, b'HEAD') or
                     self._try_resolve_target(branches, b'master')
                     )
 
     def _try_resolve_target(self, branches, target_name):
         try:
             target = branches[target_name]
             while target['target_type'] == 'alias':
                 target = branches[target['target']]
             if target['target_type'] == 'revision':
                 return target['target']
             elif target['target_type'] == 'content':
                 return None  # TODO
             elif target['target_type'] == 'directory':
                 return None  # TODO
             elif target['target_type'] == 'release':
                 return None  # TODO
             else:
                 assert False
         except KeyError:
             return None
 
 
 @click.command()
 @click.option('--origins', '-i',
               help='Origins to lookup, in the "type+url" format',
               multiple=True)
 def main(origins):
     origin_head_indexer = OriginHeadIndexer()
     origin_head_indexer.run(origins)
 
 
 if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
     main()
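
For tarball origins, the head revision is picked by ordering the snapshot's branch names
with _parse_version, so max() selects the most recent release and pre-releases sort
below the corresponding final release. A small illustration (the filenames are made up;
only _parse_version comes from this module):

    from swh.indexer.origin_head import OriginHeadIndexer

    names = [
        b'gnu-hello-0.0.1.tar.gz',
        b'gnu-hello-0.0.2-beta1.tar.gz',  # pre-release: sorts below 0.0.2
        b'gnu-hello-0.0.2.tar.gz',
        b'README',                        # no version: parses to (-inf,)
    ]
    newest = max(names, key=OriginHeadIndexer._parse_version)
    print(newest)  # b'gnu-hello-0.0.2.tar.gz'
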
diff --git a/swh/indexer/tests/test_origin_metadata.py b/swh/indexer/tests/test_origin_metadata.py
index fb72a5a..7481c4a 100644
--- a/swh/indexer/tests/test_origin_metadata.py
+++ b/swh/indexer/tests/test_origin_metadata.py
@@ -1,188 +1,157 @@
 # Copyright (C) 2018-2019  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
-import pytest
-
-from unittest.mock import patch
-
 from swh.model.hashutil import hash_to_bytes
 
 from swh.indexer.metadata import OriginMetadataIndexer
 
-from .utils import BASE_TEST_CONFIG, YARN_PARSER_METADATA
+from .utils import YARN_PARSER_METADATA
 from .test_metadata import REVISION_METADATA_CONFIG
 
 
-ORIGIN_HEAD_CONFIG = {
-    **BASE_TEST_CONFIG,
-    'tools': {
-        'name': 'origin-metadata',
-        'version': '0.0.1',
-        'configuration': {},
-    },
-    'tasks': {
-        'revision_metadata': 'revision_metadata',
-        'origin_intrinsic_metadata': 'origin_intrinsic_metadata',
-    }
-}
-
-
-@pytest.fixture
-def origin_metadata_indexer():
-    prefix = 'swh.indexer.'
-    suffix = '.parse_config_file'
-    with patch(prefix + 'metadata.OriginMetadataIndexer' + suffix) as omi, \
-            patch(prefix + 'origin_head.OriginHeadIndexer' + suffix) as ohi, \
-            patch(prefix + 'metadata.RevisionMetadataIndexer' + suffix) as rmi:
-        omi.return_value = BASE_TEST_CONFIG
-        ohi.return_value = ORIGIN_HEAD_CONFIG
-        rmi.return_value = REVISION_METADATA_CONFIG
-        yield OriginMetadataIndexer()
-
-
 def test_origin_metadata_indexer(
-        idx_storage, storage, obj_storage, origin_metadata_indexer):
+        idx_storage, storage, obj_storage):
 
-    indexer = OriginMetadataIndexer()
+    indexer = OriginMetadataIndexer(config=REVISION_METADATA_CONFIG)
     indexer.run(["git+https://github.com/librariesio/yarn-parser"])
 
     origin = storage.origin_get({
         'type': 'git',
         'url': 'https://github.com/librariesio/yarn-parser'})
     rev_id = hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f')
 
     rev_metadata = {
         'id': rev_id,
         'translated_metadata': YARN_PARSER_METADATA,
         'mappings': ['npm'],
     }
     origin_metadata = {
         'origin_id': origin['id'],
         'from_revision': rev_id,
         'metadata': YARN_PARSER_METADATA,
         'mappings': ['npm'],
     }
 
     results = list(indexer.idx_storage.revision_metadata_get([rev_id]))
     for result in results:
         del result['tool']
     assert results == [rev_metadata]
 
     results = list(indexer.idx_storage.origin_intrinsic_metadata_get([
         origin['id']]))
     for result in results:
         del result['tool']
     assert results == [origin_metadata]
 
 
 def test_origin_metadata_indexer_duplicate_origin(
-        idx_storage, storage, obj_storage, origin_metadata_indexer):
-    indexer = OriginMetadataIndexer()
+        idx_storage, storage, obj_storage):
+    indexer = OriginMetadataIndexer(config=REVISION_METADATA_CONFIG)
     indexer.storage = storage
     indexer.idx_storage = idx_storage
     indexer.run(["git+https://github.com/librariesio/yarn-parser"])
 
     indexer.run(["git+https://github.com/librariesio/yarn-parser"]*2)
 
     origin = storage.origin_get({
         'type': 'git',
         'url': 'https://github.com/librariesio/yarn-parser'})
     rev_id = hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f')
 
     results = list(indexer.idx_storage.revision_metadata_get([rev_id]))
     assert len(results) == 1
 
     results = list(indexer.idx_storage.origin_intrinsic_metadata_get([
         origin['id']]))
     assert len(results) == 1
 
 
 def test_origin_metadata_indexer_missing_head(
-        idx_storage, storage, obj_storage, origin_metadata_indexer):
+        idx_storage, storage, obj_storage):
 
     storage.origin_add([{
         'type': 'git',
         'url': 'https://example.com'
     }])
 
-    indexer = OriginMetadataIndexer()
+    indexer = OriginMetadataIndexer(config=REVISION_METADATA_CONFIG)
     indexer.run(["git+https://example.com"])
 
     origin = storage.origin_get({
         'type': 'git',
         'url': 'https://example.com'})
 
     results = list(indexer.idx_storage.origin_intrinsic_metadata_get([
         origin['id']]))
     assert results == []
 
 
 def test_origin_metadata_indexer_partial_missing_head(
-        idx_storage, storage, obj_storage, origin_metadata_indexer):
+        idx_storage, storage, obj_storage):
 
     storage.origin_add([{
         'type': 'git',
         'url': 'https://example.com'
     }])
 
-    indexer = OriginMetadataIndexer()
+    indexer = OriginMetadataIndexer(config=REVISION_METADATA_CONFIG)
     indexer.run(["git+https://example.com",
                  "git+https://github.com/librariesio/yarn-parser"])
 
     origin1 = storage.origin_get({
         'type': 'git',
         'url': 'https://example.com'})
     origin2 = storage.origin_get({
         'type': 'git',
         'url': 'https://github.com/librariesio/yarn-parser'})
     rev_id = hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f')
 
     rev_metadata = {
         'id': rev_id,
         'translated_metadata': YARN_PARSER_METADATA,
         'mappings': ['npm'],
     }
     origin_metadata = {
         'origin_id': origin2['id'],
         'from_revision': rev_id,
         'metadata': YARN_PARSER_METADATA,
         'mappings': ['npm'],
     }
 
     results = list(indexer.idx_storage.revision_metadata_get([rev_id]))
     for result in results:
         del result['tool']
     assert results == [rev_metadata]
 
     results = list(indexer.idx_storage.origin_intrinsic_metadata_get([
         origin1['id'], origin2['id']]))
     for result in results:
         del result['tool']
     assert results == [origin_metadata]
 
 
 def test_origin_metadata_indexer_duplicate_revision(
-        idx_storage, storage, obj_storage, origin_metadata_indexer):
-    indexer = OriginMetadataIndexer()
+        idx_storage, storage, obj_storage):
+    indexer = OriginMetadataIndexer(config=REVISION_METADATA_CONFIG)
     indexer.storage = storage
     indexer.idx_storage = idx_storage
     indexer.run(["git+https://github.com/librariesio/yarn-parser",
                  "git+https://github.com/librariesio/yarn-parser.git"])
 
     origin1 = storage.origin_get({
         'type': 'git',
         'url': 'https://github.com/librariesio/yarn-parser'})
     origin2 = storage.origin_get({
         'type': 'git',
         'url': 'https://github.com/librariesio/yarn-parser.git'})
     assert origin1['id'] != origin2['id']
     rev_id = hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f')
 
     results = list(indexer.idx_storage.revision_metadata_get([rev_id]))
     assert len(results) == 1
 
     results = list(indexer.idx_storage.origin_intrinsic_metadata_get([
         origin1['id'], origin2['id']]))
     assert len(results) == 2
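
With the mock-based fixture gone, each test builds its OriginMetadataIndexer directly
from REVISION_METADATA_CONFIG. If a shared fixture were still desired, a plain one would
now suffice; this is only a sketch of that option under the new constructor, not part of
this diff:

    import pytest

    from swh.indexer.metadata import OriginMetadataIndexer
    from .test_metadata import REVISION_METADATA_CONFIG


    @pytest.fixture
    def origin_metadata_indexer(idx_storage, storage, obj_storage):
        # No parse_config_file patching needed: the config is passed
        # explicitly and shared with the sub-indexers.
        return OriginMetadataIndexer(config=REVISION_METADATA_CONFIG)
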