diff --git a/swh/indexer/ctags.py b/swh/indexer/ctags.py --- a/swh/indexer/ctags.py +++ b/swh/indexer/ctags.py @@ -30,8 +30,8 @@ path: path to the file lang: language for that path (optional) - Returns: - ctags' output + Yields: + dict: ctags' output """ optional = [] @@ -105,9 +105,10 @@ data (bytes): raw content in bytes Returns: - A dict, representing a content_mimetype, with keys: - - id (bytes): content's identifier (sha1) - - ctags ([dict]): ctags list of symbols + dict: a dict representing a content_ctags with keys: + + - **id** (bytes): content's identifier (sha1) + - **ctags** ([dict]): ctags list of symbols """ lang = compute_language(data, log=self.log)['lang'] @@ -144,11 +145,11 @@ Args: results ([dict]): list of content_mimetype, dict with the - following keys: + following keys: - id (bytes): content's identifier (sha1) - ctags ([dict]): ctags list of symbols policy_update ([str]): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them """ self.idx_storage.content_ctags_add( diff --git a/swh/indexer/fossology_license.py b/swh/indexer/fossology_license.py --- a/swh/indexer/fossology_license.py +++ b/swh/indexer/fossology_license.py @@ -17,7 +17,8 @@ path: filepath to determine the license Returns: - A dict with the following keys: + dict: A dict with the following keys: + - licenses ([str]): associated detected licenses to path - path (bytes): content filepath @@ -79,7 +80,8 @@ path: filepath to determine the license Returns: - A dict with the following keys: + dict: A dict with the following keys: + - licenses ([str]): associated detected licenses to path - path (bytes): content filepath @@ -94,11 +96,12 @@ raw_content (bytes): associated raw content to content id Returns: - A dict, representing a content_license, with keys: - - id (bytes): content's identifier (sha1) - - license (bytes): license in bytes - - path (bytes): path - - indexer_configuration_id (int): tool used to compute the output + dict: A dict, representing a content_license, with keys: + + - id (bytes): content's identifier (sha1) + - license (bytes): license in bytes + - path (bytes): path + - indexer_configuration_id (int): tool used to compute the output """ content_path = self.write_to_temp( @@ -121,12 +124,14 @@ Args: results ([dict]): list of content_license, dict with the - following keys: + following keys: + - id (bytes): content's identifier (sha1) - license (bytes): license in bytes - path (bytes): path + policy_update ([str]): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them """ self.idx_storage.content_fossology_license_add( @@ -136,6 +141,7 @@ class ContentFossologyLicenseIndexer( MixinFossologyLicenseIndexer, DiskIndexer, ContentIndexer): """Indexer in charge of: + - filtering out content already indexed - reading content from objstorage per the content's id (sha1) - computing {license, encoding} from that content @@ -158,10 +164,9 @@ MixinFossologyLicenseIndexer, DiskIndexer, ContentRangeIndexer): """FossologyLicense Range Indexer working on range of content identifiers. 
- It: - filters out the non textual content - (optionally) filters out content already indexed (cf - :func:`indexed_contents_in_range`) + :meth:`.indexed_contents_in_range`) - reads content from objstorage per the content's id (sha1) - computes {mimetype, encoding} from that content - stores result in storage @@ -170,15 +175,16 @@ def indexed_contents_in_range(self, start, end): """Retrieve indexed content id within range [start, end]. - Args - **start** (bytes): Starting bound from range identifier - **end** (bytes): End range identifier + Args: + start (bytes): Starting bound from range identifier + end (bytes): End range identifier Returns: - a dict with keys: + dict: a dict with keys: + - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any + this sha1 if any """ return self.idx_storage.content_fossology_license_get_range( diff --git a/swh/indexer/indexer.py b/swh/indexer/indexer.py --- a/swh/indexer/indexer.py +++ b/swh/indexer/indexer.py @@ -38,7 +38,7 @@ Args: filename (str): one of sha1's many filenames data (bytes): the sha1's content to write in temporary - file + file Returns: The path to the temporary file created. That file is @@ -82,7 +82,7 @@ To implement a new object type indexer, inherit from the BaseIndexer and implement indexing: - :func:`run`: + :meth:`.run`: object_ids are different depending on object. For example: sha1 for content, sha1_git for revision, directory, release, and id for origin @@ -92,27 +92,27 @@ Then you need to implement the following functions: - :func:`filter`: + :meth:`.filter`: filter out data already indexed (in storage). - :func:`index_object`: + :meth:`.index_object`: compute index on id with data (retrieved from the storage or the objstorage by the id key) and return the resulting index computation. - :func:`persist_index_computations`: + :meth:`.persist_index_computations`: persist the results of multiple index computations in the storage. The new indexer implementation can also override the following functions: - :func:`prepare`: + :meth:`.prepare`: Configuration preparation for the indexer. When overriding, this must call the `super().prepare()` instruction. - :func:`check`: + :meth:`.check`: Configuration check for the indexer. When overriding, this must call the `super().check()` instruction. - :func:`register_tools`: + :meth:`.register_tools`: This should return a dict of the tool(s) to use when indexing or filtering. @@ -200,10 +200,10 @@ tools (dict/[dict]): Either a dict or a list of dict. Returns: - List of dict with additional id key. + list: List of dicts with additional id key. Raises: - ValueError if not a list nor a dict. + ValueError: if not a list nor a dict. """ if isinstance(tools, list): @@ -225,11 +225,11 @@ Args: id (bytes): identifier data (bytes): id's data from storage or objstorage depending on - object type + object type Returns: - a dict that makes sense for the persist_index_computations - function. + dict: a dict that makes sense for the + :meth:`.persist_index_computations` method. """ pass @@ -241,10 +241,9 @@ Args: results ([result]): List of results. One result is the - result of the index function. + result of the index function. policy_update ([str]): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore - them + respectively update duplicates or ignore them Returns: None @@ -260,10 +259,10 @@ Args: results ([result]): List of results (dict) as returned - by index function. 
+ by index function. task (dict): a dict in the form expected by - `scheduler.backend.SchedulerBackend.create_tasks` - without `next_run`, plus an optional `result_name` key. + `scheduler.backend.SchedulerBackend.create_tasks` + without `next_run`, plus an optional `result_name` key. Returns: None @@ -293,10 +292,10 @@ Args: ids ([bytes]): id's identifier list policy_update (str): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them next_step (dict): a dict in the form expected by - `scheduler.backend.SchedulerBackend.create_tasks` - without `next_run`, plus a `result_name` key. + `scheduler.backend.SchedulerBackend.create_tasks` + without `next_run`, plus a `result_name` key. **kwargs: passed to the `index` method """ @@ -383,12 +382,12 @@ def indexed_contents_in_range(self, start, end): """Retrieve indexed contents within range [start, end]. - Args - **start** (bytes): Starting bound from range identifier - **end** (bytes): End range identifier + Args: + start (bytes): Starting bound from range identifier + end (bytes): End range identifier Yields: - Content identifier (bytes) present in the range [start, end] + bytes: Content identifier present in the range ``[start, end]`` """ pass @@ -398,12 +397,12 @@ end]. The already indexed contents are skipped. Args: - **start** (bytes): Starting bound from range identifier - **end** (bytes): End range identifier - **indexed** (Set[bytes]): Set of content already indexed. + start (bytes): Starting bound from range identifier + end (bytes): End range identifier + indexed (Set[bytes]): Set of content already indexed. Yields: - Identifier (bytes) of contents to index. + bytes: Identifier of contents to index. """ while start: @@ -420,12 +419,12 @@ """Index the contents from within range [start, end] Args: - **start** (bytes): Starting bound from range identifier - **end** (bytes): End range identifier - **indexed** (Set[bytes]): Set of content already indexed. + start (bytes): Starting bound from range identifier + end (bytes): End range identifier + indexed (Set[bytes]): Set of content already indexed. Yields: - Data indexed (dict) to persist using the indexer storage + dict: Data indexed to persist using the indexer storage """ for sha1 in self._list_contents_to_index(start, end, indexed): @@ -443,12 +442,12 @@ """Index not already indexed contents in range [start, end]. Args: - **start** (Union[bytes, str]): Starting range identifier - **end** (Union[bytes, str]): Ending range identifier + start (Union[bytes, str]): Starting range identifier + end (Union[bytes, str]): Ending range identifier Yields: - Content identifier (bytes) present in the range [start, - end] which are not already indexed. + bytes: Content identifier present in the range + ``[start, end]`` which are not already indexed. """ while start: @@ -466,14 +465,14 @@ everything from scratch). Args: - **start** (Union[bytes, str]): Starting range identifier - **end** (Union[bytes, str]): Ending range identifier - **skip_existing** (bool): Skip existing indexed data - (default) or not + start (Union[bytes, str]): Starting range identifier + end (Union[bytes, str]): Ending range identifier + skip_existing (bool): Skip existing indexed data + (default) or not **kwargs: passed to the `index` method Returns: - a boolean. True if data was indexed, False otherwise. + bool: True if data was indexed, False otherwise. 
""" with_indexed_data = False @@ -520,13 +519,12 @@ Args: ids ([Union[int, Tuple[str, bytes]]]): list of origin ids or - (type, url) tuples. + (type, url) tuples. policy_update (str): either 'update-dups' or 'ignore-dups' to - respectively update duplicates (default) - or ignore them + respectively update duplicates (default) or ignore them next_step (dict): a dict in the form expected by - `scheduler.backend.SchedulerBackend.create_tasks` - without `next_run`, plus an optional `result_name` key. + `scheduler.backend.SchedulerBackend.create_tasks` without + `next_run`, plus an optional `result_name` key. parse_ids (bool): Do we need to parse id or not (default) **kwargs: passed to the `index` method @@ -584,8 +582,7 @@ Args: ids ([bytes or str]): sha1_git's identifier list policy_update (str): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore - them + respectively update duplicates or ignore them """ results = [] diff --git a/swh/indexer/language.py b/swh/indexer/language.py --- a/swh/indexer/language.py +++ b/swh/indexer/language.py @@ -56,8 +56,8 @@ max_size (int): max size to split the raw content at Returns: - Dict with keys: - - lang: None if nothing found or the possible language + dict: Dict with keys: + - **lang**: None if nothing found or the possible language """ try: @@ -87,8 +87,8 @@ raw_content (bytes): raw content to work with Returns: - Dict with keys: - - lang: None if nothing found or the possible language + dict: Dict with keys: + - **lang**: None if nothing found or the possible language """ try: @@ -155,9 +155,9 @@ data (bytes): raw content in bytes Returns: - A dict, representing a content_mimetype, with keys: - - id (bytes): content's identifier (sha1) - - lang (bytes): detected language + dict: Dict that represents a content_mimetype, with keys: + - id (bytes): content's identifier (sha1) + - lang (bytes): detected language """ result = { @@ -198,11 +198,11 @@ Args: results ([dict]): list of content_mimetype, dict with the - following keys: + following keys: - id (bytes): content's identifier (sha1) - lang (bytes): detected language policy_update ([str]): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them """ self.idx_storage.content_language_add( diff --git a/swh/indexer/metadata.py b/swh/indexer/metadata.py --- a/swh/indexer/metadata.py +++ b/swh/indexer/metadata.py @@ -84,11 +84,11 @@ Args: results ([dict]): list of content_metadata, dict with the - following keys: + following keys: - id (bytes): content's identifier (sha1) - translated_metadata (jsonb): detected metadata policy_update ([str]): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them """ self.idx_storage.content_metadata_add( @@ -153,9 +153,9 @@ Returns: dict: dictionary representing a revision_metadata, with keys: - - id (str): rev's identifier (sha1_git) - - indexer_configuration_id (bytes): tool used - - translated_metadata: dict of retrieved metadata + - id (str): rev's identifier (sha1_git) + - indexer_configuration_id (bytes): tool used + - translated_metadata: dict of retrieved metadata """ result = { @@ -181,12 +181,12 @@ Args: results ([dict]): list of content_mimetype, dict with the - following keys: + following keys: - id (bytes): content's identifier (sha1) - mimetype (bytes): mimetype in bytes - encoding (bytes): encoding in bytes policy_update ([str]): either 'update-dups' or 
'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them """ # TODO: add functions in storage to keep data in revision_metadata @@ -280,11 +280,10 @@ as first argument; ie. not a list of ids as other indexers would. Args: - - * `origin_head` (dict): {str(origin_id): rev_id} + origin_head (dict): {str(origin_id): rev_id} keys `origin_id` and `revision_id`, which is the result of OriginHeadIndexer. - * `policy_update`: `'ignore-dups'` or `'update-dups'` + policy_update (str): `'ignore-dups'` or `'update-dups'` """ origin_head_map = {int(origin_id): hashutil.hash_to_bytes(rev_id) for (origin_id, rev_id) in origin_head.items()} diff --git a/swh/indexer/metadata_detector.py b/swh/indexer/metadata_detector.py --- a/swh/indexer/metadata_detector.py +++ b/swh/indexer/metadata_detector.py @@ -11,12 +11,12 @@ def detect_metadata(files): """ Detects files potentially containing metadata + Args: - - file_entries (list): list of files + files (list): list of files Returns: - - empty list if nothing was found - - dictionary {mapping_filenames[name]:f['sha1']} + dict: {mapping_filenames[name]:f['sha1']} (may be empty) """ results = {} for (mapping_name, mapping) in MAPPINGS.items(): @@ -39,14 +39,16 @@ def extract_minimal_metadata_dict(metadata_list): """ Every item in the metadata_list is a dict of translated_metadata in the - CodeMeta vocabulary - we wish to extract a minimal set of terms and keep all values corresponding - to this term without duplication + CodeMeta vocabulary. + + We wish to extract a minimal set of terms and keep all values corresponding + to these terms without duplication. + Args: - - metadata_list (list): list of dicts of translated_metadata + metadata_list (list): list of dicts of translated_metadata Returns: - - minimal_dict (dict): one dict with selected values of metadata + dict: minimal_dict; dict with selected values of metadata """ minimal_dict = {} for document in metadata_list: diff --git a/swh/indexer/metadata_dictionary.py b/swh/indexer/metadata_dictionary.py --- a/swh/indexer/metadata_dictionary.py +++ b/swh/indexer/metadata_dictionary.py @@ -39,12 +39,12 @@ def detect_metadata_files(self, files): """ Detects files potentially containing metadata + Args: - - file_entries (list): list of files + files (list): list of files Returns: - - empty list if nothing was found - - list of sha1 otherwise + list: list of sha1 (possibly empty) """ pass @@ -88,11 +88,11 @@ and translating with the appropriate mapping Args: - content_dict (dict) + content_dict (dict): content dict to translate Returns: dict: translated metadata in json-friendly form needed for - the indexer + the indexer """ translated_metadata = {'@type': SCHEMA_URI + 'SoftwareSourceCode'} @@ -128,11 +128,11 @@ json data and translating with the appropriate mapping Args: - raw_content: bytes + raw_content (bytes): raw content to translate Returns: dict: translated metadata in json-friendly form needed for - the indexer + the indexer """ try: diff --git a/swh/indexer/mimetype.py b/swh/indexer/mimetype.py --- a/swh/indexer/mimetype.py +++ b/swh/indexer/mimetype.py @@ -17,7 +17,7 @@ raw_content (bytes): content's raw data Returns: - A dict with mimetype and encoding key and corresponding values + dict: mimetype and encoding key and corresponding values (as bytes). 
""" @@ -60,11 +60,11 @@ data (bytes): raw content in bytes Returns: - A dict, representing a content_mimetype, with keys: + dict: content's mimetype; dict keys being - - id (bytes): content's identifier (sha1) - - mimetype (bytes): mimetype in bytes - - encoding (bytes): encoding in bytes + - **id** (bytes): content's identifier (sha1) + - **mimetype** (bytes): mimetype in bytes + - **encoding** (bytes): encoding in bytes """ try: @@ -84,15 +84,11 @@ """Persist the results in storage. Args: - results ([dict]): list of content_mimetype, dict with the - following keys: - - - id (bytes): content's identifier (sha1) - - mimetype (bytes): mimetype in bytes - - encoding (bytes): encoding in bytes + results ([dict]): list of content's mimetype dicts + (see :meth:`.index`) policy_update ([str]): either 'update-dups' or 'ignore-dups' to - respectively update duplicates or ignore them + respectively update duplicates or ignore them """ self.idx_storage.content_mimetype_add( @@ -103,14 +99,17 @@ """Mimetype Indexer working on list of content identifiers. It: - - (optionally) filters out content already indexed (cf. :callable:`filter`) + + - (optionally) filters out content already indexed (cf. + :meth:`.filter`) - reads content from objstorage per the content's id (sha1) - computes {mimetype, encoding} from that content - stores result in storage FIXME: - - 1. Rename redundant ContentMimetypeIndexer to MimetypeIndexer - - 2. Do we keep it afterwards? ~> i think this can be used with the journal + + 1. Rename redundant ContentMimetypeIndexer to MimetypeIndexer + 2. Do we keep it afterwards? ~> i think this can be used with the journal """ def filter(self, ids): @@ -129,7 +128,9 @@ """Mimetype Range Indexer working on range of content identifiers. It: - - (optionally) filters out content already indexed (cf :callable:`range`) + + - (optionally) filters out content already indexed (cf + :meth:`.indexed_contents_in_range`) - reads content from objstorage per the content's id (sha1) - computes {mimetype, encoding} from that content - stores result in storage @@ -138,15 +139,16 @@ def indexed_contents_in_range(self, start, end): """Retrieve indexed content id within range [start, end]. - Args - **start** (bytes): Starting bound from range identifier - **end** (bytes): End range identifier + Args: + start (bytes): Starting bound from range identifier + end (bytes): End range identifier Returns: - a dict with keys: + dict: a dict with keys: + - **ids** [bytes]: iterable of content ids within the range. 
- **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any + this sha1 if any """ return self.idx_storage.content_mimetype_get_range( diff --git a/swh/indexer/rehash.py b/swh/indexer/rehash.py --- a/swh/indexer/rehash.py +++ b/swh/indexer/rehash.py @@ -97,11 +97,11 @@ Args: all_contents ([dict]): List of contents as dictionary with - the necessary primary keys + the necessary primary keys checksum_algorithms ([str]): List of checksums to compute Yields: - tuple of: content to update, list of checksums computed + tuple: tuple of (content to update, list of checksums computed) """ content_ids = self._read_content_ids(all_contents) diff --git a/swh/indexer/storage/converters.py b/swh/indexer/storage/converters.py --- a/swh/indexer/storage/converters.py +++ b/swh/indexer/storage/converters.py @@ -22,11 +22,11 @@ Returns: list: list of ctags entries as dicts with the following keys: - - id (bytes): content's identifier - - name (str): symbol's name - - kind (str): symbol's kind - - language (str): language for that content - - tool_id (int): tool id used to compute ctags + - id (bytes): content's identifier + - name (str): symbol's name + - kind (str): symbol's kind + - language (str): language for that content + - tool_id (int): tool id used to compute ctags """ id = ctags['id'] @@ -47,15 +47,17 @@ Args: ctags (dict): ctags entry with the following keys: - - id (bytes): content's identifier - - ctags ([dict]): List of dictionary with the following keys: - - name (str): symbol's name - - kind (str): symbol's kind - - line (int): symbol's line in the content - - language (str): language + + - id (bytes): content's identifier + - ctags ([dict]): List of dictionary with the following keys: + - name (str): symbol's name + - kind (str): symbol's kind + - line (int): symbol's line in the content + - language (str): language Returns: - List of ctags ready entry (dict with the following keys): + list: list of ctags ready entry (dict with the following keys): + - id (bytes): content's identifier - name (str): symbol's name - kind (str): symbol's kind diff --git a/swh/indexer/storage/db.py b/swh/indexer/storage/db.py --- a/swh/indexer/storage/db.py +++ b/swh/indexer/storage/db.py @@ -20,7 +20,7 @@ Args: table (str): Table name (e.g content_mimetype, content_language, - etc...) + etc...) data (dict): Dict of data to read from hash_keys ([str]): List of keys to read in the data dict. @@ -65,11 +65,12 @@ def _convert_key(self, key, main_table='c'): """Convert keys according to specific use in the module. + Args: key (str): Key expression to change according to the alias - used in the query + used in the query main_table (str): Alias to use for the main table. Default - to c for content_{something}. + to c for content_{something}. 
Expected: Tables content_{something} being aliased as 'c' (something diff --git a/swh/indexer/storage/in_memory.py b/swh/indexer/storage/in_memory.py --- a/swh/indexer/storage/in_memory.py +++ b/swh/indexer/storage/in_memory.py @@ -50,11 +50,11 @@ ids (iterable): sha1 checksums Yields: - dictionaries with the following keys: + dict: dictionaries with the following keys: - id (bytes) - translated_metadata (str): associated metadata - tool (dict): tool used to compute metadata + - **id** (bytes) + - **translated_metadata** (str): associated metadata + - **tool** (dict): tool used to compute metadata """ for id_ in ids: @@ -72,13 +72,13 @@ Args: metadata (iterable): dictionaries with keys: - - **id**: sha1 - - **translated_metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute the - results + - **id**: sha1 + - **translated_metadata**: arbitrary dict + - **indexer_configuration_id**: tool used to compute the + results - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false) + conflict_update (bool): Flag to determine if we want to overwrite + (true) or skip duplicates (false) """ for item in metadata: @@ -108,9 +108,9 @@ Args: metadata (iterable): dictionaries with keys: - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results + - **id** (bytes): sha1 identifier + - **indexer_configuration_id** (int): tool used to compute + the results Yields: missing sha1s @@ -127,9 +127,9 @@ Yields: dictionaries with the following keys: - id (bytes) - translated_metadata (str): associated metadata - tool (dict): tool used to compute metadata + - **id** (bytes) + - **translated_metadata** (str): associated metadata + - **tool** (dict): tool used to compute metadata """ yield from self._content_metadata.get(ids) @@ -140,10 +140,10 @@ Args: metadata (iterable): dictionaries with keys: - - **id**: sha1 - - **translated_metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute the - results + - **id**: sha1 + - **translated_metadata**: arbitrary dict + - **indexer_configuration_id**: tool used to compute the + results conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) @@ -157,9 +157,9 @@ Args: metadata (iterable): dictionaries with keys: - - **id** (bytes): sha1_git revision identifier - - **indexer_configuration_id** (int): tool used to compute - the results + - **id** (bytes): sha1_git revision identifier + - **indexer_configuration_id** (int): tool used to compute + the results Yields: missing ids @@ -176,9 +176,9 @@ Yields: dictionaries with the following keys: - - **id** (bytes) - - **translated_metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata + - **id** (bytes) + - **translated_metadata** (str): associated metadata + - **tool** (dict): tool used to compute metadata """ yield from self._revision_metadata.get(ids) @@ -189,9 +189,9 @@ Args: metadata (iterable): dictionaries with keys: - - **id**: sha1_git of revision - - **translated_metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute metadata + - **id**: sha1_git of revision + - **translated_metadata**: arbitrary dict + - **indexer_configuration_id**: tool used to compute metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) @@ -204,16 +204,16 @@ Args: tools ([dict]): List of dictionary representing tool to - insert in the db. 
Dictionary with the following keys: + insert in the db. Dictionary with the following keys: - - **tool_name** (str): tool's name - - **tool_version** (str): tool's version - - **tool_configuration** (dict): tool's configuration - (free form dict) + - **tool_name** (str): tool's name + - **tool_version** (str): tool's version + - **tool_configuration** (dict): tool's configuration + (free form dict) Returns: - List of dict inserted in the db (holding the id key as - well). The order of the list is not guaranteed to match + list: List of dict inserted in the db (holding the id key as + well). The order of the list is not guaranteed to match the order of the initial list. """ @@ -231,12 +231,12 @@ Args: tool (dict): Dictionary representing a tool with the - following keys: + following keys: - - **tool_name** (str): tool's name - - **tool_version** (str): tool's version - - **tool_configuration** (dict): tool's configuration - (free form dict) + - **tool_name** (str): tool's name + - **tool_version** (str): tool's version + - **tool_configuration** (dict): tool's configuration + (free form dict) Returns: The same dictionary with an `id` key, None otherwise.
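To illustrate the ctags docstrings above: run_ctags is documented as yielding one dict per symbol. A minimal sketch of building such dicts from Universal Ctags JSON output, assuming a ctags binary with --output-format=json support; the exact flags and key names used by swh.indexer may differ:

    import json
    import subprocess

    def iter_ctags(path):
        """Yield one dict per symbol found by Universal Ctags (illustrative)."""
        # --output-format=json prints one JSON object per line
        # (assumes a Universal Ctags build on PATH).
        output = subprocess.check_output(
            ['ctags', '--output-format=json', path], universal_newlines=True)
        for line in output.splitlines():
            if not line.strip():
                continue
            tag = json.loads(line)
            yield {
                'name': tag.get('name'),
                'kind': tag.get('kind'),
                'line': tag.get('line'),
                'lang': tag.get('language'),
            }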
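The BaseIndexer contract documented in indexer.py (filter, index, persist_index_computations, plus the policy_update convention) can be pictured with a toy subclass. Everything specific here -- SizeIndexer and content_size_add -- is hypothetical and only shows the call shape:

    from swh.indexer.indexer import ContentIndexer

    class SizeIndexer(ContentIndexer):
        """Toy indexer recording the size of each content (hypothetical)."""

        def filter(self, ids):
            # Real indexers ask the indexer storage which ids are missing;
            # this toy version keeps everything.
            yield from ids

        def index(self, id, data):
            # Return a dict that persist_index_computations understands
            # (assumes the registered tool dict is exposed as self.tool).
            return {'id': id, 'size': len(data),
                    'indexer_configuration_id': self.tool['id']}

        def persist_index_computations(self, results, policy_update):
            # 'update-dups' overwrites duplicates, 'ignore-dups' keeps them.
            self.idx_storage.content_size_add(  # hypothetical storage endpoint
                results, conflict_update=(policy_update == 'update-dups'))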
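indexed_contents_in_range and _list_contents_to_index describe cursor pagination over a sha1 range: each page returns ids plus an optional next bound. Roughly, with the storage call passed in as a callable since endpoint signatures vary per indexer:

    def iter_indexed_ids(get_range, start, end, limit=1000):
        """Follow the ids/next cursor until the range is exhausted (sketch)."""
        while start:
            page = get_range(start, end, limit)  # e.g. wraps content_mimetype_get_range
            yield from page['ids']               # ids already indexed in this page
            start = page.get('next')             # None once [start, end] is done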
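compute_language in language.py returns a dict whose lang key is the detected language or None, after truncating the raw content at max_size. A hedged sketch assuming a pygments-style guess; the real detector and its defaults may differ:

    from pygments.lexers import guess_lexer
    from pygments.util import ClassNotFound

    def guess_language(raw_content, max_size=10240):
        """Return {'lang': <language name or None>} for raw bytes (illustrative)."""
        # Truncate to max_size before guessing, as the docstring describes.
        text = raw_content[:max_size].decode('utf-8', errors='ignore')
        try:
            return {'lang': guess_lexer(text).name}
        except ClassNotFound:
            return {'lang': None}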
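extract_minimal_metadata_dict keeps, for each retained CodeMeta term, every value seen across the input documents, without duplicates. Schematically, with an illustrative (not authoritative) term list:

    def minimal_metadata(metadata_list,
                         terms=('name', 'author', 'license', 'codeRepository')):
        """Merge every distinct value found per term (term list is illustrative)."""
        minimal = {}
        for document in metadata_list:
            for term in terms:
                if term not in document:
                    continue
                values = minimal.setdefault(term, [])
                if document[term] not in values:  # keep each value only once
                    values.append(document[term])
        return minimal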
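compute_mimetype_encoding in mimetype.py returns mimetype and encoding as bytes. A sketch assuming the python-magic binding; the indexer may rely on a different libmagic wrapper:

    import magic

    def mimetype_encoding(raw_content):
        """Return {'mimetype': ..., 'encoding': ...} as bytes (illustrative)."""
        mimetype = magic.Magic(mime=True).from_buffer(raw_content)
        encoding = magic.Magic(mime_encoding=True).from_buffer(raw_content)
        return {'mimetype': mimetype.encode('ascii'),
                'encoding': encoding.encode('ascii')}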
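ctags_to_db in storage/converters.py flattens one per-content ctags entry into one row per symbol, copying the content id and the tool id onto each row. Schematically (key names follow the docstrings above and may not match the actual table):

    def ctags_rows(entry, tool_id):
        """Expand one {'id', 'ctags': [...]} entry into flat per-symbol rows."""
        for symbol in entry['ctags']:
            yield {
                'id': entry['id'],                    # content identifier (sha1)
                'name': symbol['name'],
                'kind': symbol['kind'],
                'line': symbol['line'],
                'language': symbol['language'],
                'indexer_configuration_id': tool_id,  # tool used to compute ctags
            }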
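The conflict_update flag used throughout the in-memory storage's *_add methods decides whether an existing entry for the same id is overwritten or kept. A minimal sketch of that behaviour over a plain dict:

    def add_all(store, entries, conflict_update=False):
        """store maps id -> stored dict; mirrors the *_add(conflict_update=...) semantics."""
        for entry in entries:
            id_ = entry['id']
            if id_ in store and not conflict_update:
                continue                       # ignore duplicates, keep existing entry
            store[id_] = {k: v for k, v in entry.items() if k != 'id'}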