diff --git a/swh/dataset/exporter.py b/swh/dataset/exporter.py index 4111706..f2145b9 100644 --- a/swh/dataset/exporter.py +++ b/swh/dataset/exporter.py @@ -1,42 +1,58 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from types import TracebackType from typing import Any, Dict, Optional, Type class Exporter: + """ + Base class for all the exporters. + + Each export can have multiple exporters, so we can read the journal a single + time, then export the objects we read in different formats without having to + re-read them every time. + + Override this class with the behavior for an export in a specific export + format. You have to override process_object() to make it write to the + appropriate export files. + + You can also put setup and teardown logic in __enter__ and __exit__, and they + will be called automatically. + """ + def __init__(self, config: Dict[str, Any], *args: Any, **kwargs: Any) -> None: self.config: Dict[str, Any] = config def __enter__(self) -> "Exporter": return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> Optional[bool]: pass def process_object(self, object_type: str, object: Dict[str, Any]) -> None: """ Process a SWH object to export. - Override this with your custom worker. + Override this with your custom exporter. """ raise NotImplementedError class ExporterDispatch(Exporter): """ - Like Exporter, but dispatches each object type to a different function. + Like Exporter, but dispatches each object type to a different function + (e.g. you can override `process_origin(self, object)` to process origins.) 
""" def process_object(self, object_type: str, object: Dict[str, Any]) -> None: method_name = "process_" + object_type if hasattr(self, method_name): getattr(self, method_name)(object) diff --git a/swh/dataset/graph.py b/swh/dataset/graph.py index 650b43a..189d592 100644 --- a/swh/dataset/graph.py +++ b/swh/dataset/graph.py @@ -1,250 +1,250 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 import os import os.path import pathlib import shlex import subprocess import tempfile import uuid from swh.dataset.exporter import ExporterDispatch from swh.dataset.journalprocessor import ParallelJournalProcessor from swh.dataset.utils import ZSTFile from swh.model.identifiers import origin_identifier, swhid class GraphEdgesExporter(ExporterDispatch): """ Implementation of an exporter which writes all the graph edges - of a specific type in a Zstandard-compressed CSV file. + of a specific type to a Zstandard-compressed CSV file. 
Each row of the CSV is in the format: ` """ def __init__(self, config, export_path, **kwargs): super().__init__(config) self.export_path = export_path def __enter__(self): dataset_path = pathlib.Path(self.export_path) dataset_path.mkdir(exist_ok=True, parents=True) unique_id = str(uuid.uuid4()) nodes_file = dataset_path / ("graph-{}.nodes.csv.zst".format(unique_id)) edges_file = dataset_path / ("graph-{}.edges.csv.zst".format(unique_id)) self.node_writer = ZSTFile(nodes_file, "w") self.edge_writer = ZSTFile(edges_file, "w") self.node_writer.__enter__() self.edge_writer.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): self.node_writer.__exit__(exc_type, exc_value, traceback) self.edge_writer.__exit__(exc_type, exc_value, traceback) def write_node(self, node): node_type, node_id = node if node_id is None: return node_swhid = swhid(object_type=node_type, object_id=node_id) self.node_writer.write("{}\n".format(node_swhid)) def write_edge(self, src, dst, *, labels=None): src_type, src_id = src dst_type, dst_id = dst if src_id is None or dst_id is None: return src_swhid = swhid(object_type=src_type, object_id=src_id) dst_swhid = swhid(object_type=dst_type, object_id=dst_id) edge_line = " ".join([src_swhid, dst_swhid] + (labels if labels else [])) self.edge_writer.write("{}\n".format(edge_line)) def process_origin(self, origin): origin_id = origin_identifier({"url": origin["url"]}) self.write_node(("origin", origin_id)) def process_origin_visit_status(self, visit_status): origin_id = origin_identifier({"url": visit_status["origin"]}) self.write_edge(("origin", origin_id), ("snapshot", visit_status["snapshot"])) def process_snapshot(self, snapshot): if self.config.get("remove_pull_requests"): self.remove_pull_requests(snapshot) self.write_node(("snapshot", snapshot["id"])) for branch_name, branch in snapshot["branches"].items(): original_branch_name = branch_name while branch and branch.get("target_type") == "alias": branch_name = 
branch["target"] branch = snapshot["branches"][branch_name] if branch is None or not branch_name: continue self.write_edge( ("snapshot", snapshot["id"]), (branch["target_type"], branch["target"]), labels=[base64.b64encode(original_branch_name).decode(),], ) def process_release(self, release): self.write_node(("release", release["id"])) self.write_edge( ("release", release["id"]), (release["target_type"], release["target"]) ) def process_revision(self, revision): self.write_node(("revision", revision["id"])) self.write_edge( ("revision", revision["id"]), ("directory", revision["directory"]) ) for parent in revision["parents"]: self.write_edge(("revision", revision["id"]), ("revision", parent)) def process_directory(self, directory): self.write_node(("directory", directory["id"])) for entry in directory["entries"]: entry_type_mapping = { "file": "content", "dir": "directory", "rev": "revision", } self.write_edge( ("directory", directory["id"]), (entry_type_mapping[entry["type"]], entry["target"]), labels=[base64.b64encode(entry["name"]).decode(), str(entry["perms"]),], ) def process_content(self, content): self.write_node(("content", content["sha1_git"])) def remove_pull_requests(self, snapshot): """ Heuristic to filter out pull requests in snapshots: remove all branches that start with refs/ but do not start with refs/heads or refs/tags. 
""" # Copy the items with list() to remove items during iteration for branch_name, branch in list(snapshot["branches"].items()): original_branch_name = branch_name while branch and branch.get("target_type") == "alias": branch_name = branch["target"] branch = snapshot["branches"][branch_name] if branch is None or not branch_name: continue if branch_name.startswith(b"refs/") and not ( branch_name.startswith(b"refs/heads") or branch_name.startswith(b"refs/tags") ): snapshot["branches"].pop(original_branch_name) def export_edges(config, export_path, export_id, processes): """Run the edge exporter for each edge type.""" object_types = [ "origin", "origin_visit_status", "snapshot", "release", "revision", "directory", "content", ] for obj_type in object_types: print("{} edges:".format(obj_type)) exporters = [ (GraphEdgesExporter, {"export_path": os.path.join(export_path, obj_type)}), ] parallel_exporter = ParallelJournalProcessor( config, exporters, export_id, obj_type, node_sets_path=pathlib.Path(export_path) / ".node_sets", processes=processes, ) parallel_exporter.run() def sort_graph_nodes(export_path, config): """ Generate the node list from the edges files. We cannot solely rely on the object IDs that are read in the journal, as some nodes that are referred to as destinations in the edge file might not be present in the archive (e.g a rev_entry referring to a revision that we do not have crawled yet). The most efficient way of getting all the nodes that are mentioned in the edges file is therefore to use sort(1) on the gigantic edge files to get all the unique node IDs, while using the disk as a temporary buffer. 
This pipeline does, in order: - concatenate and write all the compressed edges files in graph.edges.csv.zst (using the fact that ZST compression is an additive function) ; - deflate the edges ; - count the number of edges and write it in graph.edges.count.txt ; - count the number of occurrences of each edge type and write them in graph.edges.stats.txt ; - concatenate all the (deflated) nodes from the export with the destination edges, and sort the output to get the list of unique graph nodes ; - count the number of unique graph nodes and write it in graph.nodes.count.txt ; - count the number of occurrences of each node type and write them in graph.nodes.stats.txt ; - compress and write the resulting nodes in graph.nodes.csv.zst. """ # Use awk as a replacement of `sort | uniq -c` to avoid buffering everything # in memory counter_command = "awk '{ t[$0]++ } END { for (i in t) print i,t[i] }'" sort_script = """ pv {export_path}/*/*.edges.csv.zst | tee {export_path}/graph.edges.csv.zst | zstdcat | tee >( wc -l > {export_path}/graph.edges.count.txt ) | tee >( cut -d: -f3,6 | {counter_command} | sort \ > {export_path}/graph.edges.stats.txt ) | tee >( cut -d' ' -f3 | grep . 
| \ sort -u -S{sort_buffer_size} -T{buffer_path} | \ zstdmt > {export_path}/graph.labels.csv.zst ) | cut -d' ' -f2 | cat - <( zstdcat {export_path}/*/*.nodes.csv.zst ) | sort -u -S{sort_buffer_size} -T{buffer_path} | tee >( wc -l > {export_path}/graph.nodes.count.txt ) | tee >( cut -d: -f3 | {counter_command} | sort \ > {export_path}/graph.nodes.stats.txt ) | zstdmt > {export_path}/graph.nodes.csv.zst """ # Use bytes for the sorting algorithm (faster than being locale-specific) env = { **os.environ.copy(), "LC_ALL": "C", "LC_COLLATE": "C", "LANG": "C", } sort_buffer_size = config.get("sort_buffer_size", "4G") disk_buffer_dir = config.get("disk_buffer_dir", export_path) with tempfile.TemporaryDirectory( prefix=".graph_node_sort_", dir=disk_buffer_dir ) as buffer_path: subprocess.run( [ "bash", "-c", sort_script.format( export_path=shlex.quote(str(export_path)), buffer_path=shlex.quote(str(buffer_path)), sort_buffer_size=shlex.quote(sort_buffer_size), counter_command=counter_command, ), ], env=env, ) diff --git a/swh/dataset/journalprocessor.py b/swh/dataset/journalprocessor.py index 6cf332b..626004c 100644 --- a/swh/dataset/journalprocessor.py +++ b/swh/dataset/journalprocessor.py @@ -1,366 +1,366 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import collections import concurrent.futures from concurrent.futures import FIRST_EXCEPTION, ProcessPoolExecutor import contextlib from hashlib import sha1 import multiprocessing from pathlib import Path import time from typing import Any, Dict, Mapping, Sequence, Tuple, Type from confluent_kafka import TopicPartition import tqdm from swh.dataset.exporter import Exporter from swh.dataset.utils import SQLiteSet from swh.journal.client import JournalClient from swh.journal.serializers import kafka_to_value from 
swh.model.identifiers import origin_identifier from swh.storage.fixer import fix_objects class JournalClientOffsetRanges(JournalClient): """ A subclass of JournalClient reading only inside some specific offset range. Partition assignments have to be manually given to the class. This client can only read a single topic at a time. """ def __init__( self, *args, offset_ranges: Mapping[int, Tuple[int, int]] = None, assignment: Sequence[int] = None, progress_queue: multiprocessing.Queue = None, refresh_every: int = 200, **kwargs, ): """ Args: offset_ranges: A mapping of partition_id -> (low, high) offsets that define the boundaries of the messages to consume. assignment: The list of partitions to assign to this client. progress_queue: a multiprocessing.Queue where the current progress will be reported. refresh_every: the refreshing rate of the progress reporting. """ self.offset_ranges = offset_ranges self.progress_queue = progress_queue self.refresh_every = refresh_every self.assignment = assignment self.count = None self.topic_name = None super().__init__(*args, **kwargs) def subscribe(self): self.topic_name = self.subscription[0] time.sleep(0.1) # https://github.com/edenhill/librdkafka/issues/1983 self.consumer.assign( [TopicPartition(self.topic_name, pid) for pid in self.assignment] ) def process(self, *args, **kwargs): self.count = 0 try: self.handle_committed_offsets() super().process(*args, **kwargs) except EOFError: pass finally: self.progress_queue.put(None) def handle_committed_offsets(self,): """ Handle already committed partition offsets before starting processing. """ committed = self.consumer.committed( [TopicPartition(self.topic_name, pid) for pid in self.assignment] ) for tp in committed: self.handle_offset(tp.partition, tp.offset) def handle_offset(self, partition_id, offset): """ Check whether the client has reached the end of the current partition, and trigger a reassignment if that is the case. 
Raise EOFError if all the partitions have reached the end. """ if offset < 0: # Uninitialized partition offset return if self.count % self.refresh_every == 0: self.progress_queue.put({partition_id: offset}) if offset >= self.offset_ranges[partition_id][1] - 1: self.assignment = [pid for pid in self.assignment if pid != partition_id] self.subscribe() if not self.assignment: raise EOFError def deserialize_message(self, message): """ Override of the message deserialization to hook the handling of the message offset. We also return the raw objects instead of deserializing them because we will need the partition ID later. """ # XXX: this is a bad hack that we have to do because of how the # journal API works. Ideally it would be better to change the API so # that journal clients can know the partition of the message they are # handling. self.handle_offset(message.partition(), message.offset()) self.count += 1 # return super().deserialize_message(message) return message class ParallelJournalProcessor: """ Reads the given object type from the journal in parallel. It creates one JournalExportWorker per process. """ def __init__( self, config, exporters: Sequence[Tuple[Type[Exporter], Dict[str, Any]]], export_id: str, obj_type: str, node_sets_path: Path, processes: int = 1, ): """ Args: config: the exporter config, which should also include the JournalClient configuration. exporters: a list of Exporter to process the objects export_id: a unique identifier for the export that will be used as part of a Kafka consumer group ID. obj_type: The type of SWH object to export. - processes: The number of processes to run. node_sets_path: A directory where to store the node sets. + processes: The number of processes to run. 
""" self.config = config self.exporters = exporters self.export_id = "swh-dataset-export-{}".format(export_id) self.obj_type = obj_type self.processes = processes self.node_sets_path = node_sets_path self.offsets = None def get_offsets(self): """ First pass to fetch all the current low and high offsets of each partition to define the consumption boundaries. """ if self.offsets is None: client = JournalClient( **self.config["journal"], object_types=[self.obj_type], group_id=self.export_id, ) topic_name = client.subscription[0] topics = client.consumer.list_topics(topic_name).topics partitions = topics[topic_name].partitions self.offsets = {} for partition_id in tqdm.tqdm( partitions.keys(), desc=" - Partition offsets" ): tp = TopicPartition(topic_name, partition_id) (lo, hi) = client.consumer.get_watermark_offsets(tp) self.offsets[partition_id] = (lo, hi) return self.offsets def run(self): """ Run the parallel export. """ offsets = self.get_offsets() to_assign = list(offsets.keys()) manager = multiprocessing.Manager() q = manager.Queue() with ProcessPoolExecutor(self.processes + 1) as pool: futures = [] for i in range(self.processes): futures.append( pool.submit( self.export_worker, assignment=to_assign[i :: self.processes], progress_queue=q, ) ) futures.append(pool.submit(self.progress_worker, queue=q)) concurrent.futures.wait(futures, return_when=FIRST_EXCEPTION) for f in futures: if f.running(): continue exc = f.exception() if exc: pool.shutdown(wait=False) f.result() raise exc def progress_worker(self, *args, queue=None): """ An additional worker process that reports the current progress of the export between all the different parallel consumers and across all the partitions, by consuming the shared progress reporting Queue. 
""" d = {} active_workers = self.processes offset_diff = sum((hi - lo) for lo, hi in self.offsets.values()) with tqdm.tqdm(total=offset_diff, desc=" - Journal export") as pbar: while active_workers: item = queue.get() if item is None: active_workers -= 1 continue d.update(item) progress = sum(n - self.offsets[p][0] for p, n in d.items()) pbar.set_postfix( active_workers=active_workers, total_workers=self.processes ) pbar.update(progress - pbar.n) def export_worker(self, assignment, progress_queue): worker = JournalProcessorWorker( self.config, self.exporters, self.export_id, self.obj_type, self.offsets, assignment, progress_queue, self.node_sets_path, ) with worker: worker.run() class JournalProcessorWorker: """ Worker process that processes all the messages and calls the given exporters for each object read from the journal. """ def __init__( self, config, exporters: Sequence[Tuple[Type[Exporter], Dict[str, Any]]], export_id: str, obj_type: str, offsets: Dict[int, Tuple[int, int]], assignment: Sequence[int], progress_queue: multiprocessing.Queue, node_sets_path: Path, ): self.config = config self.export_id = export_id self.obj_type = obj_type self.offsets = offsets self.assignment = assignment self.progress_queue = progress_queue self.node_sets_path = node_sets_path self.node_sets_path.mkdir(exist_ok=True, parents=True) self.node_sets: Dict[int, SQLiteSet] = {} self.exporters = [ exporter_class(config, **kwargs) for exporter_class, kwargs in exporters ] self.exit_stack: contextlib.ExitStack = contextlib.ExitStack() def __enter__(self): self.exit_stack.__enter__() for exporter in self.exporters: self.exit_stack.enter_context(exporter) return self def __exit__(self, exc_type, exc_value, traceback): self.exit_stack.__exit__(exc_type, exc_value, traceback) def get_node_set_for_partition(self, partition_id: int): """ Return an on-disk set object, which stores the nodes that have already been processed. 
Node sets are sharded by partition ID, as each object is guaranteed to be assigned to a deterministic Kafka partition. """ if partition_id not in self.node_sets: node_set_file = self.node_sets_path / "nodes-part-{}.sqlite".format( partition_id ) node_set = SQLiteSet(node_set_file) self.exit_stack.enter_context(node_set) self.node_sets[partition_id] = node_set return self.node_sets[partition_id] def run(self): """ Start a Journal client on the given assignment and process all the incoming messages. """ client = JournalClientOffsetRanges( **self.config["journal"], object_types=[self.obj_type], group_id=self.export_id, debug="cgrp,broker", offset_ranges=self.offsets, assignment=self.assignment, progress_queue=self.progress_queue, **{"message.max.bytes": str(500 * 1024 * 1024)}, ) client.process(self.process_messages) def process_messages(self, messages): """ Process the incoming Kafka messages. """ for object_type, message_list in messages.items(): fixed_objects_by_partition = collections.defaultdict(list) for message in message_list: fixed_objects_by_partition[message.partition()].extend( - fix_objects(object_type, (kafka_to_value(message.value()),)) + fix_objects(object_type, [kafka_to_value(message.value())]) ) for partition, objects in fixed_objects_by_partition.items(): for object in objects: self.process_message(object_type, partition, object) def process_message(self, object_type, partition, object): """ Process a single incoming Kafka message if the object it refers to has not been processed yet. It uses an on-disk set to make sure that each object is only ever processed once. 
""" node_set = self.get_node_set_for_partition(partition) if object_type == "origin_visit_status": origin_id = origin_identifier({"url": object["origin"]}) visit = object["visit"] node_id = sha1("{}:{}".format(origin_id, visit).encode()).digest() elif object_type == "origin": node_id = sha1(object["url"].encode()).digest() elif object_type == "content": node_id = object["sha1_git"] else: node_id = object["id"] if not node_set.add(node_id): # Node already processed, skipping. return for exporter in self.exporters: exporter.process_object(object_type, object)