(swh) archit@work-pc:~/swh-environment$ doco logs kafka
kafka_1 | log.cleaner.min.cleanable.ratio = 0.5
kafka_1 | log.cleaner.min.compaction.lag.ms = 0
kafka_1 | log.cleaner.threads = 1
kafka_1 | log.cleanup.policy = [delete]
kafka_1 | log.dir = /tmp/kafka-logs
kafka_1 | log.dirs = /kafka/kafka-logs-574a66d1e445
kafka_1 | log.flush.interval.messages = 9223372036854775807
kafka_1 | log.flush.interval.ms = null
kafka_1 | log.flush.offset.checkpoint.interval.ms = 60000
kafka_1 | log.flush.scheduler.interval.ms = 9223372036854775807
kafka_1 | log.flush.start.offset.checkpoint.interval.ms = 60000
kafka_1 | log.index.interval.bytes = 4096
kafka_1 | log.index.size.max.bytes = 10485760
kafka_1 | log.message.downconversion.enable = true
kafka_1 | log.message.format.version = 2.2-IV1
kafka_1 | log.message.timestamp.difference.max.ms = 9223372036854775807
kafka_1 | log.message.timestamp.type = CreateTime
kafka_1 | log.preallocate = false
kafka_1 | log.retention.bytes = -1
kafka_1 | log.retention.check.interval.ms = 300000
kafka_1 | log.retention.hours = 168
kafka_1 | log.retention.minutes = null
kafka_1 | log.retention.ms = null
kafka_1 | log.roll.hours = 168
kafka_1 | log.roll.jitter.hours = 0
kafka_1 | log.roll.jitter.ms = null
kafka_1 | log.roll.ms = null
kafka_1 | log.segment.bytes = 1073741824
kafka_1 | log.segment.delete.delay.ms = 60000
kafka_1 | max.connections.per.ip = 2147483647
kafka_1 | max.connections.per.ip.overrides =
kafka_1 | max.incremental.fetch.session.cache.slots = 1000
kafka_1 | message.max.bytes = 1000012
kafka_1 | metric.reporters = []
kafka_1 | metrics.num.samples = 2
kafka_1 | metrics.recording.level = INFO
kafka_1 | metrics.sample.window.ms = 30000
kafka_1 | min.insync.replicas = 1
kafka_1 | num.io.threads = 8
kafka_1 | num.network.threads = 3
kafka_1 | num.partitions = 1
kafka_1 | num.recovery.threads.per.data.dir = 1
kafka_1 | num.replica.alter.log.dirs.threads = null
kafka_1 | num.replica.fetchers = 1
kafka_1 | offset.metadata.max.bytes = 4096
kafka_1 | offsets.commit.required.acks = -1
kafka_1 | offsets.commit.timeout.ms = 5000
kafka_1 | offsets.load.buffer.size = 5242880
kafka_1 | offsets.retention.check.interval.ms = 600000
kafka_1 | offsets.retention.minutes = 10080
kafka_1 | offsets.topic.compression.codec = 0
kafka_1 | offsets.topic.num.partitions = 50
kafka_1 | offsets.topic.replication.factor = 1
kafka_1 | offsets.topic.segment.bytes = 104857600
kafka_1 | password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
kafka_1 | password.encoder.iterations = 4096
kafka_1 | password.encoder.key.length = 128
kafka_1 | password.encoder.keyfactory.algorithm = null
kafka_1 | password.encoder.old.secret = null
kafka_1 | password.encoder.secret = null
kafka_1 | port = 9092
kafka_1 | principal.builder.class = null
kafka_1 | producer.purgatory.purge.interval.requests = 1000
kafka_1 | queued.max.request.bytes = -1
kafka_1 | queued.max.requests = 500
kafka_1 | quota.consumer.default = 9223372036854775807
kafka_1 | quota.producer.default = 9223372036854775807
kafka_1 | quota.window.num = 11
kafka_1 | quota.window.size.seconds = 1
kafka_1 | replica.fetch.backoff.ms = 1000
kafka_1 | replica.fetch.max.bytes = 1048576
kafka_1 | replica.fetch.min.bytes = 1
kafka_1 | replica.fetch.response.max.bytes = 10485760
kafka_1 | replica.fetch.wait.max.ms = 500
kafka_1 | replica.high.watermark.checkpoint.interval.ms = 5000
kafka_1 | replica.lag.time.max.ms = 10000
kafka_1 | replica.socket.receive.buffer.bytes = 65536
kafka_1 | replica.socket.timeout.ms = 30000
kafka_1 | replication.quota.window.num = 11
kafka_1 | replication.quota.window.size.seconds = 1
kafka_1 | request.timeout.ms = 30000
kafka_1 | reserved.broker.max.id = 1000
kafka_1 | sasl.client.callback.handler.class = null
kafka_1 | sasl.enabled.mechanisms = [GSSAPI]
kafka_1 | sasl.jaas.config = null
kafka_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit
kafka_1 | sasl.kerberos.min.time.before.relogin = 60000
kafka_1 | sasl.kerberos.principal.to.local.rules = [DEFAULT]
kafka_1 | sasl.kerberos.service.name = null
kafka_1 | sasl.kerberos.ticket.renew.jitter = 0.05
kafka_1 | sasl.kerberos.ticket.renew.window.factor = 0.8
kafka_1 | sasl.login.callback.handler.class = null
kafka_1 | sasl.login.class = null
kafka_1 | sasl.login.refresh.buffer.seconds = 300
kafka_1 | sasl.login.refresh.min.period.seconds = 60
kafka_1 | sasl.login.refresh.window.factor = 0.8
kafka_1 | sasl.login.refresh.window.jitter = 0.05
kafka_1 | sasl.mechanism.inter.broker.protocol = GSSAPI
kafka_1 | sasl.server.callback.handler.class = null
kafka_1 | security.inter.broker.protocol = PLAINTEXT
kafka_1 | socket.receive.buffer.bytes = 102400
kafka_1 | socket.request.max.bytes = 104857600
kafka_1 | socket.send.buffer.bytes = 102400
kafka_1 | ssl.cipher.suites = []
kafka_1 | ssl.client.auth = none
kafka_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
kafka_1 | ssl.endpoint.identification.algorithm = https
kafka_1 | ssl.key.password = null
kafka_1 | ssl.keymanager.algorithm = SunX509
kafka_1 | ssl.keystore.location = null
kafka_1 | ssl.keystore.password = null
kafka_1 | ssl.keystore.type = JKS
kafka_1 | ssl.principal.mapping.rules = [DEFAULT]
kafka_1 | ssl.protocol = TLS
kafka_1 | ssl.provider = null
kafka_1 | ssl.secure.random.implementation = null
kafka_1 | ssl.trustmanager.algorithm = PKIX
kafka_1 | ssl.truststore.location = null
kafka_1 | ssl.truststore.password = null
kafka_1 | ssl.truststore.type = JKS
kafka_1 | transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
kafka_1 | transaction.max.timeout.ms = 900000
kafka_1 | transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
kafka_1 | transaction.state.log.load.buffer.size = 5242880
kafka_1 | transaction.state.log.min.isr = 1
kafka_1 | transaction.state.log.num.partitions = 50
kafka_1 | transaction.state.log.replication.factor = 1
kafka_1 | transaction.state.log.segment.bytes = 104857600
kafka_1 | transactional.id.expiration.ms = 604800000
kafka_1 | unclean.leader.election.enable = false
kafka_1 | zookeeper.connect = zookeeper:2181
kafka_1 | zookeeper.connection.timeout.ms = 6000
kafka_1 | zookeeper.max.in.flight.requests = 10
kafka_1 | zookeeper.session.timeout.ms = 6000
kafka_1 | zookeeper.set.acl = false
kafka_1 | zookeeper.sync.time.ms = 2000
kafka_1 | (kafka.server.KafkaConfig)
kafka_1 | [2019-07-14 11:04:22,060] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
kafka_1 | [2019-07-14 11:04:22,060] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
kafka_1 | [2019-07-14 11:04:22,108] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
kafka_1 | [2019-07-14 11:04:22,436] INFO Log directory /kafka/kafka-logs-574a66d1e445 not found, creating it. (kafka.log.LogManager)
kafka_1 | [2019-07-14 11:04:22,619] INFO Loading logs. (kafka.log.LogManager)
kafka_1 | [2019-07-14 11:04:22,695] INFO Logs loading complete in 76 ms. (kafka.log.LogManager)
kafka_1 | [2019-07-14 11:04:22,845] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
kafka_1 | [2019-07-14 11:04:22,855] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
kafka_1 | [2019-07-14 11:04:25,653] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
kafka_1 | [2019-07-14 11:04:25,700] INFO [SocketServer brokerId=1004] Created data-plane acceptor and processors for endpoint : EndPoint(null,9092,ListenerName(PLAINTEXT),PLAINTEXT) (kafka.network.SocketServer)
kafka_1 | [2019-07-14 11:04:25,702] INFO [SocketServer brokerId=1004] Started 1 acceptor threads for data-plane (kafka.network.SocketServer)
kafka_1 | [2019-07-14 11:04:25,804] INFO [ExpirationReaper-1004-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:25,806] INFO [ExpirationReaper-1004-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:25,808] INFO [ExpirationReaper-1004-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:25,809] INFO [ExpirationReaper-1004-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:25,823] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
kafka_1 | [2019-07-14 11:04:25,899] INFO Creating /brokers/ids/1004 (is it secure? false) (kafka.zk.KafkaZkClient)
kafka_1 | [2019-07-14 11:04:26,006] INFO Stat of the created znode at /brokers/ids/1004 is: 350,350,1563102265912,1563102265912,1,0,0,72058175869812736,182,0,350
kafka_1 | (kafka.zk.KafkaZkClient)
kafka_1 | [2019-07-14 11:04:26,008] INFO Registered broker 1004 at path /brokers/ids/1004 with addresses: ArrayBuffer(EndPoint(kafka,9092,ListenerName(PLAINTEXT),PLAINTEXT)), czxid (broker epoch): 350 (kafka.zk.KafkaZkClient)
kafka_1 | [2019-07-14 11:04:26,014] WARN No meta.properties file under dir /kafka/kafka-logs-574a66d1e445/meta.properties (kafka.server.BrokerMetadataCheckpoint)
kafka_1 | [2019-07-14 11:04:26,600] INFO [ExpirationReaper-1004-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:26,673] INFO [ExpirationReaper-1004-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:26,675] INFO [ExpirationReaper-1004-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2019-07-14 11:04:26,786] INFO [GroupCoordinator 1004]: Starting up. (kafka.coordinator.group.GroupCoordinator)
kafka_1 | [2019-07-14 11:04:26,788] INFO [GroupCoordinator 1004]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
kafka_1 | [2019-07-14 11:04:26,796] INFO [GroupMetadataManager brokerId=1004] Removed 0 expired offsets in 8 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
kafka_1 | [2019-07-14 11:04:26,952] INFO [ProducerId Manager 1004]: Acquired new producerId block (brokerId:1004,blockStartProducerId:8000,blockEndProducerId:8999) by writing to Zk with path version 9 (kafka.coordinator.transaction.ProducerIdManager)
kafka_1 | [2019-07-14 11:04:27,139] INFO [TransactionCoordinator id=1004] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
kafka_1 | [2019-07-14 11:04:27,141] INFO [Transaction Marker Channel Manager 1004]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
kafka_1 | [2019-07-14 11:04:27,141] INFO [TransactionCoordinator id=1004] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
kafka_1 | [2019-07-14 11:04:27,225] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
kafka_1 | [2019-07-14 11:04:27,302] INFO [SocketServer brokerId=1004] Started data-plane processors for 1 acceptors (kafka.network.SocketServer)
kafka_1 | [2019-07-14 11:04:27,325] INFO Kafka version: 2.2.1 (org.apache.kafka.common.utils.AppInfoParser)
kafka_1 | [2019-07-14 11:04:27,325] INFO Kafka commitId: 55783d3133a5a49a (org.apache.kafka.common.utils.AppInfoParser)
kafka_1 | [2019-07-14 11:04:27,327] INFO [KafkaServer id=1004] started (kafka.server.KafkaServer)
kafka_1 | [2019-07-14 11:14:26,788] INFO [GroupMetadataManager brokerId=1004] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
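The broker comes up cleanly: it registers itself in ZooKeeper as broker 1004, advertises the PLAINTEXT endpoint kafka:9092, and finishes with "[KafkaServer id=1004] started". As a quick sanity check that it is actually reachable, one can ask it for its topic list from inside the compose network. This is a minimal sketch, not part of swh-environment: it assumes the broker resolves as kafka:9092 (the endpoint registered in the log above) and that the kafka-python package is installed; the script name and client id are made up for illustration.

    # check_kafka.py -- hypothetical sanity check, not shipped with swh-environment.
    # Assumes it runs somewhere that can resolve the compose service name "kafka"
    # (e.g. inside a container on the same network) and that kafka-python is installed.
    from kafka import KafkaConsumer

    # Connect to the endpoint the broker registered in ZooKeeper:
    # EndPoint(kafka,9092,ListenerName(PLAINTEXT),PLAINTEXT).
    consumer = KafkaConsumer(
        bootstrap_servers="kafka:9092",
        client_id="swh-kafka-sanity-check",  # arbitrary label, shows up broker-side
    )

    # topics() returns the set of topic names the broker currently knows about;
    # on a freshly created log directory like this one it may well be empty.
    print(sorted(consumer.topics()))
    consumer.close()

If the broker were down, constructing the KafkaConsumer would raise kafka.errors.NoBrokersAvailable instead of connecting, so the script doubles as a cheap liveness probe.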