server.properties.erb
# Note: This file is managed by Puppet.
#
# see kafka.server.KafkaConfig for additional details and defaults
#
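# Illustrative sketch only (not from this repo): a manifest in the module
# would typically render this file with Puppet's stock template() function,
# for example:
#
#   file { '/etc/kafka/server.properties':
#     content => template('kafka/server.properties.erb'),
#   }
#
# The 'kafka/' module path and the target path are assumptions.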
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=<%= @broker_id %>
############################# Socket Server Settings #############################
# The port the socket server listens on
port=<%= @broker_port %>
# Hostname the broker will bind to and advertise to producers and consumers.
# If not set, the server will bind to all interfaces and advertise the value
# returned from java.net.InetAddress.getCanonicalHostName().
#host.name=localhost
# The number of threads handling network requests
num.network.threads=<%= @num_network_threads %>
# The number of threads doing disk I/O
num.io.threads=<%= @num_io_threads %>
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=<%= @socket_send_buffer_bytes %>
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=<%= @socket_receive_buffer_bytes %>
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=<%= @socket_request_max_bytes %>
############################# Log Basics #############################
# The directory under which to store log files
log.dirs=<%= Array(@log_dirs).join(',') %>
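# Illustrative values (not from this module): with @log_dirs set to
# ['/var/kafka/a', '/var/kafka/b'] the line above renders as
# log.dirs=/var/kafka/a,/var/kafka/b. Ruby's Array() also wraps a bare
# string, so a single directory passed as a string works too.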
# The default number of logical partitions per topic per server. More
# partitions allow greater parallelism for consumption, but also mean more files.
# Default to the number of log.dirs.
num.partitions=<%= Array(@log_dirs).length %>
# The default replication factor for automatically created topics.
# Default to the number of brokers in this cluster.
default.replication.factor=<%= @brokers.keys.length %>
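# Illustrative value: a @brokers hash with three entries (one per broker)
# renders default.replication.factor=3.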
# Enable auto creation of topic on the server. If this is set to true
# then attempts to produce, consume, or fetch metadata for a non-existent
# topic will automatically create it with the default replication factor
# and number of partitions.
auto.create.topics.enable=<%= @auto_create_topics_enable ? 'true' : 'false' %>
<% if @replica_lag_time_max_ms -%>
# If a follower hasn't sent any fetch requests for this window of time,
# the leader will remove the follower from ISR and treat it as dead.
replica.lag.time.max.ms=<%= @replica_lag_time_max_ms %>
<% end -%>
<% if @replica_lag_max_messages -%>
# If a replica falls more than this many messages behind the leader,
# the leader will remove the follower from ISR and treat it as dead.
replica.lag.max.messages=<%= @replica_lag_max_messages %>
<% end -%>
<% if @replica_socket_timeout_ms -%>
# The socket timeout for network requests to the leader for replicating data.
replica.socket.timeout.ms=<%= @replica_socket_timeout_ms %>
<% end -%>
<% if @replica_socket_receive_buffer_bytes -%>
# The socket receive buffer for network requests to the leader for replicating data.
replica.socket.receive.buffer.bytes=<%= @replica_socket_receive_buffer_bytes %>
<% end -%>
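# Template note: the trailing -%> on the <% if %> / <% end %> tags above
# trims the newline after each tag, so leaving any of these optional
# parameters unset emits no stray blank lines in the rendered file.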
# Number of threads used to replicate messages from leaders. Increasing this
# value can increase the degree of I/O parallelism in the follower broker.
# This is useful to temporarily increase if you have a broker that needs
# to catch up on messages to get back into the ISR.
num.replica.fetchers=<%= @num_replica_fetchers %>
# The number of bytes of messages to attempt to fetch for each partition in the
# fetch requests the replicas send to the leader.
replica.fetch.max.bytes=<%= @replica_fetch_max_bytes %>
############################# Log Flush Policy #############################
# The following configurations control the flush of data to disk. This is the most
# important performance knob in kafka.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
# 3. Throughput: The flush is generally the most expensive operation.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
log.flush.interval.messages=<%= @log_flush_interval_messages %>
# The maximum amount of time a message can sit in a log before we force a flush
log.flush.interval.ms=<%= @log_flush_interval_ms %>
# Per-topic overrides for log.flush.interval.ms
#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
<%= @log_retention_hours ? "log.retention.hours=#{@log_retention_hours}" : "#log.retention.hours=" %>
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
<%= @log_retention_bytes ? "log.retention.bytes=#{@log_retention_bytes}" : "#log.retention.bytes=" %>
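# Illustrative values: with @log_retention_hours = 168 and
# @log_retention_bytes unset, the two ternaries above render as:
#   log.retention.hours=168
#   #log.retention.bytes=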
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=<%= @log_segment_bytes %>
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.cleanup.interval.mins=<%= @log_cleanup_interval_mins %>
log.cleanup.policy=<%= @log_cleanup_policy %>
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=<%= @zookeeper_hosts.sort.join(',') %><%= @zookeeper_chroot if @zookeeper_chroot %>
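# Illustrative values: @zookeeper_hosts = ['zk2:2181', 'zk1:2181'] with
# @zookeeper_chroot = '/kafka' renders:
#   zookeeper.connect=zk1:2181,zk2:2181/kafka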
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=<%= @zookeeper_connection_timeout_ms %>
<% if @metrics_properties -%>
############################# Kafka Metrics Reporter ############################
kafka.metrics.polling.interval.secs=5
<% @metrics_properties.keys.sort.each do |key| -%>
<%= key %>=<%= @metrics_properties[key] %>
<% end -%>
<% end # if @metrics_properties -%>
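# Illustrative values: @metrics_properties set to
# { 'kafka.metrics.reporters' => 'com.example.StatsdMetricsReporter' }
# (a hypothetical reporter class) would render:
#   kafka.metrics.polling.interval.secs=5
#   kafka.metrics.reporters=com.example.StatsdMetricsReporter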