# The maximum size of a message body, in bytes.
message.max.bytes=1000012
# Whether to allow automatic topic creation. The default setting is false. Currently, topics can be created via the console or cloud API.
auto.create.topics.enable=false
# Whether to allow calling an API to delete a topic.
delete.topic.enable=true
# The maximum request size allowed by the broker is 16 MB.
socket.request.max.bytes=16777216
# Each IP address can establish up to 5000 connections with the broker.
max.connections.per.ip=5000
# The offset retention period is 7 days by default.
offsets.retention.minutes=10080
# When there are no access control list (ACL) settings, access is allowed for anyone.
allow.everyone.if.no.acl.found=true
# The log segment size is 1 GB.
log.segment.bytes=1073741824
# The log rolling check interval is 5 minutes. When the retention period is set to less than 5 minutes, you may need to wait up to 5 minutes for the log to be cleared.
log.retention.check.interval.ms=300000
Num = max(T/PT, T/CT) = T / min(PT, CT)
# Maximum message size at the topic level.
max.message.bytes=1000012
# The message format of version 0.10.2 is the V1 format.
message.format.version=0.10.2-IV0
# A replica not in the in-sync replicas (ISRs) can be selected as the leader. Availability is higher than reliability, and there is a data loss risk.
unclean.leader.election.enable=true
# The minimum number of ISRs required to commit producer requests. If the number of ISRs is less than this value, the server will no longer accept write requests with request.required.acks set to -1 or all.
min.insync.replicas=1
# Producers will attempt to package messages sent to the same partition into a single batch and send it to a broker. An upper limit of the batch.size can be set, with a default value of 16 KB. Setting batch.size too small will cause a decrease in throughput, while setting it too large will result in excessive memory usage.
batch.size=16384
# Three acknowledgment mechanisms are available for the Kafka producer, as described below:
# -1 or all: The broker responds to the producer to allow it to continue sending the next message or next batch of messages only after the leader has received the data and synchronized it to followers in all ISRs. This configuration provides the highest data reliability; as long as one ISR is alive, there will be no message loss. Note: This configuration does not ensure that all replicas have read and written the data before returning. It can be used in conjunction with the topic-level parameter min.insync.replicas.
# 0: The producer continues to send the next (batch of) message(s) without waiting for the broker acknowledgment that the synchronization is completed. This configuration provides high production performance but low data reliability. (Data may be lost if the broker server where the leader replica is stored fails, because the server will not receive any message if the producer is unaware of the failure.)
# 1: The producer sends the next (batch of) message(s) after the leader has successfully received the data as acknowledged. This configuration balances the production throughput and data reliability. (Messages may be lost if the broker where the leader replica is stored fails, but the replica has not yet been copied.)
# The default value 1 is used if this configuration is not explicitly set. You can set it based on your business requirements.
acks=1
# Configure the memory that the producer uses to cache messages to be sent to the broker. The user should adjust it based on the total memory size of the process where the producer resides.
buffer.memory=33554432
# If the speed of producing messages is faster than the speed at which messages are sent by the sender thread to the broker, the memory configured by buffer.memory will be used up, blocking the sending operations by the producer. This parameter is used to configure the maximum blocking time.
max.block.ms=60000
# Set the latency (ms) for sending messages, allowing more messages to be sent in batches. The default value 0 indicates sending messages immediately. When the messages to be sent reach the size configured for batch.size, the request will be sent immediately, regardless of whether the time set by linger.ms has been reached.
# It is recommended that users set linger.ms from 100 to 1000 based on actual scenarios. A larger value relatively increases throughput but will correspondingly increase latency.
linger.ms=100
# Set the amount of cached messages in the partition (bytes). When this value is reached, the producer will send batched messages to the broker. The default value is 16384. A too-small batch.size will increase the frequency of sending requests, which may degrade performance and impact stability. Users can appropriately increase this value according to the actual scenario. Note: This value is the upper limit. If the time has reached linger.ms before reaching this value, the producer will send messages.
batch.size=16384
# The upper size limit of the request packet that the producer can send is 1 MB by default. Note that when modifying this value, it must not exceed the upper size limit of 16 MB of the packet configured by the broker.
max.request.size=1048576
# Configure the compression format. Currently, compression is not allowed for versions 0.9 and earlier, and GZIP compression is not allowed for versions 0.10 and later.
compression.type=[none, snappy, lz4]
# The timeout for requests sent by clients to the broker should not be less than the replica.lag.time.max.ms configured for the broker, which is currently 10000 ms.
request.timeout.ms=30000
# The maximum number of unconfirmed requests that a client can send on each connection. When this parameter is greater than 1 and the number of retries is greater than 0, it may cause out-of-order data. When strict message ordering is required, it is recommended that clients set this value to 1.
max.in.flight.requests.per.connection=5
# The number of retries when a request error occurs. It is recommended to set this value to greater than 0 to ensure, to the maximum extent, that messages are not lost when requests fail.
retries=0
# The time between a failed request transmission and the next retry request.
retry.backoff.ms=100
# Configure whether to synchronize the offset to the broker after consuming messages, so that the latest offset can be obtained from the broker if the consumer fails.
enable.auto.commit=true
# Interval for automatically committing offsets when enable.auto.commit is set to true. It is recommended to set it to at least 1000.
auto.commit.interval.ms=5000
# The method to initialize an offset when there is no offset on the broker (such as on first consumption, or after the offset expires 7 days later), and to reset the offset when an OFFSET_OUT_OF_RANGE error is received.
# earliest: automatically reset to the smallest offset in the partition.
# latest: automatically reset to the largest offset in the partition. This is the default value.
# none: no automatic resetting. In this case, an OffsetOutOfRangeException error is raised.
auto.offset.reset=latest
# Identifies the consumer group to which the consumer belongs.
group.id=""
# Consumer timeout when the Kafka consumer group mechanism is used. When the broker does not receive a heartbeat from the consumer within this time, the consumer is considered to have failed, and the broker restarts the rebalance process. Currently, this value must lie within the range set on the broker, with group.min.session.timeout.ms set to 6000 and group.max.session.timeout.ms set to 300000.
session.timeout.ms=10000
# Interval at which consumers send heartbeats when the Kafka consumer group mechanism is used. This value must be less than session.timeout.ms, typically less than one third of that value.
heartbeat.interval.ms=3000
# Maximum interval allowed between successive poll calls when the Kafka consumer group mechanism is used. If poll is not called again within this period, the consumer is considered to have failed, and the broker will restart the rebalance process to assign its partitions to other consumers.
max.poll.interval.ms=300000
# The minimum size of data returned by a fetch request, set to 1 byte by default, indicating that the request can be returned as soon as possible. Increasing this value will increase throughput but will also increase latency.
fetch.min.bytes=1
# The maximum size of data returned by a fetch request, set to 50 MB by default.
fetch.max.bytes=52428800
# Fetch request waiting time.
fetch.max.wait.ms=500
# The maximum size of data returned per partition for a fetch request, which is 1 MB by default.
max.partition.fetch.bytes=1048576
# Number of records returned in one poll call.
max.poll.records=500
# Client request timeout period. If no response is received within this period, the request times out and fails.
request.timeout.ms=305000
Feedback