# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# This configuration file is intended for use in KRaft mode, where
# Apache ZooKeeper is not present. See config/kraft/README.md for details.
#
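#
# Before the first start in KRaft mode, the storage directories must be formatted
# with a cluster ID. A typical sequence, run from the Kafka distribution root
# (the config path below is an assumption; adjust it to where this file lives):
#
#   KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
#   bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties
#   bin/kafka-server-start.sh config/kraft/server.properties
#
# The same cluster ID must be used when formatting every node in the quorum.
#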

############################# Server Basics #############################

# The role of this server. Setting this puts us in KRaft mode
process.roles=broker,controller

# The node id associated with this instance's roles
node.id=2

# The connect string for the controller quorum
controller.quorum.voters=1@kafka-node-1:19092,2@kafka-node-2:19092,3@kafka-node-3:19092
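
# Each entry in the voter list has the form id@host:port, and the id must match the
# node.id configured on that host. This file is for node.id=2 (kafka-node-2); the
# other nodes would set, for example (illustrative):
#   on kafka-node-1: node.id=1
#   on kafka-node-3: node.id=3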

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://:9092,CONTROLLER://:19092
inter.broker.listener.name=PLAINTEXT

# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=PLAINTEXT://localhost:9092
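
# Advertising localhost only works for clients running on the same machine (or reaching
# the broker through a mapped port, e.g. in Docker). In a multi-host deployment each node
# would typically advertise its own resolvable hostname instead, for example (assumed
# hostname taken from the quorum list above):
#
#   advertised.listeners=PLAINTEXT://kafka-node-2:9092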

# Listener, host name, and port for the controller to advertise to the brokers. If
# this server is a controller, this listener must be configured.
controller.listener.names=CONTROLLER

# Maps listener names to security protocols; the default is for them to be the same.
# See the config documentation for more details.
listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
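
# To encrypt client traffic, an SSL listener could be added and mapped here. A minimal
# sketch (keystore/truststore paths and passwords are placeholders, not part of this
# deployment):
#
#   listeners=PLAINTEXT://:9092,SSL://:9093,CONTROLLER://:19092
#   advertised.listeners=PLAINTEXT://localhost:9092,SSL://localhost:9093
#   ssl.keystore.location=/path/to/kafka.server.keystore.jks
#   ssl.keystore.password=changeit
#   ssl.truststore.location=/path/to/kafka.server.truststore.jks
#   ssl.truststore.password=changeit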

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/tmp/kraft-combined-logs
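
# /tmp is only suitable for local testing, since its contents may be wiped on reboot.
# A durable installation would point this at a dedicated data directory instead, for
# example (path is illustrative only):
#
#   log.dirs=/var/lib/kafka/data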

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
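
# The partition count can also be chosen per topic at creation time, which overrides
# this default. An illustrative command (topic name and counts are placeholders):
#
#   bin/kafka-topics.sh --bootstrap-server localhost:9092 --create \
#     --topic my-topic --partitions 3 --replication-factor 3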

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in a RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
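
# On a cluster like the three-node quorum above, typical production-style values would
# look like the following (illustrative, not taken from this deployment):
#
#   offsets.topic.replication.factor=3
#   transaction.state.log.replication.factor=3
#   transaction.state.log.min.isr=2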

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
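
# The per-topic overrides mentioned above use the topic-level configs flush.messages and
# flush.ms. An illustrative command (topic name and values are placeholders):
#
#   bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter \
#     --entity-type topics --entity-name my-topic \
#     --add-config flush.messages=10000,flush.ms=1000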

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
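
# Retention can also be overridden per topic with the topic-level configs retention.ms
# and retention.bytes. An illustrative command (topic name and values are placeholders):
#
#   bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter \
#     --entity-type topics --entity-name my-topic \
#     --add-config retention.ms=86400000,retention.bytes=1073741824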