## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
#   imageRegistry: myRegistryName
#   imagePullSecrets:
#     - myRegistryKeySecretName
#   storageClass: myStorageClass

## Bitnami Kafka image version
## ref: https://hub.docker.com/r/bitnami/kafka/tags/
##
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 2.8.0-debian-10-r30
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Example:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []

## Set to true if you would like to see extra information on logs
##
debug: false

## String to partially override kafka.fullname template (will maintain the release name)
##
# nameOverride:

## String to fully override kafka.fullname template
##
# fullnameOverride:

## Deployment pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
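## Illustrative sketch only (the IP and hostname below are placeholders): a hostAliases entry
## adds extra records to /etc/hosts in the Kafka pods, e.g.:
##
## hostAliases:
##   - ip: "192.168.1.10"
##     hostnames:
##       - "kafka.internal.example"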

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
##
podManagementPolicy: Parallel

## Kubernetes Cluster Domain
##
clusterDomain: cluster.local

## Add labels to all the deployed resources
##
commonLabels: {}

## Add annotations to all the deployed resources
##
commonAnnotations: {}

## Kafka Configuration
## Specify content for server.properties
## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart)
## The server.properties is auto-generated based on other parameters when this parameter is not specified
##
## Example:
## config: |-
##   broker.id=-1
##   listeners=PLAINTEXT://:9092
##   advertised.listeners=PLAINTEXT://KAFKA_IP:9092
##   num.network.threads=3
##   num.io.threads=8
##   socket.send.buffer.bytes=102400
##   socket.receive.buffer.bytes=102400
##   socket.request.max.bytes=104857600
##   log.dirs=/bitnami/kafka/data
##   num.partitions=1
##   num.recovery.threads.per.data.dir=1
##   offsets.topic.replication.factor=1
##   transaction.state.log.replication.factor=1
##   transaction.state.log.min.isr=1
##   log.flush.interval.messages=10000
##   log.flush.interval.ms=1000
##   log.retention.hours=168
##   log.retention.bytes=1073741824
##   log.segment.bytes=1073741824
##   log.retention.check.interval.ms=300000
##   zookeeper.connect=ZOOKEEPER_SERVICE_NAME
##   zookeeper.connection.timeout.ms=6000
##   group.initial.rebalance.delay.ms=0
##
# config:

## ConfigMap with Kafka Configuration
## NOTE: This will override config AND any KAFKA_CFG_ environment variables.
##
# existingConfigmap:

## Kafka Log4J Configuration
## An optional log4j.properties file to overwrite the default of the Kafka brokers.
## See an example log4j.properties at:
## https://github.com/apache/kafka/blob/trunk/config/log4j.properties
##
# log4j:

## Kafka Log4j ConfigMap
## The name of an existing ConfigMap containing a log4j.properties file.
## NOTE: this will override log4j.
##
# existingLog4jConfigMap:

## Kafka's Java Heap size
##
heapOpts: -Xmx1024m -Xms1024m

## Switch to enable topic deletion or not.
##
deleteTopicEnable: false

## Switch to enable auto creation of topics.
## Enabling auto creation of topics is not recommended for production or similar environments.
##
autoCreateTopicsEnable: true

## The number of messages to accept before forcing a flush of data to disk.
##
logFlushIntervalMessages: _10000

## The maximum amount of time a message can sit in a log before we force a flush.
##
logFlushIntervalMs: 1000

## A size-based retention policy for logs.
##
logRetentionBytes: _1073741824

## The interval at which log segments are checked to see if they can be deleted.
##
logRetentionCheckIntervalMs: 300000

## The minimum age of a log file to be eligible for deletion due to age.
##
logRetentionHours: 168

## The maximum size of a log segment file. When this size is reached a new log segment will be created.
##
logSegmentBytes: _1073741824

## A comma separated list of directories under which to store log files.
##
logsDirs: /bitnami/kafka/data

## The largest record batch size allowed by Kafka
##
maxMessageBytes: _1000012

## Default replication factors for automatically created topics
##
defaultReplicationFactor: 1

## The replication factor for the offsets topic
##
offsetsTopicReplicationFactor: 1

## The replication factor for the transaction topic
##
transactionStateLogReplicationFactor: 1

## Overridden min.insync.replicas config for the transaction topic
##
transactionStateLogMinIsr: 1

## The number of threads doing disk I/O.
##
numIoThreads: 8

## The number of threads handling network requests.
##
numNetworkThreads: 3

## The default number of log partitions per topic.
##
numPartitions: 1

## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
##
numRecoveryThreadsPerDataDir: 1

## The receive buffer (SO_RCVBUF) used by the socket server.
##
socketReceiveBufferBytes: 102400

## The maximum size of a request that the socket server will accept (protection against OOM).
##
socketRequestMaxBytes: _104857600

## The send buffer (SO_SNDBUF) used by the socket server.
##
socketSendBufferBytes: 102400

## Timeout in ms for connecting to zookeeper.
##
zookeeperConnectionTimeoutMs: 6000

## Command and args for running the container. Use array form.
##
command:
  - /scripts/setup.sh
args: []

## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY}
## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration
## Example:
## extraEnvVars:
##   - name: KAFKA_CFG_BACKGROUND_THREADS
##     value: "10"
##
extraEnvVars: []

## extraVolumes and extraVolumeMounts allow you to mount other volumes
## Examples:
# extraVolumes:
#   - name: kafka-jaas
#     secret:
#       secretName: kafka-jaas
# extraVolumeMounts:
#   - name: kafka-jaas
#     mountPath: /bitnami/kafka/config/kafka_jaas.conf
#     subPath: kafka_jaas.conf
extraVolumes: []
extraVolumeMounts: []

## Extra objects to deploy (value evaluated as a template)
##
extraDeploy: []
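## Illustrative sketch only (the object and its name are placeholders, rendered as a template):
## an extraDeploy entry could add a small auxiliary object next to the release, for example:
##
## extraDeploy:
##   - |
##     apiVersion: v1
##     kind: ConfigMap
##     metadata:
##       name: {{ include "kafka.fullname" . }}-extra-config
##     data:
##       note: "deployed alongside the Kafka release"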

## Authentication parameters
## https://github.com/bitnami/bitnami-docker-kafka#security
##
auth:
  ## Authentication protocol for client and inter-broker communications
  ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls'
  ## This table shows the security provided on each protocol:
  ## | Method    | Authentication               | Encryption via TLS |
  ## | plaintext | None                         | No                 |
  ## | tls       | None                         | Yes                |
  ## | mtls      | Yes (two-way authentication) | Yes                |
  ## | sasl      | Yes (via SASL)               | No                 |
  ## | sasl_tls  | Yes (via SASL)               | Yes                |
  ##
  clientProtocol: plaintext
  interBrokerProtocol: plaintext

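  ## Illustrative sketch only (user names, passwords and the secret name below are placeholders):
  ## to protect both client and inter-broker traffic with SASL over TLS, one could set, e.g.:
  ##
  ## clientProtocol: sasl_tls
  ## interBrokerProtocol: sasl_tls
  ## sasl:
  ##   jaas:
  ##     clientUsers:
  ##       - app-user
  ##     clientPasswords:
  ##       - app-password
  ## tls:
  ##   existingSecret: kafka-jks-certs
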
  ## SASL configuration
  ##
  sasl:
    ## Comma separated list of allowed SASL mechanisms.
    ## Note: ignored unless `auth.clientProtocol` or `auth.interBrokerProtocol` are using either `sasl` or `sasl_tls`
    ##
    mechanisms: plain,scram-sha-256,scram-sha-512
    ## SASL mechanism for inter broker communication.
    ##
    interBrokerMechanism: plain
    ## JAAS configuration for SASL authentication.
    ##
    jaas:
      ## Kafka client user list
      ##
      ## clientUsers:
      ##   - user1
      ##   - user2
      ##
      clientUsers:
        - user
      ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers.
      ##
      ## clientPasswords:
      ##   - password1
      ##   - password2
      ##
      clientPasswords: []
      ## Kafka inter broker communication user
      ##
      interBrokerUser: admin
      ## Kafka inter broker communication password
      ##
      interBrokerPassword: ""
      ## Kafka Zookeeper user
      ##
      zookeeperUser: ""
      ## Kafka Zookeeper password
      ##
      zookeeperPassword: ""
      ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser.
      ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create:
      ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD
      ##
      existingSecret: ""

  ## DEPRECATED: use `auth.sasl.mechanisms` instead.
  saslMechanisms: plain,scram-sha-256,scram-sha-512
  ## DEPRECATED: use `auth.sasl.interBrokerMechanism` instead.
  saslInterBrokerMechanism: plain
  ## DEPRECATED: use `auth.sasl.jaas` instead.
  jaas:
    clientUsers:
      - user
    clientPasswords: []
    interBrokerUser: admin
    interBrokerPassword: ""
    zookeeperUser: ""
    zookeeperPassword: ""
    existingSecret: ""

  ## TLS configuration
  ##
  tls:
    ## Format to use for TLS certificates
    ## Supported values: 'jks' and 'pem'
    ##
    type: jks
    ## Name of an existing secret containing the TLS certificates
    ##
    ## When using 'jks' format for certificates, the secret should contain:
    ##   - A truststore
    ##   - One keystore per Kafka broker you have in the cluster
    ## Create this secret following the steps below:
    ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
    ## 2) Rename your truststore to `kafka.truststore.jks`.
    ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker.
    ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create:
    ##    kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ...
    ##
    ## When using 'pem' format for certificates, the secret should contain:
    ##   - A public CA certificate
    ##   - One public certificate and one private key per Kafka broker you have in the cluster
    ## Create this secret following the steps below:
    ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA
    ## 2) Rename your CA file to `kafka.truststore.pem`.
    ## 3) Rename your certificates to `kafka-X.keystore.pem` where X is the ID of each Kafka broker.
    ## 4) Rename your keys to `kafka-X.keystore.key` where X is the ID of each Kafka broker.
    ## 5) Run the command below where SECRET_NAME is the name of the secret you want to create:
    ##    kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.pem --from-file=./kafka-0.keystore.pem --from-file=./kafka-0.keystore.key --from-file=./kafka-1.keystore.pem --from-file=./kafka-1.keystore.key ...
    ##
    existingSecret: ""
    ## Create self-signed TLS certificates. Currently only supported for 'pem' format.
    ## Note: ignored when using 'jks' format or when `auth.tls.existingSecret` is not empty
    ##
    autoGenerated: false
    ## Password to access the JKS files or PEM key when they are password-protected.
    ##
    password: ""
    ## Name of an existing secret containing your JKS truststore if the JKS truststore doesn't exist
    ## or is different from the one in the `auth.tls.existingSecret`.
    ## Note: ignored when using 'pem' format for certificates.
    ##
    jksTruststoreSecret: ""
    ## The secret key from the `auth.tls.existingSecret` containing the keystore with a SAN certificate.
    ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services:
    ##   - kafka-0.kafka-headless.kafka.svc.cluster.local
    ##   - kafka-1.kafka-headless.kafka.svc.cluster.local
    ##   - kafka-2.kafka-headless.kafka.svc.cluster.local
    ## Note: ignored when using 'pem' format for certificates.
    ##
    jksKeystoreSAN: ""
    ## The secret key from the `auth.tls.existingSecret` or `auth.tls.jksTruststoreSecret` containing the truststore.
    ## Note: ignored when using 'pem' format for certificates.
    ##
    jksTruststore: ""
    ## The endpoint identification algorithm used by clients to validate server host name.
    ## Disable server host name verification by setting it to an empty string.
    ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
    ##
    endpointIdentificationAlgorithm: https

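  ## Illustrative sketch only: for a quick, non-production test with chart-generated self-signed
  ## certificates, the 'pem' format and autoGenerated could be combined, e.g.:
  ##
  ## tls:
  ##   type: pem
  ##   autoGenerated: true
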
  ## DEPRECATED: use `auth.tls.existingSecret` instead.
  jksSecret: ""
  ## DEPRECATED: use `auth.tls.jksTruststoreSecret` instead.
  jksTruststoreSecret: ""
  ## DEPRECATED: use `auth.tls.jksKeystoreSAN` instead.
  jksKeystoreSAN: ""
  ## DEPRECATED: use `auth.tls.jksTruststore` instead.
  jksTruststore: ""
  ## DEPRECATED: use `auth.tls.password` instead.
  jksPassword: ""
  ## DEPRECATED: use `auth.tls.endpointIdentificationAlgorithm` instead.
  tlsEndpointIdentificationAlgorithm: https

## The address(es) the socket server listens on.
## When it's set to an empty array, the listeners will be configured
## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
##
listeners: []

## The address(es) (hostname:port) the brokers will advertise to producers and consumers.
## When it's set to an empty array, the advertised listeners will be configured
## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
##
advertisedListeners: []

## The listener->protocol mapping
## When it's nil, the listeners will be configured
## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
##
# listenerSecurityProtocolMap:

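## Illustrative sketch only (listener names and ports are placeholders): a custom layout that keeps
## inter-broker traffic on a dedicated listener could look like the following:
##
## listeners:
##   - INTERNAL://:9093
##   - CLIENT://:9092
## advertisedListeners:
##   - INTERNAL://:9093
##   - CLIENT://:9092
## listenerSecurityProtocolMap: INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT
## interBrokerListenerName: INTERNAL
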
## Allow the use of the PLAINTEXT listener.
##
allowPlaintextListener: true

## Name of listener used for communication between brokers.
##
interBrokerListenerName: INTERNAL

## Number of Kafka brokers to deploy
##
replicaCount: 1

## Minimal broker.id value
## Brokers increment their ID starting at this minimal value.
## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively.
##
minBrokerId: 0

## StatefulSet update strategy. Can be set to RollingUpdate or OnDelete; defaults to RollingUpdate.
## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
##
updateStrategy: RollingUpdate

## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
##
# rollingUpdatePartition:

## Pod labels. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}

## Pod annotations. Evaluated as a template
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}

## Name of the priority class to be used by kafka pods; the priority class needs to be created beforehand
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""

## Pod affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
## Allowed values: soft, hard
##
podAffinityPreset: ""

## Pod anti-affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
## Allowed values: soft, hard
##
podAntiAffinityPreset: soft

## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## Allowed values: soft, hard
##
nodeAffinityPreset:
  ## Node affinity type
  ## Allowed values: soft, hard
  ##
  type: ""
  ## Node label key to match
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## Node label values to match
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []

## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []

## Configure the grace period (in seconds) applied on SIGTERM
## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
##
# terminationGracePeriodSeconds: 30

## Kafka pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
podSecurityContext:
  enabled: true
  fsGroup: 1001
  runAsUser: 1001

## Kafka containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## Example:
## containerSecurityContext:
##   capabilities:
##     drop: ["NET_RAW"]
##   readOnlyRootFilesystem: true
##
containerSecurityContext: {}

## Kafka containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
  #   cpu: 250m
  #   memory: 1Gi
  requests: {}
  #   cpu: 250m
  #   memory: 256Mi

## Kafka containers' liveness and readiness probes. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 10
  timeoutSeconds: 5
  # failureThreshold: 3
  # periodSeconds: 10
  # successThreshold: 1
readinessProbe:
  enabled: true
  initialDelaySeconds: 5
  failureThreshold: 6
  timeoutSeconds: 5
  # periodSeconds: 10
  # successThreshold: 1

## Custom liveness/readiness probes that will override the default ones
##
customLivenessProbe: {}
customReadinessProbe: {}

## Pod Disruption Budget configuration
## The PDB will only be created if replicaCount is greater than 1
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
##
pdb:
  create: false
  ## Min number of pods that must still be available after the eviction
  ##
  # minAvailable: 1
  ## Max number of pods that can be unavailable after the eviction
  ##
  maxUnavailable: 1

## Add sidecars to the pod.
## Example:
## sidecars:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
sidecars: {}

## Service parameters
##
service:
  ## Service type
  ##
  type: ClusterIP
  ## Kafka port for client connections
  ##
  port: 9092
  ## Kafka port for inter-broker connections
  ##
  internalPort: 9093
  ## Kafka port for external connections
  ##
  externalPort: 9094
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  nodePorts:
    client: ""
    external: ""
  ## Set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  # loadBalancerIP:
  ## Load Balancer sources
  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ## Example:
  ## loadBalancerSourceRanges:
  ##   - 10.10.10.0/24
  ##
  loadBalancerSourceRanges: []
  ## Provide any additional annotations which may be required. Evaluated as a template
  ##
  annotations: {}

## External Access to Kafka brokers configuration
##
externalAccess:
  ## Enable Kubernetes external cluster access to Kafka brokers
  ##
  enabled: true

  ## External IPs auto-discovery configuration
  ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API
  ## Note: RBAC might be required
  ##
  autoDiscovery:
    ## Enable external IP/ports auto-discovery
    ##
    enabled: true
    ## Bitnami Kubectl image
    ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
    ##
    image:
      registry: docker.io
      repository: bitnami/kubectl
      tag: 1.19.11-debian-10-r14
      ## Specify an imagePullPolicy
      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## Example:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []
    ## Init Container resource requests and limits
    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      # We usually recommend not to specify default resources and to leave this as a conscious
      # choice for the user. This also increases chances charts run on environments with little
      # resources, such as Minikube. If you do want to specify resources, uncomment the following
      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
      limits: {}
      #   cpu: 100m
      #   memory: 128Mi
      requests: {}
      #   cpu: 100m
      #   memory: 128Mi

  ## Parameters to configure K8s service(s) used to externally access Kafka brokers
  ## A new service per broker will be created
  ##
  service:
    ## Service type. Allowed values: LoadBalancer or NodePort
    ##
    type: NodePort
    # type: LoadBalancer
    ## Port used when service type is LoadBalancer
    ##
    port: 9094
    ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
    ## Example:
    ## loadBalancerIPs:
    ##   - X.X.X.X
    ##   - Y.Y.Y.Y
    ##
    # loadBalancerIPs: []
    ## Load Balancer sources
    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
    ## Example:
    ## loadBalancerSourceRanges:
    ##   - 10.10.10.0/24
    ##
    # loadBalancerSourceRanges: []
    ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount
    ## Example:
    ## nodePorts:
    ##   - 30001
    ##   - 30002
    ##
    nodePorts: []
    ## Use worker host IPs
    useHostIPs: false
    ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners.
    ## If not specified, the container will try to get the kubernetes node external IP
    ##
    # domain: mydomain.com
    ## Provide any additional annotations which may be required. Evaluated as a template
    ##
    annotations: {}

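## Illustrative sketch only (node ports and the domain are placeholders): with auto-discovery
## disabled, external NodePort access could instead be pinned to fixed ports per broker, e.g.:
##
## externalAccess:
##   enabled: true
##   autoDiscovery:
##     enabled: false
##   service:
##     type: NodePort
##     nodePorts:
##       - 30001
##     domain: kafka.example.com
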
## Persistence parameters
##
persistence:
  enabled: true
  ## A manually managed Persistent Volume and Claim
  ## If defined, PVC must be created manually before volume will be bound
  ## The value is evaluated as a template
  ##
  # existingClaim:
  ## PV Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner.
  ##
  # storageClass: "-"
  ## PV Access Mode
  ##
  accessModes:
    - ReadWriteOnce
  ## PVC size
  ##
  size: 8Gi
  ## PVC annotations
  ##
  annotations: {}
  ## selector can be used to match an existing PersistentVolume
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  selector: {}
  ## Mount point for persistence
  ##
  mountPath: /bitnami/kafka

## Log Persistence parameters
##
logPersistence:
  enabled: false
  ## A manually managed Persistent Volume and Claim
  ## If defined, PVC must be created manually before volume will be bound
  ## The value is evaluated as a template
  ##
  # existingClaim:
  # existingLogClaim:
  ## PV Storage Class
  ## It is inherited from persistence.storageClass
  ##
  ## PV Access Mode
  ##
  accessModes:
    - ReadWriteOnce
  ## PVC size
  ##
  size: 8Gi
  ## PVC annotations
  ##
  annotations: {}
  ## selector can be used to match an existing PersistentVolume
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  selector: {}
  ## Mount path for persistent logs
  ##
  mountPath: /opt/bitnami/kafka/logs

## Init Container parameters
## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component
## values from the securityContext section of the component
##
volumePermissions:
  enabled: false
  ## The security context for the volumePermissions init container
  ##
  securityContext:
    runAsUser: 0
  image:
    registry: docker.io
    repository: bitnami/bitnami-shell
    tag: 10-debian-10-r98
    ## Specify an imagePullPolicy
    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
    ##
    pullPolicy: Always
    ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## Example:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
  ## Init Container resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    limits: {}
    #   cpu: 100m
    #   memory: 128Mi
    requests: {}
    #   cpu: 100m
    #   memory: 128Mi

## Kafka pods ServiceAccount
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  ##
  create: true
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the kafka.serviceAccountName template
  ##
  # name:
  ## Allows automounting of the ServiceAccount token on the created ServiceAccount.
  ## Can be set to false if pods using this ServiceAccount do not need to use the K8s API.
  automountServiceAccountToken: true

## Role Based Access
## ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
  ## Specifies whether RBAC rules should be created,
  ## binding the Kafka ServiceAccount to a role
  ## that allows Kafka pods to query the K8s API
  ##
  create: true

## Kafka provisioning
##
provisioning:
  enabled: false

  image:
    registry: docker.io
    repository: bitnami/kafka
    tag: 2.8.0-debian-10-r29
    ## Specify an imagePullPolicy
    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
    ##
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## Example:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []

  ## Set to true if you would like to see extra information on logs
  ##
  debug: false

  ## Default number of partitions for topics when it is not specified on the topic itself.
  numPartitions: 1

  ## Default replication factor for topics when it is not specified on the topic itself.
  replicationFactor: 1

  ## Use an alternate scheduler, e.g. "stork".
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  # schedulerName:

  podAnnotations: {}

  resources:
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    limits: {}
    #   cpu: 250m
    #   memory: 1Gi
    requests: {}
    #   cpu: 250m
    #   memory: 256Mi

  ## Command and args for running the container (set to default if not set). Use array form.
  ##
  command: []
  args: []

  topics: []
  # - name: topic-name
  #   partitions: 1
  #   replicationFactor: 1
  #   ## https://kafka.apache.org/documentation/#topicconfigs
  #   config:
  #     max.message.bytes: 64000
  #     flush.messages: 1

## Prometheus Exporters / Metrics
##
metrics:
  ## Prometheus Kafka Exporter: exposes complementary metrics to the JMX Exporter
  ##
  kafka:
    enabled: false

    ## Bitnami Kafka exporter image
    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
    ##
    image:
      registry: docker.io
      repository: bitnami/kafka-exporter
      tag: 1.3.1-debian-10-r14
      ## Specify an imagePullPolicy
      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## Example:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []

    ## Use an alternate scheduler, e.g. "stork".
    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
    ##
    # schedulerName:

    ## Extra flags to be passed to Kafka exporter
    ## Example:
    ## extraFlags:
    ##   tls.insecure-skip-tls-verify: ""
    ##   web.telemetry-path: "/metrics"
    ##
    extraFlags: {}

    ## Name of the existing secret containing the optional certificate and key files
    ## for Kafka Exporter client authentication
    ##
    # certificatesSecret:

    ## The secret key from the certificatesSecret if the 'client-cert' key is different from the default (cert-file)
    ##
    tlsCert: cert-file

    ## The secret key from the certificatesSecret if the 'client-key' key is different from the default (key-file)
    ##
    tlsKey: key-file

    ## Name of the existing secret containing the optional CA certificate
    ## for Kafka Exporter client authentication
    ##
    # tlsCaSecret:

    ## The secret key from the certificatesSecret or tlsCaSecret if the 'ca-cert' key is different from the default (ca-file)
    ##
    tlsCaCert: ca-file

    ## Prometheus Kafka Exporter's resource requests and limits
    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      # We usually recommend not to specify default resources and to leave this as a conscious
      # choice for the user. This also increases chances charts run on environments with little
      # resources, such as Minikube. If you do want to specify resources, uncomment the following
      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
      limits: {}
      #   cpu: 100m
      #   memory: 128Mi
      requests: {}
      #   cpu: 100m
      #   memory: 128Mi

    ## Affinity for pod assignment
    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
    ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
    ##
    affinity: {}

    ## Node labels for pod assignment
    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
    nodeSelector: {}

    ## Tolerations for pod assignment
    ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    ##
    tolerations: []

    ## Add init containers to the Kafka exporter pods.
    ## Example:
    ## initContainers:
    ##   - name: your-image-name
    ##     image: your-image
    ##     imagePullPolicy: Always
    ##     ports:
    ##       - name: portname
    ##         containerPort: 1234
    ##
    initContainers: {}

    ## Service configuration
    ##
    service:
      ## Kafka Exporter Service type
      ##
      type: ClusterIP
      ## Kafka Exporter Prometheus port
      ##
      port: 9308
      ## Specify the nodePort value for the LoadBalancer and NodePort service types.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
      ##
      nodePort: ""
      ## Set the LoadBalancer service type to internal only.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
      ##
      # loadBalancerIP:
      ## Load Balancer sources
      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
      ## Example:
      ## loadBalancerSourceRanges:
      ##   - 10.10.10.0/24
      ##
      loadBalancerSourceRanges: []
      ## Set the Cluster IP to use
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
      ##
      # clusterIP: None
      ## Annotations for the Kafka Exporter Prometheus metrics service
      ##
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}"
        prometheus.io/path: "/metrics"

  ## Prometheus JMX Exporter: exposes the majority of Kafka's metrics
  ##
  jmx:
    enabled: false

    ## Bitnami JMX exporter image
    ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
    ##
    image:
      registry: docker.io
      repository: bitnami/jmx-exporter
      tag: 0.15.0-debian-10-r121
      ## Specify an imagePullPolicy
      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## Example:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []

    ## Prometheus JMX Exporter's resource requests and limits
    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      # We usually recommend not to specify default resources and to leave this as a conscious
      # choice for the user. This also increases chances charts run on environments with little
      # resources, such as Minikube. If you do want to specify resources, uncomment the following
      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
      limits: {}
      #   cpu: 100m
      #   memory: 128Mi
      requests: {}
      #   cpu: 100m
      #   memory: 128Mi

    ## Service configuration
    ##
    service:
      ## JMX Exporter Service type
      ##
      type: ClusterIP
      ## JMX Exporter Prometheus port
      ##
      port: 5556
      ## Specify the nodePort value for the LoadBalancer and NodePort service types.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
      ##
      nodePort: ""
      ## Set the LoadBalancer service type to internal only.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
      ##
      # loadBalancerIP:
      ## Load Balancer sources
      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
      ## Example:
      ## loadBalancerSourceRanges:
      ##   - 10.10.10.0/24
      ##
      loadBalancerSourceRanges: []
      ## Set the Cluster IP to use
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
      ##
      # clusterIP: None
      ## Annotations for the JMX Exporter Prometheus metrics service
      ##
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}"
        prometheus.io/path: "/"

    ## JMX whitelist objects: can be set to control which JMX metrics are exposed. Only whitelisted
    ## values will be exposed via the JMX Exporter, and they must also be exposed via Rules. To expose
    ## all metrics (warning: this is very excessive and they are not formatted in a Prometheus style),
    ## (1) set `whitelistObjectNames: []` and (2) comment out `overrideConfig` above.
    ##
    whitelistObjectNames:
      - kafka.controller:*
      - kafka.server:*
      - java.lang:*
      - kafka.network:*
      - kafka.log:*

    ## Prometheus JMX exporter configuration
    ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template
    ##
    ## Credits to the incubator/kafka chart for the JMX configuration.
    ## https://github.com/helm/charts/tree/master/incubator/kafka
    ##
    config: |-
      jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
      lowercaseOutputName: true
      lowercaseOutputLabelNames: true
      ssl: false
      {{- if .Values.metrics.jmx.whitelistObjectNames }}
      whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
      {{- end }}
    ## ConfigMap with Prometheus JMX exporter configuration
    ## NOTE: This will override metrics.jmx.config
    ##
    # existingConfigmap:

  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    enabled: false
    ## Namespace in which Prometheus is running
    ##
    # namespace: monitoring

    ## Interval at which metrics should be scraped.
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # interval: 10s

    ## Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # scrapeTimeout: 10s

    ## ServiceMonitor selector labels
    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
    ##
    # selector:
    #   prometheus: my-prometheus

    ## Relabel configuration for the metrics.
    ##
    # relabelings: []

    ## MetricRelabelConfigs to apply to samples before ingestion.
    ##
    # metricRelabelings: []

##
## Zookeeper chart configuration
##
## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
##
zookeeper:
  enabled: true
  auth:
    ## Enable Zookeeper auth
    ##
    enabled: false
    ## User that Zookeeper clients will use to authenticate
    ##
    # clientUser:
    ## Password that Zookeeper clients will use to authenticate
    ##
    # clientPassword:
    ## Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
    ##
    # serverUsers:
    ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
    ##
    # serverPasswords:

## This value is only used when zookeeper.enabled is set to false
##
externalZookeeper:
  ## Server or list of external zookeeper servers to use.
  ##
  servers: []
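  ## Illustrative sketch only (hostnames are placeholders): when zookeeper.enabled is false,
  ## an existing ensemble could be referenced like:
  ##
  ## servers:
  ##   - zookeeper-0.zookeeper-headless.default.svc.cluster.local:2181
  ##   - zookeeper-1.zookeeper-headless.default.svc.cluster.local:2181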

## Extra init containers to add to the deployment
##
initContainers: []