---
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237 |
- ## Global Docker image parameters
- ## Please, note that this will override the image parameters, including dependencies, configured to use the global value
- ## Current available global Docker image parameters: imageRegistry and imagePullSecrets
- ##
- # global:
- # imageRegistry: myRegistryName
- # imagePullSecrets:
- # - myRegistryKeySecretName
- # storageClass: myStorageClass
- ## Bitnami Kafka image version
- ## ref: https://hub.docker.com/r/bitnami/kafka/tags/
- ##
- image:
- registry: docker.io
- repository: bitnami/kafka
- tag: 2.8.0-debian-10-r30
- ## Specify an imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: IfNotPresent
- ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## Example:
- ## pullSecrets:
- ## - myRegistryKeySecretName
- ##
- pullSecrets: []
- ## Set to true if you would like to see extra information on logs
- ##
- debug: false
- ## String to partially override kafka.fullname template (will maintain the release name)
- ##
- # nameOverride:
- ## String to fully override kafka.fullname template
- ##
- # fullnameOverride:
- ## Deployment pod host aliases
- ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
- ##
- hostAliases: []
- ## Use an alternate scheduler, e.g. "stork".
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
- ##
- # schedulerName:
- ## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
- ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
- ##
- podManagementPolicy: Parallel
- ## Kubernetes Cluster Domain
- ##
- clusterDomain: cluster.local
- ## Add labels to all the deployed resources
- ##
- commonLabels: {}
- ## Add annotations to all the deployed resources
- ##
- commonAnnotations: {}
- ## Kafka Configuration
- ## Specify content for server.properties
- ## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart)
- ## The server.properties is auto-generated based on other parameters when this parameter is not specified
- ##
- ## Example:
- ## config: |-
- ## broker.id=-1
- ## listeners=PLAINTEXT://:9092
- ## advertised.listeners=PLAINTEXT://KAFKA_IP:9092
- ## num.network.threads=3
- ## num.io.threads=8
- ## socket.send.buffer.bytes=102400
- ## socket.receive.buffer.bytes=102400
- ## socket.request.max.bytes=104857600
- ## log.dirs=/bitnami/kafka/data
- ## num.partitions=1
- ## num.recovery.threads.per.data.dir=1
- ## offsets.topic.replication.factor=1
- ## transaction.state.log.replication.factor=1
- ## transaction.state.log.min.isr=1
- ## log.flush.interval.messages=10000
- ## log.flush.interval.ms=1000
- ## log.retention.hours=168
- ## log.retention.bytes=1073741824
- ## log.segment.bytes=1073741824
- ## log.retention.check.interval.ms=300000
- ## zookeeper.connect=ZOOKEEPER_SERVICE_NAME
- ## zookeeper.connection.timeout.ms=6000
- ## group.initial.rebalance.delay.ms=0
- ##
- # config:
- ## ConfigMap with Kafka Configuration
- ## NOTE: This will override config AND any KAFKA_CFG_ environment variables.
- ##
- # existingConfigmap:
- ## Kafka Log4J Configuration
- ## An optional log4j.properties file to overwrite the default of the Kafka brokers.
- ## See an example log4j.properties at:
- ## https://github.com/apache/kafka/blob/trunk/config/log4j.properties
- ##
- # log4j:
- ## Kafka Log4j ConfigMap
- ## The name of an existing ConfigMap containing a log4j.properties file.
- ## NOTE: this will override log4j.
- ##
- # existingLog4jConfigMap:
- ## Kafka's Java Heap size
- ##
- heapOpts: -Xmx1024m -Xms1024m
- ## Switch to enable topic deletion or not.
- ##
- deleteTopicEnable: false
- ## Switch to enable auto creation of topics.
- ## Enabling auto creation of topics is not recommended for production or similar environments.
- ##
- autoCreateTopicsEnable: true
- ## The number of messages to accept before forcing a flush of data to disk.
- ##
- logFlushIntervalMessages: _10000
- ## The maximum amount of time a message can sit in a log before we force a flush.
- ##
- logFlushIntervalMs: 1000
- ## A size-based retention policy for logs.
- ##
- logRetentionBytes: _1073741824
- ## The interval at which log segments are checked to see if they can be deleted.
- ##
- logRetentionCheckIntervalMs: 300000
- ## The minimum age of a log file to be eligible for deletion due to age.
- ##
- logRetentionHours: 168
- ## The maximum size of a log segment file. When this size is reached a new log segment will be created.
- ##
- logSegmentBytes: _1073741824
- ## A comma separated list of directories under which to store log files.
- ##
- logsDirs: /bitnami/kafka/data
- ## The largest record batch size allowed by Kafka
- ##
- maxMessageBytes: _1000012
- ## Default replication factors for automatically created topics
- ##
- defaultReplicationFactor: 1
- ## The replication factor for the offsets topic
- ##
- offsetsTopicReplicationFactor: 1
- ## The replication factor for the transaction topic
- ##
- transactionStateLogReplicationFactor: 1
- ## Overridden min.insync.replicas config for the transaction topic
- ##
- transactionStateLogMinIsr: 1
- ## The number of threads doing disk I/O.
- ##
- numIoThreads: 8
- ## The number of threads handling network requests.
- ##
- numNetworkThreads: 3
- ## The default number of log partitions per topic.
- ##
- numPartitions: 1
- ## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
- ##
- numRecoveryThreadsPerDataDir: 1
- ## The receive buffer (SO_RCVBUF) used by the socket server.
- ##
- socketReceiveBufferBytes: 102400
- ## The maximum size of a request that the socket server will accept (protection against OOM).
- ##
- socketRequestMaxBytes: _104857600
- ## The send buffer (SO_SNDBUF) used by the socket server.
- ##
- socketSendBufferBytes: 102400
- ## Timeout in ms for connecting to zookeeper.
- ##
- zookeeperConnectionTimeoutMs: 6000
- ## Command and args for running the container. Use array form
- ##
- command:
- - /scripts/setup.sh
- args: []
- ## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY}
- ## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration
- ## Example:
- ## extraEnvVars:
- ## - name: KAFKA_CFG_BACKGROUND_THREADS
- ## value: "10"
- ##
- extraEnvVars: []
- ## extraVolumes and extraVolumeMounts allows you to mount other volumes
- ## Examples:
- # extraVolumes:
- # - name: kafka-jaas
- # secret:
- # secretName: kafka-jaas
- # extraVolumeMounts:
- # - name: kafka-jaas
- # mountPath: /bitnami/kafka/config/kafka_jaas.conf
- # subPath: kafka_jaas.conf
- extraVolumes: []
- extraVolumeMounts: []
- ## Extra objects to deploy (value evaluated as a template)
- ##
- extraDeploy: []
- ## Authentication parameters
- ## https://github.com/bitnami/bitnami-docker-kafka#security
- ##
- auth:
- ## Authentication protocol for client and inter-broker communications
- ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls'
- ## This table shows the security provided on each protocol:
- ## | Method | Authentication | Encryption via TLS |
- ## | plaintext | None | No |
- ## | tls | None | Yes |
- ## | mtls | Yes (two-way authentication) | Yes |
- ## | sasl | Yes (via SASL) | No |
- ## | sasl_tls | Yes (via SASL) | Yes |
- ##
- clientProtocol: plaintext
- interBrokerProtocol: plaintext
- ## SASL configuration
- ##
- sasl:
- ## Comma separated list of allowed SASL mechanisms.
- ## Note: ignored unless `auth.clientProtocol` or `auth.interBrokerProtocol` are using either `sasl` or `sasl_tls`
- ##
- mechanisms: plain,scram-sha-256,scram-sha-512
- ## SASL mechanism for inter broker communication.
- ##
- interBrokerMechanism: plain
- ## JAAS configuration for SASL authentication.
- ##
- jaas:
- ## Kafka client user list
- ##
- ## clientUsers:
- ## - user1
- ## - user2
- ##
- clientUsers:
- - user
- ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers.
- ##
- ## clientPasswords:
- ## - password1
- ##   - password2
- ##
- clientPasswords: []
- ## Kafka inter broker communication user
- ##
- interBrokerUser: admin
- ## Kafka inter broker communication password
- ##
- interBrokerPassword: ""
- ## Kafka Zookeeper user
- ##
- zookeeperUser: ""
- ## Kafka Zookeeper password
- ##
- zookeeperPassword: ""
- ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser.
- ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create:
- ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD
- ##
- existingSecret: ""
- ## DEPRECATED: use `auth.sasl.mechanisms` instead.
- saslMechanisms: plain,scram-sha-256,scram-sha-512
- ## DEPRECATED: use `auth.sasl.interBrokerMechanism` instead.
- saslInterBrokerMechanism: plain
- ## DEPRECATED: use `auth.sasl.jaas` instead.
- jaas:
- clientUsers:
- - user
- clientPasswords: []
- interBrokerUser: admin
- interBrokerPassword: ""
- zookeeperUser: ""
- zookeeperPassword: ""
- existingSecret: ""
- ## TLS configuration
- ##
- tls:
- ## Format to use for TLS certificates
- ## Supported values: 'jks' and 'pem'
- ##
- type: jks
- ## Name of an existing secret containing the TLS certificates
- ##
- ## When using 'jks' format for certificates, the secret should contain:
- ## - A truststore
- ## - One keystore per Kafka broker you have in the cluster
- ## Create this secret following the steps below:
- ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
- ## 2) Rename your truststore to `kafka.truststore.jks`.
- ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker.
- ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create:
- ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ...
- ##
- ## When using 'pem' format for certificates, the secret should contain:
- ## - A public CA certificate
- ## - One public certificate and one private key per Kafka broker you have in the cluster
- ## Create this secret following the steps below:
- ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA
- ## 2) Rename your CA file to `kafka.truststore.pem`.
- ## 3) Rename your certificates to `kafka-X.keystore.pem` where X is the ID of each Kafka broker.
- ## 4) Rename your keys to `kafka-X.keystore.key` where X is the ID of each Kafka broker.
- ## 5) Run the command below where SECRET_NAME is the name of the secret you want to create:
- ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.pem --from-file=./kafka-0.keystore.pem --from-file=./kafka-0.keystore.key --from-file=./kafka-1.keystore.pem --from-file=./kafka-1.keystore.key ...
- ##
- existingSecret: ""
- ## Create self-signed TLS certificates. Currently only supported for 'pem' format.
- ## Note: ignored when using 'jks' format or `auth.tls.existingSecret` is not empty
- ##
- autoGenerated: false
- ## Password to access the JKS files or PEM key when they are password-protected.
- ##
- password: ""
- ## Name of an existing secret containing your JKS truststore if the JKS truststore doesn't exist
- ## or is different from the one in the `auth.tls.existingSecret`.
- ## Note: ignored when using 'pem' format for certificates.
- ##
- jksTruststoreSecret: ""
- ## The secret key from the `auth.tls.existingSecret` containing the keystore with a SAN certificate.
- ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services:
- ## - kafka-0.kafka-headless.kafka.svc.cluster.local
- ## - kafka-1.kafka-headless.kafka.svc.cluster.local
- ## - kafka-2.kafka-headless.kafka.svc.cluster.local
- ## Note: ignored when using 'pem' format for certificates.
- ##
- jksKeystoreSAN: ""
- ## The secret key from the `auth.tls.existingSecret` or `auth.tls.jksTruststoreSecret` containing the truststore.
- ## Note: ignored when using 'pem' format for certificates.
- ##
- jksTruststore: ""
- ## The endpoint identification algorithm used by clients to validate server host name.
- ## Disable server host name verification by setting it to an empty string.
- ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
- ##
- endpointIdentificationAlgorithm: https
- ## DEPRECATED: use `auth.tls.existingSecret` instead.
- jksSecret: ""
- ## DEPRECATED: use `auth.tls.jksTruststoreSecret` instead.
- jksTruststoreSecret: ""
- ## DEPRECATED: use `auth.tls.jksKeystoreSAN` instead.
- jksKeystoreSAN: ""
- ## DEPRECATED: use `auth.tls.jksTruststore` instead.
- jksTruststore: ""
- ## DEPRECATED: use `auth.tls.password` instead.
- jksPassword: ""
- ## DEPRECATED: use `auth.tls.endpointIdentificationAlgorithm` instead.
- tlsEndpointIdentificationAlgorithm: https
- ## The address(es) the socket server listens on.
- ## When it's set to an empty array, the listeners will be configured
- ## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
- ##
- listeners: []
- ## The address(es) (hostname:port) the brokers will advertise to producers and consumers.
- ## When it's set to an empty array, the advertised listeners will be configured
- ## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
- ##
- advertisedListeners: []
- ## The listener->protocol mapping
- ## When it's nil, the listeners will be configured
- ## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
- ##
- # listenerSecurityProtocolMap:
- ## Allow to use the PLAINTEXT listener.
- ##
- allowPlaintextListener: true
- ## Name of listener used for communication between brokers.
- ##
- interBrokerListenerName: INTERNAL
- ## Number of Kafka brokers to deploy
- ##
- replicaCount: 1
- ## Minimal broker.id value
- ## Brokers increment their ID starting at this minimal value.
- ## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively.
- ##
- minBrokerId: 0
- ## StrategyType: can be set to RollingUpdate or OnDelete. Defaults to RollingUpdate.
- ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
- ##
- updateStrategy: RollingUpdate
- ## Partition update strategy
- ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
- ##
- # rollingUpdatePartition:
- ## Pod labels. Evaluated as a template
- ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
- ##
- podLabels: {}
- ## Pod annotations. Evaluated as a template
- ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- ##
- podAnnotations: {}
- ## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand
- ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
- ##
- priorityClassName: ""
- ## Pod affinity preset
- ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
- ## Allowed values: soft, hard
- ##
- podAffinityPreset: ""
- ## Pod anti-affinity preset
- ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
- ## Allowed values: soft, hard
- ##
- podAntiAffinityPreset: soft
- ## Node affinity preset
- ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
- ## Allowed values: soft, hard
- ##
- nodeAffinityPreset:
- ## Node affinity type
- ## Allowed values: soft, hard
- ##
- type: ""
- ## Node label key to match
- ## E.g.
- ## key: "kubernetes.io/e2e-az-name"
- ##
- key: ""
- ## Node label values to match
- ## E.g.
- ## values:
- ## - e2e-az1
- ## - e2e-az2
- ##
- values: []
- ## Affinity for pod assignment
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
- ##
- affinity: {}
- ## Node labels for pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- ##
- nodeSelector: {}
- ## Tolerations for pod assignment
- ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
- ##
- tolerations: []
- ## Configure the grace time period for sig term
- ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
- ##
- # terminationGracePeriodSeconds: 30
- ## Kafka pods' Security Context
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
- ##
- podSecurityContext:
- enabled: true
- fsGroup: 1001
- runAsUser: 1001
- ## Kafka containers' Security Context
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
- ## Example:
- ## containerSecurityContext:
- ## capabilities:
- ## drop: ["NET_RAW"]
- ## readOnlyRootFilesystem: true
- ##
- containerSecurityContext: {}
- ## Kafka containers' resource requests and limits
- ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
- ##
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits: {}
- # cpu: 250m
- # memory: 1Gi
- requests: {}
- # cpu: 250m
- # memory: 256Mi
- ## Kafka containers' liveness and readiness probes. Evaluated as a template.
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
- ##
- livenessProbe:
- enabled: true
- initialDelaySeconds: 10
- timeoutSeconds: 5
- # failureThreshold: 3
- # periodSeconds: 10
- # successThreshold: 1
- readinessProbe:
- enabled: true
- initialDelaySeconds: 5
- failureThreshold: 6
- timeoutSeconds: 5
- # periodSeconds: 10
- # successThreshold: 1
- ## Custom liveness/readiness probes that will override the default ones
- ##
- customLivenessProbe: {}
- customReadinessProbe: {}
- ## Pod Disruption Budget configuration
- ## The PDB will only be created if replicaCount is greater than 1
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
- ##
- pdb:
- create: false
- ## Min number of pods that must still be available after the eviction
- ##
- # minAvailable: 1
- ## Max number of pods that can be unavailable after the eviction
- ##
- maxUnavailable: 1
- ## Add sidecars to the pod.
- ## Example:
- ## sidecars:
- ## - name: your-image-name
- ## image: your-image
- ## imagePullPolicy: Always
- ## ports:
- ## - name: portname
- ## containerPort: 1234
- ##
- sidecars: []
- ## Service parameters
- ##
- service:
- ## Service type
- ##
- type: ClusterIP
- ## Kafka port for client connections
- ##
- port: 9092
- ## Kafka port for inter-broker connections
- ##
- internalPort: 9093
- ## Kafka port for external connections
- ##
- externalPort: 9094
- ## Specify the nodePort value for the LoadBalancer and NodePort service types.
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
- ##
- nodePorts:
- client: ""
- external: ""
- ## Set the LoadBalancer service type to internal only.
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
- ##
- # loadBalancerIP:
- ## Load Balancer sources
- ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
- ## Example:
- ## loadBalancerSourceRanges:
- ## - 10.10.10.0/24
- ##
- loadBalancerSourceRanges: []
- ## Provide any additional annotations which may be required. Evaluated as a template
- ##
- annotations: {}
- ## External Access to Kafka brokers configuration
- ##
- externalAccess:
- ## Enable Kubernetes external cluster access to Kafka brokers
- ##
- enabled: false
- ## External IPs auto-discovery configuration
- ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API
- ## Note: RBAC might be required
- ##
- autoDiscovery:
- ## Enable external IP/ports auto-discovery
- ##
- enabled: false
- ## Bitnami Kubectl image
- ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
- ##
- image:
- registry: docker.io
- repository: bitnami/kubectl
- tag: 1.19.11-debian-10-r14
- ## Specify an imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: IfNotPresent
- ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## Example:
- ## pullSecrets:
- ## - myRegistryKeySecretName
- ##
- pullSecrets: []
- ## Init Container resource requests and limits
- ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
- ##
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits: {}
- # cpu: 100m
- # memory: 128Mi
- requests: {}
- # cpu: 100m
- # memory: 128Mi
- ## Parameters to configure K8s service(s) used to externally access Kafka brokers
- ## A new service per broker will be created
- ##
- service:
- ## Service type. Allowed values: LoadBalancer or NodePort
- ##
- type: LoadBalancer
- ## Port used when service type is LoadBalancer
- ##
- port: 9094
- ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
- ## Example:
- ## loadBalancerIPs:
- ## - X.X.X.X
- ## - Y.Y.Y.Y
- ##
- loadBalancerIPs: []
- ## Load Balancer sources
- ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
- ## Example:
- ## loadBalancerSourceRanges:
- ## - 10.10.10.0/24
- ##
- loadBalancerSourceRanges: []
- ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount
- ## Example:
- ## nodePorts:
- ## - 30001
- ## - 30002
- ##
- nodePorts: []
- ## Use worker host IPs
- useHostIPs: false
- ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners.
- ## If not specified, the container will try to get the kubernetes node external IP
- ##
- # domain: mydomain.com
- ## Provide any additional annotations which may be required. Evaluated as a template
- ##
- annotations: {}
- ## Persistence parameters
- ##
- persistence:
- enabled: false
- ## A manually managed Persistent Volume and Claim
- ## If defined, PVC must be created manually before volume will be bound
- ## The value is evaluated as a template
- ##
- # existingClaim:
- ## PV Storage Class
- ## If defined, storageClassName: <storageClass>
- ## If set to "-", storageClassName: "", which disables dynamic provisioning
- ## If undefined (the default) or set to null, no storageClassName spec is
- ## set, choosing the default provisioner.
- ##
- # storageClass: "-"
- ## PV Access Mode
- ##
- accessModes:
- - ReadWriteOnce
- ## PVC size
- ##
- size: 8Gi
- ## PVC annotations
- ##
- annotations: {}
- ## selector can be used to match an existing PersistentVolume
- ## selector:
- ## matchLabels:
- ## app: my-app
- selector: {}
- ## Mount point for persistence
- ##
- mountPath: /bitnami/kafka
- ## Log Persistence parameters
- ##
- logPersistence:
- enabled: false
- ## A manually managed Persistent Volume and Claim
- ## If defined, PVC must be created manually before volume will be bound
- ## The value is evaluated as a template
- ##
- # existingClaim:
- ## PV Storage Class
- ## If defined, storageClassName: <storageClass>
- ## If set to "-", storageClassName: "", which disables dynamic provisioning
- ## If undefined (the default) or set to null, no storageClassName spec is
- ## set, choosing the default provisioner.
- # existingLogClaim:
- ## PV Storage Class
- ## It is inherited from persistence.storageClass
- ##
- ## PV Access Mode
- ##
- accessModes:
- - ReadWriteOnce
- ## PVC size
- ##
- size: 8Gi
- ## PVC annotations
- ##
- annotations: {}
- ## selector can be used to match an existing PersistentVolume
- ## selector:
- ## matchLabels:
- ## app: my-app
- selector: {}
- ## Mount path for persistent logs
- ##
- mountPath: /opt/bitnami/kafka/logs
- ## Init Container parameters
- ## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component
- ## values from the securityContext section of the component
- ##
- volumePermissions:
- enabled: false
- ## The security context for the volumePermissions init container
- ##
- securityContext:
- runAsUser: 0
- image:
- registry: docker.io
- repository: bitnami/bitnami-shell
- tag: 10-debian-10-r98
- ## Specify an imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: Always
- ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## Example:
- ## pullSecrets:
- ## - myRegistryKeySecretName
- ##
- pullSecrets: []
- ## Init Container resource requests and limits
- ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
- ##
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits: {}
- # cpu: 100m
- # memory: 128Mi
- requests: {}
- # cpu: 100m
- # memory: 128Mi
- ## Kafka pods ServiceAccount
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
- ##
- serviceAccount:
- ## Specifies whether a ServiceAccount should be created
- ##
- create: true
- ## The name of the ServiceAccount to use.
- ## If not set and create is true, a name is generated using the kafka.serviceAccountName template
- ##
- # name:
- # Allows auto mount of ServiceAccountToken on the serviceAccount created
- # Can be set to false if pods using this serviceAccount do not need to use K8s API
- automountServiceAccountToken: true
- ## Role Based Access
- ## ref: https://kubernetes.io/docs/admin/authorization/rbac/
- ##
- rbac:
- ## Specifies whether RBAC rules should be created
- ## binding Kafka ServiceAccount to a role
- ## that allows Kafka pods querying the K8s API
- ##
- create: false
- ## Kafka provisioning
- ##
- provisioning:
- enabled: false
- image:
- registry: docker.io
- repository: bitnami/kafka
- tag: 2.8.0-debian-10-r29
- ## Specify an imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: IfNotPresent
- ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## Example:
- ## pullSecrets:
- ## - myRegistryKeySecretName
- ##
- pullSecrets: []
- ## Set to true if you would like to see extra information on logs
- ##
- debug: false
- # provisioning.numPartitions: Default number of partitions for topics when not specified.
- numPartitions: 1
- # provisioning.replicationFactor: Default replication factor for topics when not specified.
- replicationFactor: 1
- ## Use an alternate scheduler, e.g. "stork".
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
- ##
- # schedulerName:
- podAnnotations: {}
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits: {}
- # cpu: 250m
- # memory: 1Gi
- requests: {}
- # cpu: 250m
- # memory: 256Mi
- ## Command and args for running the container (set to default if not set). Use array form
- ##
- command: []
- args: []
- topics: []
- # - name: topic-name
- # partitions: 1
- # replicationFactor: 1
- # # https://kafka.apache.org/documentation/#topicconfigs
- # config:
- # max.message.bytes: 64000
- # flush.messages: 1
- ## Prometheus Exporters / Metrics
- ##
- metrics:
- ## Prometheus Kafka Exporter: exposes complementary metrics to JMX Exporter
- ##
- kafka:
- enabled: false
- ## Bitnami Kafka exporter image
- ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
- ##
- image:
- registry: docker.io
- repository: bitnami/kafka-exporter
- tag: 1.3.1-debian-10-r14
- ## Specify an imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: IfNotPresent
- ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## Example:
- ## pullSecrets:
- ## - myRegistryKeySecretName
- ##
- pullSecrets: []
- ## Use an alternate scheduler, e.g. "stork".
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
- ##
- # schedulerName:
- ## Extra flags to be passed to Kafka exporter
- ## Example:
- ## extraFlags:
- ## tls.insecure-skip-tls-verify: ""
- ## web.telemetry-path: "/metrics"
- ##
- extraFlags: {}
- ## Name of the existing secret containing the optional certificate and key files
- ## for Kafka Exporter client authentication
- ##
- # certificatesSecret:
- ## The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file)
- ##
- tlsCert: cert-file
- ## The secret key from the certificatesSecret if 'client-key' key different from the default (key-file)
- ##
- tlsKey: key-file
- ## Name of the existing secret containing the optional ca certificate
- ## for Kafka Exporter client authentication
- ##
- # tlsCaSecret:
- ## The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file)
- ##
- tlsCaCert: ca-file
- ## Prometheus Kafka Exporter's resource requests and limits
- ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
- ##
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits: {}
- # cpu: 100m
- # memory: 128Mi
- requests: {}
- # cpu: 100m
- # memory: 128Mi
- ## Affinity for pod assignment
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
- ##
- affinity: {}
- ## Node labels for pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- ##
- nodeSelector: {}
- ## Tolerations for pod assignment
- ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
- ##
- tolerations: []
- ## Add init containers to the Kafka exporter pods.
- ## Example:
- ## initContainers:
- ## - name: your-image-name
- ## image: your-image
- ## imagePullPolicy: Always
- ## ports:
- ## - name: portname
- ## containerPort: 1234
- ##
- initContainers: {}
- ## Service configuration
- ##
- service:
- ## Kafka Exporter Service type
- ##
- type: ClusterIP
- ## Kafka Exporter Prometheus port
- ##
- port: 9308
- ## Specify the nodePort value for the LoadBalancer and NodePort service types.
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
- ##
- nodePort: ""
- ## Set the LoadBalancer service type to internal only.
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
- ##
- # loadBalancerIP:
- ## Load Balancer sources
- ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
- ## Example:
- ## loadBalancerSourceRanges:
- ## - 10.10.10.0/24
- ##
- loadBalancerSourceRanges: []
- ## Set the Cluster IP to use
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
- ##
- # clusterIP: None
- ## Annotations for the Kafka Exporter Prometheus metrics service
- ##
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}"
- prometheus.io/path: "/metrics"
- ## Prometheus JMX Exporter: exposes the majority of Kafka's metrics
- ##
- jmx:
- enabled: false
- ## Bitnami JMX exporter image
- ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
- ##
- image:
- registry: docker.io
- repository: bitnami/jmx-exporter
- tag: 0.15.0-debian-10-r121
- ## Specify an imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: IfNotPresent
- ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## Example:
- ## pullSecrets:
- ## - myRegistryKeySecretName
- ##
- pullSecrets: []
- ## Prometheus JMX Exporter's resource requests and limits
- ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
- ##
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits: {}
- # cpu: 100m
- # memory: 128Mi
- requests: {}
- # cpu: 100m
- # memory: 128Mi
- ## Service configuration
- ##
- service:
- ## JMX Exporter Service type
- ##
- type: ClusterIP
- ## JMX Exporter Prometheus port
- ##
- port: 5556
- ## Specify the nodePort value for the LoadBalancer and NodePort service types.
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
- ##
- nodePort: ""
- ## Set the LoadBalancer service type to internal only.
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
- ##
- # loadBalancerIP:
- ## Load Balancer sources
- ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
- ## Example:
- ## loadBalancerSourceRanges:
- ## - 10.10.10.0/24
- ##
- loadBalancerSourceRanges: []
- ## Set the Cluster IP to use
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
- ##
- # clusterIP: None
- ## Annotations for the JMX Exporter Prometheus metrics service
- ##
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}"
- prometheus.io/path: "/"
- ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted
- ## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics
- ## (warning: it's excessively verbose and the metrics aren't formatted in a Prometheus style) (1) `whitelistObjectNames: []`
- ## (2) commented out above `overrideConfig`.
- ##
- whitelistObjectNames:
- - kafka.controller:*
- - kafka.server:*
- - java.lang:*
- - kafka.network:*
- - kafka.log:*
- ## Prometheus JMX exporter configuration
- ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template
- ##
- ## Credits to the incubator/kafka chart for the JMX configuration.
- ## https://github.com/helm/charts/tree/master/incubator/kafka
- ##
- config: |-
- jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
- lowercaseOutputName: true
- lowercaseOutputLabelNames: true
- ssl: false
- {{- if .Values.metrics.jmx.whitelistObjectNames }}
- whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
- {{- end }}
- ## ConfigMap with Prometheus JMX exporter configuration
- ## NOTE: This will override metrics.jmx.config
- ##
- # existingConfigmap:
- ## Prometheus Operator ServiceMonitor configuration
- ##
- serviceMonitor:
- enabled: false
- ## Namespace in which Prometheus is running
- ##
- # namespace: monitoring
- ## Interval at which metrics should be scraped.
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
- ##
- # interval: 10s
- ## Timeout after which the scrape is ended
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
- ##
- # scrapeTimeout: 10s
- ## ServiceMonitor selector labels
- ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
- ##
- # selector:
- # prometheus: my-prometheus
- ## Relabel configuration for the metrics.
- ##
- # relabelings: []
- # MetricRelabelConfigs to apply to samples before ingestion.
- ##
- # metricRelabelings: []
- ##
- ## Zookeeper chart configuration
- ##
- ## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
- ##
- zookeeper:
- enabled: false
- auth:
- ## Enable Zookeeper auth
- ##
- enabled: false
- ## User that Zookeeper clients will use to authenticate
- ##
- # clientUser:
- ## Password that Zookeeper clients will use to authenticate
- ##
- # clientPassword:
- ## Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
- ##
- # serverUsers:
- ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
- ##
- # serverPasswords:
- ## This value is only used when zookeeper.enabled is set to false
- ##
- externalZookeeper:
- ## Server or list of external zookeeper servers to use.
- ##
- servers: zookeeper
- ## Extra init containers to add to the deployment
- ##
- initContainers: []
|