- ## Atlassian Confluence Data Center Helm values
- #
- # HEADS UP!
- #
- # Data loss will occur if sections declared as 'REQUIRED' are not configured appropriately!
- # These sections are:
- # - database
- # - volumes
- #
- # Additional details on pre-provisioning these required resources can be found here:
- # https://atlassian.github.io/data-center-helm-charts/userguide/INSTALLATION/#3-configure-database
- # https://atlassian.github.io/data-center-helm-charts/userguide/INSTALLATION/#5-configure-persistent-storage
- #
- # To manage external access to the Confluence instance, an ingress resource can also be configured
- # under the 'ingress' stanza. This requires a pre-provisioned ingress controller to be present.
- #
- # Additional details on pre-provisioning an ingress controller can be found here:
- # https://atlassian.github.io/data-center-helm-charts/userguide/INSTALLATION/#4-configure-ingress
- #
- ##
- # -- The initial number of Confluence pods that should be started at deployment time.
- # Note that Confluence requires manual configuration via the browser once the first
- # pod is deployed. This configuration must be completed before scaling up additional
- # pods. As such, this value should initially be kept at 1 and only increased once
- # the manual configuration is complete.
- #
- replicaCount: 1
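- # A minimal sketch of scaling up once setup is complete, assuming (hypothetically) that
- # the release is named 'confluence' and the chart was installed from the
- # 'atlassian-data-center' Helm repository:
- #   helm upgrade confluence atlassian-data-center/confluence \
- #     --reuse-values --set replicaCount=3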
- # Image configuration
- #
- image:
- # -- The Confluence Docker image to use
- #
- repository: atlassian/confluence
- # -- Image pull policy
- #
- pullPolicy: IfNotPresent
- # -- The docker image tag to be used - defaults to the Chart appVersion
- #
- tag: "latest"
- # K8s ServiceAccount configuration. Give fine-grained identity and authorization
- # to Pods
- #
- serviceAccount:
- # -- Set to 'true' if a ServiceAccount should be created, or 'false' if it
- # already exists.
- #
- create: true
- # -- The name of the ServiceAccount to be used by the pods. If not specified, but
- # the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name
- # will be auto-generated, otherwise the 'default' ServiceAccount will be used.
- # https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
- #
- name:
- # -- For Docker images hosted in private registries, define the list of image pull
- # secrets that should be utilized by the created ServiceAccount
- # https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
- #
- imagePullSecrets:
- - name: alt-confluence-gitlab
- # - name: secretName
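- # For a private registry, the referenced Secret must already exist in the namespace.
- # A rough sketch of creating it with kubectl (the registry URL and credentials are
- # placeholders):
- #   kubectl create secret docker-registry alt-confluence-gitlab \
- #     --docker-server=<registry-url> --docker-username=<username> --docker-password=<password>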
- # -- Annotations to add to the ServiceAccount (if created)
- #
- annotations: {}
- # Define permissions
- # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole
- #
- clusterRole:
- # -- Set to 'true' if a ClusterRole should be created, or 'false' if it
- # already exists.
- #
- create: true
- # -- The name of the ClusterRole to be used. If not specified, but
- # the "serviceAccount.clusterRole.create" flag is set to 'true',
- # then the ClusterRole name will be auto-generated.
- #
- name:
- # Grant permissions defined in ClusterRole
- # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding
- #
- clusterRoleBinding:
- # -- Set to 'true' if a ClusterRoleBinding should be created, or 'false' if it
- # already exists.
- #
- create: true
- # -- The name of the ClusterRoleBinding to be created. If not specified, but
- # the "serviceAccount.clusterRoleBinding.create" flag is set to 'true',
- # then the ClusterRoleBinding name will be auto-generated.
- #
- name:
- # REQUIRED - Database configuration
- #
- # Confluence requires a backend database. The configuration below can be used to define the
- # database to use and its connection details.
- # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#database-connectivity
- #
- database:
- # -- The database type that should be used. If not specified, then it will need to be
- # provided via the browser during manual configuration post deployment. Valid values
- # include:
- # - 'postgresql'
- # - 'mysql'
- # - 'oracle'
- # - 'mssql'
- # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasetype
- #
- type: postgresql
- # -- The jdbc URL of the database. If not specified, then it will need to be provided
- # via the browser during manual configuration post deployment. Example URLs include:
- # - 'jdbc:postgresql://<dbhost>:5432/<dbname>'
- # - 'jdbc:mysql://<dbhost>/<dbname>'
- # - 'jdbc:sqlserver://<dbhost>:1433;databaseName=<dbname>'
- # - 'jdbc:oracle:thin:@<dbhost>:1521:<SID>'
- # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl
- #
- url: jdbc:postgresql://confluence-test-db:5432/confluence
- # JDBC connection credentials
- #
- credentials:
- # -- The name of the K8s Secret that contains the database login credentials.
- # If the secret is specified, then the credentials will be automatically utilised on
- # Confluence startup. If the secret is not provided, then the credentials will need to be
- # provided via the browser during manual configuration post deployment.
- #
- # Example of creating a database credentials K8s secret below:
- # 'kubectl create secret generic <secret-name> --from-literal=username=<username> \
- # --from-literal=password=<password>'
- # https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
- #
- secretName: confluence-test-creds
- # -- The key ('username') in the Secret used to store the database login username
- #
- usernameSecretKey: username
- # -- The key ('password') in the Secret used to store the database login password
- #
- passwordSecretKey: password
- # REQUIRED - Volume configuration
- #
- # By default, the charts will configure local-home, synchrony-home and shared-home as
- # ephemeral volumes, i.e. 'emptyDir: {}'. This is fine for evaluation purposes, but it is
- # not suitable for production deployments, where local-home, synchrony-home and
- # shared-home should all be configured appropriately.
- # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#volumes
- #
- volumes:
- # Each pod requires its own volume for 'local-home'. This is needed for key data
- # that helps define how Confluence works.
- # https://confluence.atlassian.com/doc/confluence-home-and-other-important-directories-590259707.html
- #
- localHome:
- # Dynamic provisioning of local-home using the K8s Storage Classes
- #
- # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic
- # https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/LOCAL_STORAGE/
- #
- persistentVolumeClaim:
- # -- If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically
- # created for each pod based on the 'StorageClassName' supplied below.
- #
- create: true
- # -- Specify the name of the 'StorageClass' that should be used for the local-home
- # volume claim.
- #
- storageClassName: rook-nfs-share1
- # -- Specifies the standard K8s resource requests for the local-home
- # volume claims.
- #
- resources:
- requests:
- storage: 1Gi
- # -- Static provisioning of local-home using K8s PVs and PVCs
- #
- # NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for
- # pods is not recommended. Dynamic provisioning described above is the prescribed
- # approach.
- #
- # When 'persistentVolumeClaim.create' is 'false', then this value can be used to define
- # a standard K8s volume that will be used for the local-home volume(s). If not defined,
- # then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify
- # the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object.
- # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static
- #
- customVolume: {}
- # persistentVolumeClaim:
- # claimName: "<pvc>"
- # -- Specifies the path in the Confluence container to which the local-home volume will be
- # mounted.
- #
- mountPath: "/var/atlassian/application-data/confluence"
- # A volume for 'shared-home' is required for Confluence to operate effectively in a
- # multi-node environment.
- # https://confluence.atlassian.com/doc/set-up-a-confluence-data-center-cluster-982322030.html#SetupaConfluenceDataCentercluster-Setupandconfigureyourcluster
- #
- sharedHome:
- # Dynamic provisioning of shared-home using the K8s Storage Class
- #
- # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic
- #
- persistentVolumeClaim:
- # -- If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically
- # created for shared-home based on the 'StorageClassName' supplied below.
- #
- create: true
- # -- Specify the name of the 'StorageClass' that should be used for the 'shared-home'
- # volume claim.
- #
- storageClassName: rook-nfs-share1
- # -- Specifies the standard K8s resource requests for the shared-home
- # volume claim.
- #
- resources:
- requests:
- storage: 1Gi
- # -- Static provisioning of shared-home using K8s PVs and PVCs
- #
- # When 'persistentVolumeClaim.create' is 'false', then this value can be used to define
- # a standard K8s volume that will be used for the shared-home volume. If not defined,
- # then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify
- # the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object.
- # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static
- # https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/
- #
- customVolume: {}
- # persistentVolumeClaim:
- # claimName: "<pvc>"
- # -- Specifies the path in the Confluence container to which the shared-home volume will be
- # mounted.
- #
- mountPath: "/var/atlassian/application-data/shared-home"
- # -- Specifies the sub-directory of the shared-home volume that will be mounted into the
- # Confluence container.
- #
- subPath:
- # Modify permissions on shared-home
- #
- nfsPermissionFixer:
- # -- If 'true', this will alter the shared-home volume's root directory so that Confluence
- # can write to it. This is a workaround for a K8s bug affecting NFS volumes:
- # https://github.com/kubernetes/examples/issues/260
- #
- enabled: true
- # -- The path in the K8s initContainer where the shared-home volume will be mounted
- #
- mountPath: "/shared-home"
- # -- By default, the fixer will change the group ownership of the volume's root directory
- # to match the Confluence container's GID (2002), and then ensure the directory is
- # group-writeable. If this is not the desired behaviour, the command to run can be
- # specified here.
- #
- command: "chown -R 2002:2002 /shared-home; chmod g+rw /shared-home"
- # Each Synchrony pod needs its own volume for 'synchrony-home'. The Synchrony process
- # will write logs to that location, and any configuration files can be placed there.
- #
- synchronyHome:
- # Dynamic provisioning of synchrony-home using the K8s Storage Classes
- #
- # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic
- # https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/LOCAL_STORAGE/
- #
- persistentVolumeClaim:
- # -- If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically
- # created for each pod based on the 'StorageClassName' supplied below.
- #
- create: false
- # -- Specify the name of the 'StorageClass' that should be used for the synchrony-home
- # volume claim.
- #
- storageClassName:
- # -- Specifies the standard K8s resource requests for the synchrony-home
- # volume claims.
- #
- resources:
- requests:
- storage: 1Gi
- # -- Static provisioning of synchrony-home using K8s PVs and PVCs
- #
- # NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for
- # pods is not recommended. Dynamic provisioning described above is the prescribed
- # approach.
- #
- # When 'persistentVolumeClaim.create' is 'false', then this value can be used to define
- # a standard K8s volume that will be used for the synchrony-home volume(s). If not defined,
- # then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify
- # the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object.
- # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static
- #
- customVolume: {}
- # persistentVolumeClaim:
- # claimName: "<pvc>"
- # -- Specifies the path in the Synchrony container to which the synchrony-home volume will be
- # mounted.
- #
- mountPath: "/var/atlassian/application-data/confluence"
- # -- Defines additional volumes that should be applied to all Confluence and Synchrony pods.
- # Note that this will not create any corresponding volume mounts;
- # those need to be defined in confluence.additionalVolumeMounts.
- #
- additional: []
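- # A hedged sketch of declaring one extra volume backed by a hypothetical ConfigMap
- # named 'confluence-extra-config'; a matching mount must be added under
- # 'confluence.additionalVolumeMounts' (see the sketch there):
- # additional:
- #   - name: extra-config
- #     configMap:
- #       name: confluence-extra-config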
- # Ingress configuration
- #
- # To make the Atlassian product available from outside the K8s cluster an Ingress
- # Controller should be pre-provisioned. With this in place the configuration below
- # can be used to configure an appropriate Ingress Resource.
- # https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#ingress
- #
- ingress:
- # -- Set to 'true' if an Ingress Resource should be created. This depends on a
- # pre-provisioned Ingress Controller being available.
- #
- create: true
- # -- The ingress class name that identifies the ingress controller to use.
- #
- # Please follow the documentation of your ingress controller. If the cluster
- # contains multiple ingress controllers, this setting allows you to control
- # which of them is used for Atlassian application traffic.
- #
- className: "nginx"
- # -- Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx'
- # controller.
- # https://kubernetes.github.io/ingress-nginx/
- #
- # This will populate the Ingress Resource with annotations that are specific to
- # the K8s ingress-nginx controller. Set to 'false' if a different controller is
- # to be used, in which case the appropriate annotations for that controller must
- # be specified below under 'ingress.annotations'.
- #
- nginx: true
- # -- The max body size to allow. Requests exceeding this size will result
- # in an HTTP 413 error being returned to the client.
- #
- maxBodySize: 250m
- # -- The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on
- # this hostname will be routed by the Ingress Resource to the appropriate backend
- # Service.
- #
- host: "confluence-test.myhost"
- # -- The base path for the Ingress Resource, for example '/confluence'. With an
- # 'ingress.host' value of 'company.k8s.com' this would result in a URL of
- # 'company.k8s.com/confluence'. Defaults to the value of 'confluence.service.contextPath'.
- #
- path:
- # -- The custom annotations that should be applied to the Ingress Resource
- # when NOT using the K8s ingress-nginx controller.
- #
- annotations:
- cert-manager.io/cluster-issuer: cloudflare
- kubernetes.io/ingress.class: nginx
- kubernetes.io/tls-acme: "true"
- nginx.ingress.kubernetes.io/proxy-body-size: 15m
- nginx.ingress.kubernetes.io/affinity: "cookie"
- nginx.ingress.kubernetes.io/affinity-mode: "persistent"
- nginx.ingress.kubernetes.io/proxy-buffering: "on"
- # -- Set to 'true' if browser communication with the application should be enforced
- # over TLS (HTTPS).
- #
- https: true
- # -- The name of the K8s Secret that contains the TLS private key and corresponding
- # certificate. When utilised, TLS termination occurs at the ingress point, and
- # traffic to the Service and its Pods remains in plaintext.
- #
- # Usage is optional and depends on your use case. The Ingress Controller itself
- # can also be configured with a TLS secret for all Ingress Resources.
- # https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets
- # https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
- #
- tlsSecretName:
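- # If TLS is terminated at the ingress, the Secret could be created from an existing
- # certificate/key pair, e.g. (the secret name and file names are placeholders):
- #   kubectl create secret tls confluence-test-tls --cert=tls.crt --key=tls.key
- # tlsSecretName: confluence-test-tls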
- # Confluence configuration
- #
- confluence:
- # K8s Confluence Service configuration
- #
- service:
- # -- The port on which the Confluence K8s Service will listen
- #
- port: 80
- # -- The type of K8s service to use for Confluence
- #
- type: ClusterIP
- # -- Use specific loadBalancerIP. Only applies to service type LoadBalancer.
- #
- loadBalancerIP:
- # -- The Tomcat context path that Confluence will use. The ATL_TOMCAT_CONTEXTPATH
- # will be set automatically.
- #
- contextPath:
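- # A hypothetical example; 'ingress.path' defaults to this value, so it should match
- # the path users will browse to:
- # contextPath: "/confluence"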
- # -- Additional annotations to apply to the Service
- #
- annotations: {}
- # Standard K8s field that holds pod-level security attributes and common container settings.
- # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- # Do not populate when deploying to OpenShift, unless anyuid policy is attached to a service account.
- #
- securityContext:
- # -- The GID used by the Confluence Docker image.
- # If not supplied, this will default to 2002.
- # This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Confluence container.
- # However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260
- fsGroup: 2002
- # -- Standard K8s field that holds security configurations that will be applied to a container.
- # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- #
- containerSecurityContext:
- # runAsUser: 2002
- runAsGroup: 2002
- # -- The umask used by the Confluence process when it creates new files.
- # The default is 0022. This gives the new files:
- # - read/write permissions for the Confluence user
- # - read permissions for everyone else.
- #
- umask: "0022"
- # -- Boolean to define whether to set local home directory permissions on startup
- # of Confluence container. Set to 'false' to disable this behaviour.
- #
- setPermissions: true
- # Port definitions
- #
- ports:
- # -- The port on which the Confluence container listens for HTTP traffic
- #
- http: 8090
- # -- The port on which the Confluence container listens for Hazelcast traffic
- #
- hazelcast: 5701
- # Confluence licensing details
- #
- license:
- # -- The name of the K8s Secret that contains the Confluence license key. If specified, then
- # the license will be automatically populated during Confluence setup. Otherwise, it will
- # need to be provided via the browser after initial startup. An example of creating
- # a K8s secret for the license is shown below:
- # 'kubectl create secret generic <secret-name> --from-literal=license-key=<license>'
- # https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
- #
- secretName:
- # -- The key in the K8s Secret that contains the Confluence license key
- #
- secretKey: license-key
- # Confirm that Confluence is up and running with a ReadinessProbe
- # https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes
- #
- readinessProbe:
- # -- The initial delay (in seconds) for the Confluence container readiness probe,
- # after which the probe will start running.
- #
- initialDelaySeconds: 10
- # -- How often (in seconds) the Confluence container readiness probe will run
- #
- periodSeconds: 5
- # -- The number of consecutive failures of the Confluence container readiness probe
- # before the pod fails readiness checks.
- #
- failureThreshold: 6
- # Confluence log configuration
- #
- accessLog:
- # -- Set to 'true' if access logging should be enabled.
- #
- enabled: true
- # -- The path within the Confluence container where the local-home volume should be
- # mounted in order to capture access logs.
- #
- mountPath: "/opt/atlassian/confluence/logs"
- # -- The subdirectory within the local-home volume where access logs should be
- # stored.
- #
- localHomeSubPath: "logs"
- # Data Center clustering
- #
- clustering:
- # -- Set to 'true' if Data Center clustering should be enabled
- # This will automatically configure cluster peer discovery between cluster nodes.
- #
- enabled: true
- # -- Set to 'true' if the K8s pod name should be used as the end-user-visible
- # name of the Data Center cluster node.
- #
- usePodNameAsClusterNodeName: true
- # Confluence Pod resource requests
- #
- resources:
- # JVM Memory / Heap Size definitions. The values below are based on the
- # defaults defined for the Confluence docker container.
- # https://bitbucket.org/atlassian-docker/docker-atlassian-confluence-server/src/master/#markdown-header-memory-heap-size
- #
- jvm:
- # -- The maximum amount of heap memory that will be used by the Confluence JVM
- #
- maxHeap: "2g"
- # -- The minimum amount of heap memory that will be used by the Confluence JVM
- #
- minHeap: "1g"
- # -- The memory reserved for the Confluence JVM code cache
- #
- reservedCodeCache: "256m"
- # Specifies the standard K8s resource requests and/or limits for the Confluence
- # container. If memory resources are specified here, they must allow for the size of
- # the Confluence JVM: the maximum heap size, the reserved code cache size, plus other
- # JVM overheads must all be accommodated. Allowing for (maxHeap + codeCache) * 1.5 is
- # a reasonable rule of thumb.
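- # For example, with maxHeap "2g" and reservedCodeCache "256m" that rule of thumb
- # suggests roughly (2048m + 256m) * 1.5, i.e. about 3.4Gi of container memory.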
- #
- container:
- requests:
- # -- Initial CPU request by Confluence pod.
- #
- cpu: "2"
- # -- Initial Memory request by Confluence pod
- #
- memory: "2G"
- # limits:
- # cpu: "2"
- # memory: "2G"
- shutdown:
- # -- The termination grace period for pods during shutdown. This
- # should be set to the Confluence internal grace period (default 20
- # seconds), plus a small buffer to allow the JVM to fully terminate.
- #
- terminationGracePeriodSeconds: 25
- # -- By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/),
- # using a script supplied by the Docker image. If any other
- # shutdown behaviour is needed it can be achieved by overriding
- # this value. Note that the shutdown command needs to wait for the
- # application to shut down completely before exiting; see [the default
- # command](https://bitbucket.org/atlassian-docker/docker-atlassian-confluence-server/src/master/shutdown-wait.sh)
- # for details.
- #
- command: "/shutdown-wait.sh"
- # -- Specifies a list of additional arguments that can be passed to the Confluence JVM, e.g.
- # system properties.
- #
- additionalJvmArgs:
- - -Djava.awt.headless=true
- - -javaagent:/var/agent/atlassian-agent.jar
- # -- Specifies a list of additional Java libraries that should be added to the
- # Confluence container. Each item in the list should specify the name of the volume
- # that contains the library, as well as the name of the library file within that
- # volume's root directory. Optionally, a subDirectory field can be included to
- # specify which directory in the volume contains the library file. Additional details:
- # https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/
- #
- additionalLibraries: []
- # - volumeName:
- # subDirectory:
- # fileName:
- # -- Specifies a list of additional Confluence plugins that should be added to the
- # Confluence container. Note plugins installed via this method will appear as
- # bundled plugins rather than user plugins. These should be specified in the same
- # manner as the 'additionalLibraries' property. Additional details:
- # https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/
- #
- # NOTE: only .jar files can be loaded using this approach. OBRs can be extracted
- # (unzipped) to access the associated .jar.
- #
- # An alternative to this method is to install the plugins via "Manage Apps" in the
- # product system administration UI.
- #
- additionalBundledPlugins: []
- # - volumeName:
- # subDirectory:
- # fileName:
- # -- Defines any additional volumes mounts for the Confluence container. These
- # can refer to existing volumes, or new volumes can be defined via
- # 'volumes.additional'.
- #
- additionalVolumeMounts: []
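- # A matching sketch for the hypothetical 'extra-config' volume shown under
- # 'volumes.additional' above (the mount path is an arbitrary example):
- # additionalVolumeMounts:
- #   - name: extra-config
- #     mountPath: /opt/atlassian/etc/extra-config
- #     readOnly: true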
- # -- Defines any additional environment variables to be passed to the Confluence
- # container. See https://hub.docker.com/r/atlassian/confluence-server for
- # supported variables.
- #
- additionalEnvironmentVariables:
- - name: CONFLUENCE_LOG_STDOUT
- value: "true"
- - name: ATL_PROXY_NAME
- value: "confluence-test.myhost"
- - name: ATL_PROXY_PORT
- value: "443"
- - name: ATL_TOMCAT_SCHEME
- value: "https"
- # - name: ATL_CLUSTER_TYPE
- # value: "tcp_ip"
- # TODO: remove hard-coded
- # - name: ATL_CLUSTER_PEERS
- # value: ""
- # -- Defines any additional ports for the Confluence container.
- #
- additionalPorts:
- - name: hazelcastconf
- containerPort: 5801
- protocol: TCP
- # -- Defines additional volumeClaimTemplates that should be applied to the Confluence pod.
- # Note that this will not create any corresponding volume mounts;
- # those need to be defined in confluence.additionalVolumeMounts.
- #
- additionalVolumeClaimTemplates: []
- # - name: myadditionalvolumeclaim
- # storageClassName:
- # resources:
- # requests:
- # storage: 1Gi
- # -- Defines topology spread constraints for Confluence pods. See details:
- # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
- #
- topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: kubernetes.io/hostname
- # whenUnsatisfiable: ScheduleAnyway
- # labelSelector:
- # matchLabels:
- # app.kubernetes.io/name: confluence
- # Debugging
- #
- jvmDebug:
- # -- Set to 'true' for remote debugging. The Confluence JVM will be started with
- # debugging port 5005 open.
- enabled: false
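- # When enabled, the debug port can be reached from a workstation by port-forwarding to
- # a pod and attaching an IDE debugger, e.g. (the pod name is a placeholder):
- #   kubectl port-forward confluence-0 5005:5005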
- # Confluence Synchrony configuration
- # https://confluence.atlassian.com/doc/configuring-synchrony-858772125.html
- synchrony:
- # -- Set to 'true' if Synchrony (i.e. collaborative editing) should be enabled.
- # This will result in a separate StatefulSet and Service being created for Synchrony.
- # If disabled, then collaborative editing will be disabled in Confluence.
- enabled: false
- # K8s Synchrony Service configuration
- #
- service:
- # -- The port on which the Synchrony K8s Service will listen
- #
- port: 80
- # -- The type of K8s service to use for Synchrony
- #
- type: ClusterIP
- # -- Use specific loadBalancerIP. Only applies to service type LoadBalancer.
- #
- loadBalancerIP:
- # -- Boolean to define whether to set synchrony home directory permissions on startup
- # of Synchrony container. Set to 'false' to disable this behaviour.
- #
- setPermissions: true
- # Port definitions
- #
- ports:
- # -- The port on which the Synchrony container listens for HTTP traffic
- #
- http: 8091
- # -- The port on which the Synchrony container listens for Hazelcast traffic
- #
- hazelcast: 5701
- hazelcastConf: 5801
- # Confirm that Synchrony is up and running with a ReadinessProbe
- # https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes
- #
- readinessProbe:
- # -- The initial delay (in seconds) for the Synchrony container readiness probe,
- # after which the probe will start running.
- #
- initialDelaySeconds: 5
- # -- How often (in seconds) the Synchrony container readiness probe will run
- #
- periodSeconds: 1
- # -- The number of consecutive failures of the Synchrony container readiness probe
- # before the pod fails readiness checks.
- #
- failureThreshold: 10
- # Synchrony Pod resource requests
- #
- resources:
- # JVM Memory / Heap Size definitions. The values below are based on the
- # defaults defined for the Synchrony docker container.
- #
- jvm:
- # -- The minimum amount of heap memory that will be used by the Synchrony JVM
- #
- minHeap: "1g"
- # -- The maximum amount of heap memory that will be used by the Synchrony JVM
- #
- maxHeap: "2g"
- # -- The memory allocated for the Synchrony stack
- #
- stackSize: "2048k"
- # Specifies the standard K8s resource requests and/or limits for the Synchrony
- # container. If memory resources are specified here, they must allow for the size of
- # the Synchrony JVM: the maximum heap size, the reserved code cache size, plus other
- # JVM overheads must all be accommodated. Allowing for (maxHeap + codeCache) * 1.5 is
- # a reasonable rule of thumb.
- #
- container:
- requests:
- # -- Initial CPU request by Synchrony pod
- #
- cpu: "2"
- # -- Initial memory request by the Synchrony pod
- #
- memory: "2.5G"
- # -- Specifies a list of additional arguments that can be passed to the Synchrony JVM, e.g.
- # system properties.
- #
- additionalJvmArgs: {}
- # - -Dsynchrony.example.system.property=46
- shutdown:
- # -- The termination grace period for pods during shutdown. This
- # should be set to the Synchrony internal grace period (default 20
- # seconds), plus a small buffer to allow the JVM to fully terminate.
- terminationGracePeriodSeconds: 25
- # -- The base URL of the Synchrony service. This will be the URL that users' browsers will
- # be given to communicate with Synchrony, as well as the URL that the Confluence service
- # will use to communicate directly with Synchrony, so the URL must be resolvable both from
- # inside and outside the Kubernetes cluster.
- ingressUrl:
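- # A hedged example, assuming Synchrony is exposed under the '/synchrony' path of the
- # host configured for the ingress above:
- # ingressUrl: "https://confluence-test.myhost/synchrony"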
- # -- Specifies a list of additional Java libraries that should be added to the
- # Synchrony container. Each item in the list should specify the name of the volume
- # that contains the library, as well as the name of the library file within that
- # volume's root directory. Optionally, a subDirectory field can be included to
- # specify which directory in the volume contains the library file. Additional details:
- # https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/
- #
- additionalLibraries: []
- # - volumeName:
- # subDirectory:
- # fileName:
- # -- Defines any additional ports for the Synchrony container.
- #
- additionalPorts: []
- # - name: jmx
- # containerPort: 5555
- # protocol: TCP
- # -- Defines topology spread constraints for Synchrony pods. See details:
- # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
- #
- topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: kubernetes.io/hostname
- # whenUnsatisfiable: ScheduleAnyway
- # labelSelector:
- # matchLabels:
- # app.kubernetes.io/name: confluence-synchrony
- # Fluentd configuration
- #
- # Confluence log collection and aggregation can be enabled using Fluentd. This config
- # assumes an existing ELK stack has been stood up and is available.
- # https://www.fluentd.org/
- #
- fluentd:
- # -- Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod
- #
- enabled: false
- # -- The Fluentd sidecar image
- #
- imageName: fluent/fluentd-kubernetes-daemonset:v1.11.5-debian-elasticsearch7-1.2
- # -- The command used to start Fluentd. If not supplied the default command
- # will be used: "fluentd -c /fluentd/etc/fluent.conf -v"
- #
- # Note: The custom command can be free-form; however, pay particular attention to
- # the process that should ultimately be left running in the container. This process
- # should be invoked with 'exec' so that signals are appropriately propagated to it,
- # for instance SIGTERM. An example of how such a command may look is:
- # "<command 1> && <command 2> && exec <primary command>"
- command:
- # -- Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default)
- # should be used for Fluentd. If enabled this config must be supplied via the
- # 'fluentdCustomConfig' property below.
- #
- customConfigFile: false
- # -- Custom fluent.conf file
- #
- fluentdCustomConfig: {}
- # fluent.conf: |
- # <source>
- # @type tail
- # <parse>
- # @type multiline
- # format_firstline /\d{4}-\d{1,2}-\d{1,2}/
- # </parse>
- # path /opt/atlassian/confluence/logs/access_log.*
- # pos_file /tmp/confluencelog.pos
- # tag confluence-access-logs
- # </source>
- # -- The port on which the Fluentd sidecar will listen
- #
- httpPort: 9880
- # Elasticsearch config based on your ELK stack
- #
- elasticsearch:
- # -- Set to 'true' if Fluentd should send all log events to an Elasticsearch service.
- #
- enabled: true
- # -- The hostname of the Elasticsearch service that Fluentd should send logs to.
- #
- hostname: elasticsearch
- # -- The prefix of the Elasticsearch index name that will be used
- #
- indexNamePrefix: confluence
- # -- Specify custom volumes to be added to the Fluentd container (e.g. more log sources)
- #
- extraVolumes: []
- # - name: local-home
- # mountPath: /opt/atlassian/confluence/logs
- # subPath: log
- # readOnly: true
- # -- Custom annotations that will be applied to all Confluence pods
- #
- podAnnotations: {}
- # name: <value>
- # -- Custom labels that will be applied to all Confluence pods
- #
- podLabels: {}
- # name: <value>
- # -- Standard K8s node-selectors that will be applied to all Confluence pods
- #
- nodeSelector: {}
- # name: <value>
- # -- Standard K8s tolerations that will be applied to all Confluence pods
- #
- tolerations: []
- # - effect: <name>
- # operator: <operator>
- # key: <key>
- # -- Standard K8s affinities that will be applied to all Confluence pods
- #
- affinity: {}
- # name: <value>
- # -- Standard K8s schedulerName that will be applied to all Confluence pods.
- # Check Kubernetes documentation on how to configure multiple schedulers:
- # https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods
- #
- schedulerName:
- # -- Additional container definitions that will be added to all Confluence pods
- #
- additionalContainers: []
- # - name: <name>
- # image: <image>:<tag>
- # -- Additional initContainer definitions that will be added to all Confluence pods
- #
- additionalInitContainers:
- - name: nfs-permission-fixer-2
- image: alpine
- imagePullPolicy: IfNotPresent
- securityContext:
- runAsUser: 0 # make sure we run as root so we get the ability to change the volume permissions
- volumeMounts:
- - name: local-home
- mountPath: "/logs"
- subPath: "logs"
- command: ["sh", "-c", "chown -R 2002:2002 /logs; chmod g+rw /logs"]
- # - name: <name>
- # image: <image>:<tag>
- # -- Additional labels that should be applied to all resources
- #
- additionalLabels: {}
- # name: <value>
- # -- Additional existing ConfigMaps and Secrets not managed by Helm that should be
- # mounted into the service container. Configuration details below (camelCase is important!):
- # 'name' - References an existing ConfigMap or Secret name.
- # 'type' - 'configMap' or 'secret'
- # 'key' - The file name.
- # 'mountPath' - The destination directory in the container.
- # VolumeMounts and Volumes are added with this name and index position, for example:
- # custom-config-0, keystore-2
- #
- additionalFiles:
- # - name: custom-server-config
- # type: configMap
- # key: server.xml
- # mountPath: /opt/atlassian/confluence/conf
- # - name: custom-config
- # type: configMap
- # key: log4j.properties
- # mountPath: /var/atlassian
- # - name: custom-config
- # type: configMap
- # key: web.xml
- # mountPath: /var/atlassian
- # - name: keystore
- # type: secret
- # key: keystore.jks
- # mountPath: /var/ssl
- # -- Additional host aliases for each pod, equivalent to adding them to the /etc/hosts file.
- # https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
- additionalHosts: []
- # - ip: "127.0.0.1"
- # hostnames:
- # - "foo.local"
- # - "bar.local"